author    | Jeremy Fitzhardinge <jeremy.fitzhardinge@citrix.com> | 2009-04-08 00:34:16 +0400
committer | Jeremy Fitzhardinge <jeremy.fitzhardinge@citrix.com> | 2009-04-08 00:34:16 +0400
commit    | 38f4b8c0da01ae7cd9b93386842ce272d6fde9ab (patch)
tree      | 3c8c52201aac038094bfea7efdd0984a8f62045e /lib
parent    | a811454027352c762e0d5bba1b1d8f7d26bf96ae (diff)
parent    | 8e2c4f2844c0e8dcdfe312e5f2204854ca8532c6 (diff)
download  | linux-38f4b8c0da01ae7cd9b93386842ce272d6fde9ab.tar.xz
Merge commit 'origin/master' into for-linus/xen/master
* commit 'origin/master': (4825 commits)
Fix build errors due to CONFIG_BRANCH_TRACER=y
parport: Use the PCI IRQ if offered
tty: jsm cleanups
Adjust path to gpio headers
KGDB_SERIAL_CONSOLE check for module
Change KCONFIG name
tty: Blackin CTS/RTS
Change hardware flow control from poll to interrupt driven
Add support for the MAX3100 SPI UART.
lanana: assign a device name and numbering for MAX3100
serqt: initial clean up pass for tty side
tty: Use the generic RS485 ioctl on CRIS
tty: Correct inline types for tty_driver_kref_get()
splice: fix deadlock in splicing to file
nilfs2: support nanosecond timestamp
nilfs2: introduce secondary super block
nilfs2: simplify handling of active state of segments
nilfs2: mark minor flag for checkpoint created by internal operation
nilfs2: clean up sketch file
nilfs2: super block operations fix endian bug
...
Conflicts:
	arch/x86/include/asm/thread_info.h
	arch/x86/lguest/boot.c
	drivers/xen/manage.c
Diffstat (limited to 'lib')
-rw-r--r-- | lib/Kconfig              |    3
-rw-r--r-- | lib/Kconfig.debug        |   12
-rw-r--r-- | lib/Makefile             |    2
-rw-r--r-- | lib/cpumask.c            |    4
-rw-r--r-- | lib/debugobjects.c       |  127
-rw-r--r-- | lib/decompress_bunzip2.c |    1
-rw-r--r-- | lib/decompress_inflate.c |    1
-rw-r--r-- | lib/decompress_unlzma.c  |    1
-rw-r--r-- | lib/dma-debug.c          |  955
-rw-r--r-- | lib/idr.c                |   46
-rw-r--r-- | lib/lmb.c                |   42
-rw-r--r-- | lib/locking-selftest.c   |    4
-rw-r--r-- | lib/rbtree.c             |   14
-rw-r--r-- | lib/swiotlb.c            |   90
-rw-r--r-- | lib/vsprintf.c           | 1005
15 files changed, 1980 insertions, 327 deletions
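
The largest addition in this diffstat is lib/dma-debug.c, which implements DMA-API debugging: every map call records an entry in a hash table keyed on the device address, and every unmap or sync is checked against the recorded entry so mismatched sizes, wrong directions, and never-mapped addresses can be reported. A minimal userspace sketch of that bookkeeping scheme follows; it borrows the HASH_SIZE and HASH_FN_SHIFT constants from the patch, but the entry layout, printf-based reporting, and lack of locking or preallocation are illustrative only and are not the kernel code.

```c
#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>

/* Userspace sketch of the dma-debug bookkeeping idea: each "map" records an
 * entry in a hash keyed on the device address; each "unmap" must find a
 * matching entry or it is reported as a driver bug. Constants mirror the
 * patch; everything else is illustrative. */

#define HASH_SIZE     1024
#define HASH_FN_SHIFT 13
#define HASH_FN_MASK  (HASH_SIZE - 1)

struct entry {
	uint64_t dev_addr;
	uint64_t size;
	struct entry *next;
};

static struct entry *buckets[HASH_SIZE];

static unsigned int hash_fn(uint64_t dev_addr)
{
	return (dev_addr >> HASH_FN_SHIFT) & HASH_FN_MASK;
}

static void debug_map(uint64_t dev_addr, uint64_t size)
{
	struct entry *e = malloc(sizeof(*e));

	if (!e)
		return;
	e->dev_addr = dev_addr;
	e->size = size;
	e->next = buckets[hash_fn(dev_addr)];
	buckets[hash_fn(dev_addr)] = e;
}

static void debug_unmap(uint64_t dev_addr, uint64_t size)
{
	struct entry **pp = &buckets[hash_fn(dev_addr)];

	for (; *pp; pp = &(*pp)->next) {
		struct entry *e = *pp;

		if (e->dev_addr != dev_addr)
			continue;
		if (e->size != size)
			printf("frees DMA memory with different size: map %llu, unmap %llu\n",
			       (unsigned long long)e->size,
			       (unsigned long long)size);
		*pp = e->next;	/* mapping is gone, drop the entry */
		free(e);
		return;
	}
	printf("frees DMA memory it has not allocated: 0x%llx\n",
	       (unsigned long long)dev_addr);
}

int main(void)
{
	debug_map(0x1000, 64);
	debug_unmap(0x1000, 32);	/* size mismatch -> warning */
	debug_unmap(0x2000, 64);	/* never mapped  -> warning */
	return 0;
}
```

In the real file below, each bucket additionally carries a spinlock, entries come from a preallocated pool, and the checks cover direction, type, and sync ranges as well.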
diff --git a/lib/Kconfig b/lib/Kconfig index 2a9c69f34482..8ade0a7a91e0 100644 --- a/lib/Kconfig +++ b/lib/Kconfig @@ -2,6 +2,9 @@ # Library configuration # +config BINARY_PRINTF + def_bool n + menu "Library routines" config BITREVERSE diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug index 0626fa4856e6..9638d99644af 100644 --- a/lib/Kconfig.debug +++ b/lib/Kconfig.debug @@ -796,6 +796,7 @@ config SYSCTL_SYSCALL_CHECK to properly maintain and use. This enables checks that help you to keep things correct. +source mm/Kconfig.debug source kernel/trace/Kconfig config PROVIDE_OHCI1394_DMA_INIT @@ -912,6 +913,17 @@ config DYNAMIC_DEBUG See Documentation/dynamic-debug-howto.txt for additional information. +config DMA_API_DEBUG + bool "Enable debugging of DMA-API usage" + depends on HAVE_DMA_API_DEBUG + help + Enable this option to debug the use of the DMA API by device drivers. + With this option you will be able to detect common bugs in device + drivers like double-freeing of DMA mappings or freeing mappings that + were never allocated. + This option causes a performance degredation. Use only if you want + to debug device drivers. If unsure, say N. + source "samples/Kconfig" source "lib/Kconfig.kgdb" diff --git a/lib/Makefile b/lib/Makefile index 051a33a8e028..d6edd6753f40 100644 --- a/lib/Makefile +++ b/lib/Makefile @@ -90,6 +90,8 @@ obj-$(CONFIG_DYNAMIC_DEBUG) += dynamic_debug.o obj-$(CONFIG_NLATTR) += nlattr.o +obj-$(CONFIG_DMA_API_DEBUG) += dma-debug.o + hostprogs-y := gen_crc32table clean-files := crc32table.h diff --git a/lib/cpumask.c b/lib/cpumask.c index 3389e2440da0..1f71b97de0f9 100644 --- a/lib/cpumask.c +++ b/lib/cpumask.c @@ -109,10 +109,10 @@ bool alloc_cpumask_var_node(cpumask_var_t *mask, gfp_t flags, int node) #endif /* FIXME: Bandaid to save us from old primitives which go to NR_CPUS. */ if (*mask) { + unsigned char *ptr = (unsigned char *)cpumask_bits(*mask); unsigned int tail; tail = BITS_TO_LONGS(NR_CPUS - nr_cpumask_bits) * sizeof(long); - memset(cpumask_bits(*mask) + cpumask_size() - tail, - 0, tail); + memset(ptr + cpumask_size() - tail, 0, tail); } return *mask != NULL; diff --git a/lib/debugobjects.c b/lib/debugobjects.c index 5d99be1fd988..2755a3bd16a1 100644 --- a/lib/debugobjects.c +++ b/lib/debugobjects.c @@ -30,7 +30,7 @@ struct debug_bucket { static struct debug_bucket obj_hash[ODEBUG_HASH_SIZE]; -static struct debug_obj obj_static_pool[ODEBUG_POOL_SIZE]; +static struct debug_obj obj_static_pool[ODEBUG_POOL_SIZE] __initdata; static DEFINE_SPINLOCK(pool_lock); @@ -50,12 +50,23 @@ static int debug_objects_enabled __read_mostly static struct debug_obj_descr *descr_test __read_mostly; +static void free_obj_work(struct work_struct *work); +static DECLARE_WORK(debug_obj_work, free_obj_work); + static int __init enable_object_debug(char *str) { debug_objects_enabled = 1; return 0; } + +static int __init disable_object_debug(char *str) +{ + debug_objects_enabled = 0; + return 0; +} + early_param("debug_objects", enable_object_debug); +early_param("no_debug_objects", disable_object_debug); static const char *obj_states[ODEBUG_STATE_MAX] = { [ODEBUG_STATE_NONE] = "none", @@ -146,25 +157,51 @@ alloc_object(void *addr, struct debug_bucket *b, struct debug_obj_descr *descr) } /* - * Put the object back into the pool or give it back to kmem_cache: + * workqueue function to free objects. 
*/ -static void free_object(struct debug_obj *obj) +static void free_obj_work(struct work_struct *work) { - unsigned long idx = (unsigned long)(obj - obj_static_pool); + struct debug_obj *obj; unsigned long flags; - if (obj_pool_free < ODEBUG_POOL_SIZE || idx < ODEBUG_POOL_SIZE) { - spin_lock_irqsave(&pool_lock, flags); - hlist_add_head(&obj->node, &obj_pool); - obj_pool_free++; - obj_pool_used--; - spin_unlock_irqrestore(&pool_lock, flags); - } else { - spin_lock_irqsave(&pool_lock, flags); - obj_pool_used--; + spin_lock_irqsave(&pool_lock, flags); + while (obj_pool_free > ODEBUG_POOL_SIZE) { + obj = hlist_entry(obj_pool.first, typeof(*obj), node); + hlist_del(&obj->node); + obj_pool_free--; + /* + * We release pool_lock across kmem_cache_free() to + * avoid contention on pool_lock. + */ spin_unlock_irqrestore(&pool_lock, flags); kmem_cache_free(obj_cache, obj); + spin_lock_irqsave(&pool_lock, flags); } + spin_unlock_irqrestore(&pool_lock, flags); +} + +/* + * Put the object back into the pool and schedule work to free objects + * if necessary. + */ +static void free_object(struct debug_obj *obj) +{ + unsigned long flags; + int sched = 0; + + spin_lock_irqsave(&pool_lock, flags); + /* + * schedule work when the pool is filled and the cache is + * initialized: + */ + if (obj_pool_free > ODEBUG_POOL_SIZE && obj_cache) + sched = !work_pending(&debug_obj_work); + hlist_add_head(&obj->node, &obj_pool); + obj_pool_free++; + obj_pool_used--; + spin_unlock_irqrestore(&pool_lock, flags); + if (sched) + schedule_work(&debug_obj_work); } /* @@ -876,6 +913,63 @@ void __init debug_objects_early_init(void) } /* + * Convert the statically allocated objects to dynamic ones: + */ +static int debug_objects_replace_static_objects(void) +{ + struct debug_bucket *db = obj_hash; + struct hlist_node *node, *tmp; + struct debug_obj *obj, *new; + HLIST_HEAD(objects); + int i, cnt = 0; + + for (i = 0; i < ODEBUG_POOL_SIZE; i++) { + obj = kmem_cache_zalloc(obj_cache, GFP_KERNEL); + if (!obj) + goto free; + hlist_add_head(&obj->node, &objects); + } + + /* + * When debug_objects_mem_init() is called we know that only + * one CPU is up, so disabling interrupts is enough + * protection. This avoids the lockdep hell of lock ordering. + */ + local_irq_disable(); + + /* Remove the statically allocated objects from the pool */ + hlist_for_each_entry_safe(obj, node, tmp, &obj_pool, node) + hlist_del(&obj->node); + /* Move the allocated objects to the pool */ + hlist_move_list(&objects, &obj_pool); + + /* Replace the active object references */ + for (i = 0; i < ODEBUG_HASH_SIZE; i++, db++) { + hlist_move_list(&db->list, &objects); + + hlist_for_each_entry(obj, node, &objects, node) { + new = hlist_entry(obj_pool.first, typeof(*obj), node); + hlist_del(&new->node); + /* copy object data */ + *new = *obj; + hlist_add_head(&new->node, &db->list); + cnt++; + } + } + + printk(KERN_DEBUG "ODEBUG: %d of %d active objects replaced\n", cnt, + obj_pool_used); + local_irq_enable(); + return 0; +free: + hlist_for_each_entry_safe(obj, node, tmp, &objects, node) { + hlist_del(&obj->node); + kmem_cache_free(obj_cache, obj); + } + return -ENOMEM; +} + +/* * Called after the kmem_caches are functional to setup a dedicated * cache pool, which has the SLAB_DEBUG_OBJECTS flag set. 
This flag * prevents that the debug code is called on kmem_cache_free() for the @@ -890,8 +984,11 @@ void __init debug_objects_mem_init(void) sizeof (struct debug_obj), 0, SLAB_DEBUG_OBJECTS, NULL); - if (!obj_cache) + if (!obj_cache || debug_objects_replace_static_objects()) { debug_objects_enabled = 0; - else + if (obj_cache) + kmem_cache_destroy(obj_cache); + printk(KERN_WARNING "ODEBUG: out of memory.\n"); + } else debug_objects_selftest(); } diff --git a/lib/decompress_bunzip2.c b/lib/decompress_bunzip2.c index 5d3ddb5fcfd9..708e2a86d87b 100644 --- a/lib/decompress_bunzip2.c +++ b/lib/decompress_bunzip2.c @@ -50,6 +50,7 @@ #endif /* !STATIC */ #include <linux/decompress/mm.h> +#include <linux/slab.h> #ifndef INT_MAX #define INT_MAX 0x7fffffff diff --git a/lib/decompress_inflate.c b/lib/decompress_inflate.c index 839a329b4fc4..e36b296fc9f8 100644 --- a/lib/decompress_inflate.c +++ b/lib/decompress_inflate.c @@ -23,6 +23,7 @@ #endif /* STATIC */ #include <linux/decompress/mm.h> +#include <linux/slab.h> #define INBUF_LEN (16*1024) diff --git a/lib/decompress_unlzma.c b/lib/decompress_unlzma.c index 546f2f4c157e..32123a1340e6 100644 --- a/lib/decompress_unlzma.c +++ b/lib/decompress_unlzma.c @@ -34,6 +34,7 @@ #endif /* STATIC */ #include <linux/decompress/mm.h> +#include <linux/slab.h> #define MIN(a, b) (((a) < (b)) ? (a) : (b)) diff --git a/lib/dma-debug.c b/lib/dma-debug.c new file mode 100644 index 000000000000..d3da7edc034f --- /dev/null +++ b/lib/dma-debug.c @@ -0,0 +1,955 @@ +/* + * Copyright (C) 2008 Advanced Micro Devices, Inc. + * + * Author: Joerg Roedel <joerg.roedel@amd.com> + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published + * by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + */ + +#include <linux/scatterlist.h> +#include <linux/dma-mapping.h> +#include <linux/stacktrace.h> +#include <linux/dma-debug.h> +#include <linux/spinlock.h> +#include <linux/debugfs.h> +#include <linux/device.h> +#include <linux/types.h> +#include <linux/sched.h> +#include <linux/list.h> +#include <linux/slab.h> + +#include <asm/sections.h> + +#define HASH_SIZE 1024ULL +#define HASH_FN_SHIFT 13 +#define HASH_FN_MASK (HASH_SIZE - 1) + +enum { + dma_debug_single, + dma_debug_page, + dma_debug_sg, + dma_debug_coherent, +}; + +#define DMA_DEBUG_STACKTRACE_ENTRIES 5 + +struct dma_debug_entry { + struct list_head list; + struct device *dev; + int type; + phys_addr_t paddr; + u64 dev_addr; + u64 size; + int direction; + int sg_call_ents; + int sg_mapped_ents; +#ifdef CONFIG_STACKTRACE + struct stack_trace stacktrace; + unsigned long st_entries[DMA_DEBUG_STACKTRACE_ENTRIES]; +#endif +}; + +struct hash_bucket { + struct list_head list; + spinlock_t lock; +} ____cacheline_aligned_in_smp; + +/* Hash list to save the allocated dma addresses */ +static struct hash_bucket dma_entry_hash[HASH_SIZE]; +/* List of pre-allocated dma_debug_entry's */ +static LIST_HEAD(free_entries); +/* Lock for the list above */ +static DEFINE_SPINLOCK(free_entries_lock); + +/* Global disable flag - will be set in case of an error */ +static bool global_disable __read_mostly; + +/* Global error count */ +static u32 error_count; + +/* Global error show enable*/ +static u32 show_all_errors __read_mostly; +/* Number of errors to show */ +static u32 show_num_errors = 1; + +static u32 num_free_entries; +static u32 min_free_entries; + +/* number of preallocated entries requested by kernel cmdline */ +static u32 req_entries; + +/* debugfs dentry's for the stuff above */ +static struct dentry *dma_debug_dent __read_mostly; +static struct dentry *global_disable_dent __read_mostly; +static struct dentry *error_count_dent __read_mostly; +static struct dentry *show_all_errors_dent __read_mostly; +static struct dentry *show_num_errors_dent __read_mostly; +static struct dentry *num_free_entries_dent __read_mostly; +static struct dentry *min_free_entries_dent __read_mostly; + +static const char *type2name[4] = { "single", "page", + "scather-gather", "coherent" }; + +static const char *dir2name[4] = { "DMA_BIDIRECTIONAL", "DMA_TO_DEVICE", + "DMA_FROM_DEVICE", "DMA_NONE" }; + +/* + * The access to some variables in this macro is racy. We can't use atomic_t + * here because all these variables are exported to debugfs. Some of them even + * writeable. This is also the reason why a lock won't help much. But anyway, + * the races are no big deal. Here is why: + * + * error_count: the addition is racy, but the worst thing that can happen is + * that we don't count some errors + * show_num_errors: the subtraction is racy. Also no big deal because in + * worst case this will result in one warning more in the + * system log than the user configured. This variable is + * writeable via debugfs. + */ +static inline void dump_entry_trace(struct dma_debug_entry *entry) +{ +#ifdef CONFIG_STACKTRACE + if (entry) { + printk(KERN_WARNING "Mapped at:\n"); + print_stack_trace(&entry->stacktrace, 0); + } +#endif +} + +#define err_printk(dev, entry, format, arg...) 
do { \ + error_count += 1; \ + if (show_all_errors || show_num_errors > 0) { \ + WARN(1, "%s %s: " format, \ + dev_driver_string(dev), \ + dev_name(dev) , ## arg); \ + dump_entry_trace(entry); \ + } \ + if (!show_all_errors && show_num_errors > 0) \ + show_num_errors -= 1; \ + } while (0); + +/* + * Hash related functions + * + * Every DMA-API request is saved into a struct dma_debug_entry. To + * have quick access to these structs they are stored into a hash. + */ +static int hash_fn(struct dma_debug_entry *entry) +{ + /* + * Hash function is based on the dma address. + * We use bits 20-27 here as the index into the hash + */ + return (entry->dev_addr >> HASH_FN_SHIFT) & HASH_FN_MASK; +} + +/* + * Request exclusive access to a hash bucket for a given dma_debug_entry. + */ +static struct hash_bucket *get_hash_bucket(struct dma_debug_entry *entry, + unsigned long *flags) +{ + int idx = hash_fn(entry); + unsigned long __flags; + + spin_lock_irqsave(&dma_entry_hash[idx].lock, __flags); + *flags = __flags; + return &dma_entry_hash[idx]; +} + +/* + * Give up exclusive access to the hash bucket + */ +static void put_hash_bucket(struct hash_bucket *bucket, + unsigned long *flags) +{ + unsigned long __flags = *flags; + + spin_unlock_irqrestore(&bucket->lock, __flags); +} + +/* + * Search a given entry in the hash bucket list + */ +static struct dma_debug_entry *hash_bucket_find(struct hash_bucket *bucket, + struct dma_debug_entry *ref) +{ + struct dma_debug_entry *entry; + + list_for_each_entry(entry, &bucket->list, list) { + if ((entry->dev_addr == ref->dev_addr) && + (entry->dev == ref->dev)) + return entry; + } + + return NULL; +} + +/* + * Add an entry to a hash bucket + */ +static void hash_bucket_add(struct hash_bucket *bucket, + struct dma_debug_entry *entry) +{ + list_add_tail(&entry->list, &bucket->list); +} + +/* + * Remove entry from a hash bucket list + */ +static void hash_bucket_del(struct dma_debug_entry *entry) +{ + list_del(&entry->list); +} + +/* + * Dump mapping entries for debugging purposes + */ +void debug_dma_dump_mappings(struct device *dev) +{ + int idx; + + for (idx = 0; idx < HASH_SIZE; idx++) { + struct hash_bucket *bucket = &dma_entry_hash[idx]; + struct dma_debug_entry *entry; + unsigned long flags; + + spin_lock_irqsave(&bucket->lock, flags); + + list_for_each_entry(entry, &bucket->list, list) { + if (!dev || dev == entry->dev) { + dev_info(entry->dev, + "%s idx %d P=%Lx D=%Lx L=%Lx %s\n", + type2name[entry->type], idx, + (unsigned long long)entry->paddr, + entry->dev_addr, entry->size, + dir2name[entry->direction]); + } + } + + spin_unlock_irqrestore(&bucket->lock, flags); + } +} +EXPORT_SYMBOL(debug_dma_dump_mappings); + +/* + * Wrapper function for adding an entry to the hash. + * This function takes care of locking itself. + */ +static void add_dma_entry(struct dma_debug_entry *entry) +{ + struct hash_bucket *bucket; + unsigned long flags; + + bucket = get_hash_bucket(entry, &flags); + hash_bucket_add(bucket, entry); + put_hash_bucket(bucket, &flags); +} + +/* struct dma_entry allocator + * + * The next two functions implement the allocator for + * struct dma_debug_entries. 
+ */ +static struct dma_debug_entry *dma_entry_alloc(void) +{ + struct dma_debug_entry *entry = NULL; + unsigned long flags; + + spin_lock_irqsave(&free_entries_lock, flags); + + if (list_empty(&free_entries)) { + printk(KERN_ERR "DMA-API: debugging out of memory " + "- disabling\n"); + global_disable = true; + goto out; + } + + entry = list_entry(free_entries.next, struct dma_debug_entry, list); + list_del(&entry->list); + memset(entry, 0, sizeof(*entry)); + +#ifdef CONFIG_STACKTRACE + entry->stacktrace.max_entries = DMA_DEBUG_STACKTRACE_ENTRIES; + entry->stacktrace.entries = entry->st_entries; + entry->stacktrace.skip = 2; + save_stack_trace(&entry->stacktrace); +#endif + num_free_entries -= 1; + if (num_free_entries < min_free_entries) + min_free_entries = num_free_entries; + +out: + spin_unlock_irqrestore(&free_entries_lock, flags); + + return entry; +} + +static void dma_entry_free(struct dma_debug_entry *entry) +{ + unsigned long flags; + + /* + * add to beginning of the list - this way the entries are + * more likely cache hot when they are reallocated. + */ + spin_lock_irqsave(&free_entries_lock, flags); + list_add(&entry->list, &free_entries); + num_free_entries += 1; + spin_unlock_irqrestore(&free_entries_lock, flags); +} + +/* + * DMA-API debugging init code + * + * The init code does two things: + * 1. Initialize core data structures + * 2. Preallocate a given number of dma_debug_entry structs + */ + +static int prealloc_memory(u32 num_entries) +{ + struct dma_debug_entry *entry, *next_entry; + int i; + + for (i = 0; i < num_entries; ++i) { + entry = kzalloc(sizeof(*entry), GFP_KERNEL); + if (!entry) + goto out_err; + + list_add_tail(&entry->list, &free_entries); + } + + num_free_entries = num_entries; + min_free_entries = num_entries; + + printk(KERN_INFO "DMA-API: preallocated %d debug entries\n", + num_entries); + + return 0; + +out_err: + + list_for_each_entry_safe(entry, next_entry, &free_entries, list) { + list_del(&entry->list); + kfree(entry); + } + + return -ENOMEM; +} + +static int dma_debug_fs_init(void) +{ + dma_debug_dent = debugfs_create_dir("dma-api", NULL); + if (!dma_debug_dent) { + printk(KERN_ERR "DMA-API: can not create debugfs directory\n"); + return -ENOMEM; + } + + global_disable_dent = debugfs_create_bool("disabled", 0444, + dma_debug_dent, + (u32 *)&global_disable); + if (!global_disable_dent) + goto out_err; + + error_count_dent = debugfs_create_u32("error_count", 0444, + dma_debug_dent, &error_count); + if (!error_count_dent) + goto out_err; + + show_all_errors_dent = debugfs_create_u32("all_errors", 0644, + dma_debug_dent, + &show_all_errors); + if (!show_all_errors_dent) + goto out_err; + + show_num_errors_dent = debugfs_create_u32("num_errors", 0644, + dma_debug_dent, + &show_num_errors); + if (!show_num_errors_dent) + goto out_err; + + num_free_entries_dent = debugfs_create_u32("num_free_entries", 0444, + dma_debug_dent, + &num_free_entries); + if (!num_free_entries_dent) + goto out_err; + + min_free_entries_dent = debugfs_create_u32("min_free_entries", 0444, + dma_debug_dent, + &min_free_entries); + if (!min_free_entries_dent) + goto out_err; + + return 0; + +out_err: + debugfs_remove_recursive(dma_debug_dent); + + return -ENOMEM; +} + +static int device_dma_allocations(struct device *dev) +{ + struct dma_debug_entry *entry; + unsigned long flags; + int count = 0, i; + + for (i = 0; i < HASH_SIZE; ++i) { + spin_lock_irqsave(&dma_entry_hash[i].lock, flags); + list_for_each_entry(entry, &dma_entry_hash[i].list, list) { + if (entry->dev == dev) + 
count += 1; + } + spin_unlock_irqrestore(&dma_entry_hash[i].lock, flags); + } + + return count; +} + +static int dma_debug_device_change(struct notifier_block *nb, + unsigned long action, void *data) +{ + struct device *dev = data; + int count; + + + switch (action) { + case BUS_NOTIFY_UNBIND_DRIVER: + count = device_dma_allocations(dev); + if (count == 0) + break; + err_printk(dev, NULL, "DMA-API: device driver has pending " + "DMA allocations while released from device " + "[count=%d]\n", count); + break; + default: + break; + } + + return 0; +} + +void dma_debug_add_bus(struct bus_type *bus) +{ + struct notifier_block *nb; + + nb = kzalloc(sizeof(struct notifier_block), GFP_KERNEL); + if (nb == NULL) { + printk(KERN_ERR "dma_debug_add_bus: out of memory\n"); + return; + } + + nb->notifier_call = dma_debug_device_change; + + bus_register_notifier(bus, nb); +} + +/* + * Let the architectures decide how many entries should be preallocated. + */ +void dma_debug_init(u32 num_entries) +{ + int i; + + if (global_disable) + return; + + for (i = 0; i < HASH_SIZE; ++i) { + INIT_LIST_HEAD(&dma_entry_hash[i].list); + dma_entry_hash[i].lock = SPIN_LOCK_UNLOCKED; + } + + if (dma_debug_fs_init() != 0) { + printk(KERN_ERR "DMA-API: error creating debugfs entries " + "- disabling\n"); + global_disable = true; + + return; + } + + if (req_entries) + num_entries = req_entries; + + if (prealloc_memory(num_entries) != 0) { + printk(KERN_ERR "DMA-API: debugging out of memory error " + "- disabled\n"); + global_disable = true; + + return; + } + + printk(KERN_INFO "DMA-API: debugging enabled by kernel config\n"); +} + +static __init int dma_debug_cmdline(char *str) +{ + if (!str) + return -EINVAL; + + if (strncmp(str, "off", 3) == 0) { + printk(KERN_INFO "DMA-API: debugging disabled on kernel " + "command line\n"); + global_disable = true; + } + + return 0; +} + +static __init int dma_debug_entries_cmdline(char *str) +{ + int res; + + if (!str) + return -EINVAL; + + res = get_option(&str, &req_entries); + + if (!res) + req_entries = 0; + + return 0; +} + +__setup("dma_debug=", dma_debug_cmdline); +__setup("dma_debug_entries=", dma_debug_entries_cmdline); + +static void check_unmap(struct dma_debug_entry *ref) +{ + struct dma_debug_entry *entry; + struct hash_bucket *bucket; + unsigned long flags; + + if (dma_mapping_error(ref->dev, ref->dev_addr)) { + err_printk(ref->dev, NULL, "DMA-API: device driver tries " + "to free an invalid DMA memory address\n"); + return; + } + + bucket = get_hash_bucket(ref, &flags); + entry = hash_bucket_find(bucket, ref); + + if (!entry) { + err_printk(ref->dev, NULL, "DMA-API: device driver tries " + "to free DMA memory it has not allocated " + "[device address=0x%016llx] [size=%llu bytes]\n", + ref->dev_addr, ref->size); + goto out; + } + + if (ref->size != entry->size) { + err_printk(ref->dev, entry, "DMA-API: device driver frees " + "DMA memory with different size " + "[device address=0x%016llx] [map size=%llu bytes] " + "[unmap size=%llu bytes]\n", + ref->dev_addr, entry->size, ref->size); + } + + if (ref->type != entry->type) { + err_printk(ref->dev, entry, "DMA-API: device driver frees " + "DMA memory with wrong function " + "[device address=0x%016llx] [size=%llu bytes] " + "[mapped as %s] [unmapped as %s]\n", + ref->dev_addr, ref->size, + type2name[entry->type], type2name[ref->type]); + } else if ((entry->type == dma_debug_coherent) && + (ref->paddr != entry->paddr)) { + err_printk(ref->dev, entry, "DMA-API: device driver frees " + "DMA memory with different CPU address " + 
"[device address=0x%016llx] [size=%llu bytes] " + "[cpu alloc address=%p] [cpu free address=%p]", + ref->dev_addr, ref->size, + (void *)entry->paddr, (void *)ref->paddr); + } + + if (ref->sg_call_ents && ref->type == dma_debug_sg && + ref->sg_call_ents != entry->sg_call_ents) { + err_printk(ref->dev, entry, "DMA-API: device driver frees " + "DMA sg list with different entry count " + "[map count=%d] [unmap count=%d]\n", + entry->sg_call_ents, ref->sg_call_ents); + } + + /* + * This may be no bug in reality - but most implementations of the + * DMA API don't handle this properly, so check for it here + */ + if (ref->direction != entry->direction) { + err_printk(ref->dev, entry, "DMA-API: device driver frees " + "DMA memory with different direction " + "[device address=0x%016llx] [size=%llu bytes] " + "[mapped with %s] [unmapped with %s]\n", + ref->dev_addr, ref->size, + dir2name[entry->direction], + dir2name[ref->direction]); + } + + hash_bucket_del(entry); + dma_entry_free(entry); + +out: + put_hash_bucket(bucket, &flags); +} + +static void check_for_stack(struct device *dev, void *addr) +{ + if (object_is_on_stack(addr)) + err_printk(dev, NULL, "DMA-API: device driver maps memory from" + "stack [addr=%p]\n", addr); +} + +static inline bool overlap(void *addr, u64 size, void *start, void *end) +{ + void *addr2 = (char *)addr + size; + + return ((addr >= start && addr < end) || + (addr2 >= start && addr2 < end) || + ((addr < start) && (addr2 >= end))); +} + +static void check_for_illegal_area(struct device *dev, void *addr, u64 size) +{ + if (overlap(addr, size, _text, _etext) || + overlap(addr, size, __start_rodata, __end_rodata)) + err_printk(dev, NULL, "DMA-API: device driver maps " + "memory from kernel text or rodata " + "[addr=%p] [size=%llu]\n", addr, size); +} + +static void check_sync(struct device *dev, dma_addr_t addr, + u64 size, u64 offset, int direction, bool to_cpu) +{ + struct dma_debug_entry ref = { + .dev = dev, + .dev_addr = addr, + .size = size, + .direction = direction, + }; + struct dma_debug_entry *entry; + struct hash_bucket *bucket; + unsigned long flags; + + bucket = get_hash_bucket(&ref, &flags); + + entry = hash_bucket_find(bucket, &ref); + + if (!entry) { + err_printk(dev, NULL, "DMA-API: device driver tries " + "to sync DMA memory it has not allocated " + "[device address=0x%016llx] [size=%llu bytes]\n", + (unsigned long long)addr, size); + goto out; + } + + if ((offset + size) > entry->size) { + err_printk(dev, entry, "DMA-API: device driver syncs" + " DMA memory outside allocated range " + "[device address=0x%016llx] " + "[allocation size=%llu bytes] [sync offset=%llu] " + "[sync size=%llu]\n", entry->dev_addr, entry->size, + offset, size); + } + + if (direction != entry->direction) { + err_printk(dev, entry, "DMA-API: device driver syncs " + "DMA memory with different direction " + "[device address=0x%016llx] [size=%llu bytes] " + "[mapped with %s] [synced with %s]\n", + (unsigned long long)addr, entry->size, + dir2name[entry->direction], + dir2name[direction]); + } + + if (entry->direction == DMA_BIDIRECTIONAL) + goto out; + + if (to_cpu && !(entry->direction == DMA_FROM_DEVICE) && + !(direction == DMA_TO_DEVICE)) + err_printk(dev, entry, "DMA-API: device driver syncs " + "device read-only DMA memory for cpu " + "[device address=0x%016llx] [size=%llu bytes] " + "[mapped with %s] [synced with %s]\n", + (unsigned long long)addr, entry->size, + dir2name[entry->direction], + dir2name[direction]); + + if (!to_cpu && !(entry->direction == DMA_TO_DEVICE) && + 
!(direction == DMA_FROM_DEVICE)) + err_printk(dev, entry, "DMA-API: device driver syncs " + "device write-only DMA memory to device " + "[device address=0x%016llx] [size=%llu bytes] " + "[mapped with %s] [synced with %s]\n", + (unsigned long long)addr, entry->size, + dir2name[entry->direction], + dir2name[direction]); + +out: + put_hash_bucket(bucket, &flags); + +} + +void debug_dma_map_page(struct device *dev, struct page *page, size_t offset, + size_t size, int direction, dma_addr_t dma_addr, + bool map_single) +{ + struct dma_debug_entry *entry; + + if (unlikely(global_disable)) + return; + + if (unlikely(dma_mapping_error(dev, dma_addr))) + return; + + entry = dma_entry_alloc(); + if (!entry) + return; + + entry->dev = dev; + entry->type = dma_debug_page; + entry->paddr = page_to_phys(page) + offset; + entry->dev_addr = dma_addr; + entry->size = size; + entry->direction = direction; + + if (map_single) + entry->type = dma_debug_single; + + if (!PageHighMem(page)) { + void *addr = ((char *)page_address(page)) + offset; + check_for_stack(dev, addr); + check_for_illegal_area(dev, addr, size); + } + + add_dma_entry(entry); +} +EXPORT_SYMBOL(debug_dma_map_page); + +void debug_dma_unmap_page(struct device *dev, dma_addr_t addr, + size_t size, int direction, bool map_single) +{ + struct dma_debug_entry ref = { + .type = dma_debug_page, + .dev = dev, + .dev_addr = addr, + .size = size, + .direction = direction, + }; + + if (unlikely(global_disable)) + return; + + if (map_single) + ref.type = dma_debug_single; + + check_unmap(&ref); +} +EXPORT_SYMBOL(debug_dma_unmap_page); + +void debug_dma_map_sg(struct device *dev, struct scatterlist *sg, + int nents, int mapped_ents, int direction) +{ + struct dma_debug_entry *entry; + struct scatterlist *s; + int i; + + if (unlikely(global_disable)) + return; + + for_each_sg(sg, s, mapped_ents, i) { + entry = dma_entry_alloc(); + if (!entry) + return; + + entry->type = dma_debug_sg; + entry->dev = dev; + entry->paddr = sg_phys(s); + entry->size = s->length; + entry->dev_addr = s->dma_address; + entry->direction = direction; + entry->sg_call_ents = nents; + entry->sg_mapped_ents = mapped_ents; + + if (!PageHighMem(sg_page(s))) { + check_for_stack(dev, sg_virt(s)); + check_for_illegal_area(dev, sg_virt(s), s->length); + } + + add_dma_entry(entry); + } +} +EXPORT_SYMBOL(debug_dma_map_sg); + +void debug_dma_unmap_sg(struct device *dev, struct scatterlist *sglist, + int nelems, int dir) +{ + struct dma_debug_entry *entry; + struct scatterlist *s; + int mapped_ents = 0, i; + unsigned long flags; + + if (unlikely(global_disable)) + return; + + for_each_sg(sglist, s, nelems, i) { + + struct dma_debug_entry ref = { + .type = dma_debug_sg, + .dev = dev, + .paddr = sg_phys(s), + .dev_addr = s->dma_address, + .size = s->length, + .direction = dir, + .sg_call_ents = 0, + }; + + if (mapped_ents && i >= mapped_ents) + break; + + if (mapped_ents == 0) { + struct hash_bucket *bucket; + ref.sg_call_ents = nelems; + bucket = get_hash_bucket(&ref, &flags); + entry = hash_bucket_find(bucket, &ref); + if (entry) + mapped_ents = entry->sg_mapped_ents; + put_hash_bucket(bucket, &flags); + } + + check_unmap(&ref); + } +} +EXPORT_SYMBOL(debug_dma_unmap_sg); + +void debug_dma_alloc_coherent(struct device *dev, size_t size, + dma_addr_t dma_addr, void *virt) +{ + struct dma_debug_entry *entry; + + if (unlikely(global_disable)) + return; + + if (unlikely(virt == NULL)) + return; + + entry = dma_entry_alloc(); + if (!entry) + return; + + entry->type = dma_debug_coherent; + entry->dev = 
dev; + entry->paddr = virt_to_phys(virt); + entry->size = size; + entry->dev_addr = dma_addr; + entry->direction = DMA_BIDIRECTIONAL; + + add_dma_entry(entry); +} +EXPORT_SYMBOL(debug_dma_alloc_coherent); + +void debug_dma_free_coherent(struct device *dev, size_t size, + void *virt, dma_addr_t addr) +{ + struct dma_debug_entry ref = { + .type = dma_debug_coherent, + .dev = dev, + .paddr = virt_to_phys(virt), + .dev_addr = addr, + .size = size, + .direction = DMA_BIDIRECTIONAL, + }; + + if (unlikely(global_disable)) + return; + + check_unmap(&ref); +} +EXPORT_SYMBOL(debug_dma_free_coherent); + +void debug_dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle, + size_t size, int direction) +{ + if (unlikely(global_disable)) + return; + + check_sync(dev, dma_handle, size, 0, direction, true); +} +EXPORT_SYMBOL(debug_dma_sync_single_for_cpu); + +void debug_dma_sync_single_for_device(struct device *dev, + dma_addr_t dma_handle, size_t size, + int direction) +{ + if (unlikely(global_disable)) + return; + + check_sync(dev, dma_handle, size, 0, direction, false); +} +EXPORT_SYMBOL(debug_dma_sync_single_for_device); + +void debug_dma_sync_single_range_for_cpu(struct device *dev, + dma_addr_t dma_handle, + unsigned long offset, size_t size, + int direction) +{ + if (unlikely(global_disable)) + return; + + check_sync(dev, dma_handle, size, offset, direction, true); +} +EXPORT_SYMBOL(debug_dma_sync_single_range_for_cpu); + +void debug_dma_sync_single_range_for_device(struct device *dev, + dma_addr_t dma_handle, + unsigned long offset, + size_t size, int direction) +{ + if (unlikely(global_disable)) + return; + + check_sync(dev, dma_handle, size, offset, direction, false); +} +EXPORT_SYMBOL(debug_dma_sync_single_range_for_device); + +void debug_dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, + int nelems, int direction) +{ + struct scatterlist *s; + int i; + + if (unlikely(global_disable)) + return; + + for_each_sg(sg, s, nelems, i) { + check_sync(dev, s->dma_address, s->dma_length, 0, + direction, true); + } +} +EXPORT_SYMBOL(debug_dma_sync_sg_for_cpu); + +void debug_dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg, + int nelems, int direction) +{ + struct scatterlist *s; + int i; + + if (unlikely(global_disable)) + return; + + for_each_sg(sg, s, nelems, i) { + check_sync(dev, s->dma_address, s->dma_length, 0, + direction, false); + } +} +EXPORT_SYMBOL(debug_dma_sync_sg_for_device); + diff --git a/lib/idr.c b/lib/idr.c index dab4bca86f5d..80ca9aca038b 100644 --- a/lib/idr.c +++ b/lib/idr.c @@ -579,6 +579,52 @@ int idr_for_each(struct idr *idp, EXPORT_SYMBOL(idr_for_each); /** + * idr_get_next - lookup next object of id to given id. + * @idp: idr handle + * @id: pointer to lookup key + * + * Returns pointer to registered object with id, which is next number to + * given id. 
+ */ + +void *idr_get_next(struct idr *idp, int *nextidp) +{ + struct idr_layer *p, *pa[MAX_LEVEL]; + struct idr_layer **paa = &pa[0]; + int id = *nextidp; + int n, max; + + /* find first ent */ + n = idp->layers * IDR_BITS; + max = 1 << n; + p = rcu_dereference(idp->top); + if (!p) + return NULL; + + while (id < max) { + while (n > 0 && p) { + n -= IDR_BITS; + *paa++ = p; + p = rcu_dereference(p->ary[(id >> n) & IDR_MASK]); + } + + if (p) { + *nextidp = id; + return p; + } + + id += 1 << n; + while (n < fls(id)) { + n += IDR_BITS; + p = *--paa; + } + } + return NULL; +} + + + +/** * idr_replace - replace pointer for given id * @idp: idr handle * @ptr: pointer you want associated with the id diff --git a/lib/lmb.c b/lib/lmb.c index 97e547037084..e4a6482d8b26 100644 --- a/lib/lmb.c +++ b/lib/lmb.c @@ -29,33 +29,33 @@ static int __init early_lmb(char *p) } early_param("lmb", early_lmb); -void lmb_dump_all(void) +static void lmb_dump(struct lmb_region *region, char *name) { - unsigned long i; + unsigned long long base, size; + int i; + + pr_info(" %s.cnt = 0x%lx\n", name, region->cnt); + + for (i = 0; i < region->cnt; i++) { + base = region->region[i].base; + size = region->region[i].size; + + pr_info(" %s[0x%x]\t0x%016llx - 0x%016llx, 0x%llx bytes\n", + name, i, base, base + size - 1, size); + } +} +void lmb_dump_all(void) +{ if (!lmb_debug) return; - pr_info("lmb_dump_all:\n"); - pr_info(" memory.cnt = 0x%lx\n", lmb.memory.cnt); - pr_info(" memory.size = 0x%llx\n", - (unsigned long long)lmb.memory.size); - for (i=0; i < lmb.memory.cnt ;i++) { - pr_info(" memory.region[0x%lx].base = 0x%llx\n", - i, (unsigned long long)lmb.memory.region[i].base); - pr_info(" .size = 0x%llx\n", - (unsigned long long)lmb.memory.region[i].size); - } + pr_info("LMB configuration:\n"); + pr_info(" rmo_size = 0x%llx\n", (unsigned long long)lmb.rmo_size); + pr_info(" memory.size = 0x%llx\n", (unsigned long long)lmb.memory.size); - pr_info(" reserved.cnt = 0x%lx\n", lmb.reserved.cnt); - pr_info(" reserved.size = 0x%llx\n", - (unsigned long long)lmb.memory.size); - for (i=0; i < lmb.reserved.cnt ;i++) { - pr_info(" reserved.region[0x%lx].base = 0x%llx\n", - i, (unsigned long long)lmb.reserved.region[i].base); - pr_info(" .size = 0x%llx\n", - (unsigned long long)lmb.reserved.region[i].size); - } + lmb_dump(&lmb.memory, "memory"); + lmb_dump(&lmb.reserved, "reserved"); } static unsigned long lmb_addrs_overlap(u64 base1, u64 size1, u64 base2, diff --git a/lib/locking-selftest.c b/lib/locking-selftest.c index 280332c1827c..619313ed6c46 100644 --- a/lib/locking-selftest.c +++ b/lib/locking-selftest.c @@ -157,11 +157,11 @@ static void init_shared_classes(void) #define SOFTIRQ_ENTER() \ local_bh_disable(); \ local_irq_disable(); \ - trace_softirq_enter(); \ + lockdep_softirq_enter(); \ WARN_ON(!in_softirq()); #define SOFTIRQ_EXIT() \ - trace_softirq_exit(); \ + lockdep_softirq_exit(); \ local_irq_enable(); \ local_bh_enable(); diff --git a/lib/rbtree.c b/lib/rbtree.c index 9956b99649f0..f653659e0bc1 100644 --- a/lib/rbtree.c +++ b/lib/rbtree.c @@ -163,17 +163,14 @@ static void __rb_erase_color(struct rb_node *node, struct rb_node *parent, { if (!other->rb_right || rb_is_black(other->rb_right)) { - struct rb_node *o_left; - if ((o_left = other->rb_left)) - rb_set_black(o_left); + rb_set_black(other->rb_left); rb_set_red(other); __rb_rotate_right(other, root); other = parent->rb_right; } rb_set_color(other, rb_color(parent)); rb_set_black(parent); - if (other->rb_right) - rb_set_black(other->rb_right); + 
rb_set_black(other->rb_right); __rb_rotate_left(parent, root); node = root->rb_node; break; @@ -200,17 +197,14 @@ static void __rb_erase_color(struct rb_node *node, struct rb_node *parent, { if (!other->rb_left || rb_is_black(other->rb_left)) { - register struct rb_node *o_right; - if ((o_right = other->rb_right)) - rb_set_black(o_right); + rb_set_black(other->rb_right); rb_set_red(other); __rb_rotate_left(other, root); other = parent->rb_left; } rb_set_color(other, rb_color(parent)); rb_set_black(parent); - if (other->rb_left) - rb_set_black(other->rb_left); + rb_set_black(other->rb_left); __rb_rotate_right(parent, root); node = root->rb_node; break; diff --git a/lib/swiotlb.c b/lib/swiotlb.c index 1f991acc2a05..2b0b5a7d2ced 100644 --- a/lib/swiotlb.c +++ b/lib/swiotlb.c @@ -145,7 +145,7 @@ static void *swiotlb_bus_to_virt(dma_addr_t address) return phys_to_virt(swiotlb_bus_to_phys(address)); } -int __weak swiotlb_arch_range_needs_mapping(void *ptr, size_t size) +int __weak swiotlb_arch_range_needs_mapping(phys_addr_t paddr, size_t size) { return 0; } @@ -315,9 +315,9 @@ address_needs_mapping(struct device *hwdev, dma_addr_t addr, size_t size) return !is_buffer_dma_capable(dma_get_mask(hwdev), addr, size); } -static inline int range_needs_mapping(void *ptr, size_t size) +static inline int range_needs_mapping(phys_addr_t paddr, size_t size) { - return swiotlb_force || swiotlb_arch_range_needs_mapping(ptr, size); + return swiotlb_force || swiotlb_arch_range_needs_mapping(paddr, size); } static int is_swiotlb_buffer(char *addr) @@ -549,7 +549,7 @@ swiotlb_alloc_coherent(struct device *hwdev, size_t size, dma_addr_t dev_addr; void *ret; int order = get_order(size); - u64 dma_mask = DMA_32BIT_MASK; + u64 dma_mask = DMA_BIT_MASK(32); if (hwdev && hwdev->coherent_dma_mask) dma_mask = hwdev->coherent_dma_mask; @@ -636,11 +636,14 @@ swiotlb_full(struct device *dev, size_t size, int dir, int do_panic) * Once the device is given the dma address, the device owns this memory until * either swiotlb_unmap_single or swiotlb_dma_sync_single is performed. */ -dma_addr_t -swiotlb_map_single_attrs(struct device *hwdev, void *ptr, size_t size, - int dir, struct dma_attrs *attrs) -{ - dma_addr_t dev_addr = swiotlb_virt_to_bus(hwdev, ptr); +dma_addr_t swiotlb_map_page(struct device *dev, struct page *page, + unsigned long offset, size_t size, + enum dma_data_direction dir, + struct dma_attrs *attrs) +{ + phys_addr_t phys = page_to_phys(page) + offset; + void *ptr = page_address(page) + offset; + dma_addr_t dev_addr = swiotlb_phys_to_bus(dev, phys); void *map; BUG_ON(dir == DMA_NONE); @@ -649,37 +652,30 @@ swiotlb_map_single_attrs(struct device *hwdev, void *ptr, size_t size, * we can safely return the device addr and not worry about bounce * buffering it. */ - if (!address_needs_mapping(hwdev, dev_addr, size) && - !range_needs_mapping(ptr, size)) + if (!address_needs_mapping(dev, dev_addr, size) && + !range_needs_mapping(virt_to_phys(ptr), size)) return dev_addr; /* * Oh well, have to allocate and map a bounce buffer. 
*/ - map = map_single(hwdev, virt_to_phys(ptr), size, dir); + map = map_single(dev, phys, size, dir); if (!map) { - swiotlb_full(hwdev, size, dir, 1); + swiotlb_full(dev, size, dir, 1); map = io_tlb_overflow_buffer; } - dev_addr = swiotlb_virt_to_bus(hwdev, map); + dev_addr = swiotlb_virt_to_bus(dev, map); /* * Ensure that the address returned is DMA'ble */ - if (address_needs_mapping(hwdev, dev_addr, size)) + if (address_needs_mapping(dev, dev_addr, size)) panic("map_single: bounce buffer is not DMA'ble"); return dev_addr; } -EXPORT_SYMBOL(swiotlb_map_single_attrs); - -dma_addr_t -swiotlb_map_single(struct device *hwdev, void *ptr, size_t size, int dir) -{ - return swiotlb_map_single_attrs(hwdev, ptr, size, dir, NULL); -} -EXPORT_SYMBOL(swiotlb_map_single); +EXPORT_SYMBOL_GPL(swiotlb_map_page); /* * Unmap a single streaming mode DMA translation. The dma_addr and size must @@ -689,9 +685,9 @@ EXPORT_SYMBOL(swiotlb_map_single); * After this call, reads by the cpu to the buffer are guaranteed to see * whatever the device wrote there. */ -void -swiotlb_unmap_single_attrs(struct device *hwdev, dma_addr_t dev_addr, - size_t size, int dir, struct dma_attrs *attrs) +void swiotlb_unmap_page(struct device *hwdev, dma_addr_t dev_addr, + size_t size, enum dma_data_direction dir, + struct dma_attrs *attrs) { char *dma_addr = swiotlb_bus_to_virt(dev_addr); @@ -701,15 +697,7 @@ swiotlb_unmap_single_attrs(struct device *hwdev, dma_addr_t dev_addr, else if (dir == DMA_FROM_DEVICE) dma_mark_clean(dma_addr, size); } -EXPORT_SYMBOL(swiotlb_unmap_single_attrs); - -void -swiotlb_unmap_single(struct device *hwdev, dma_addr_t dev_addr, size_t size, - int dir) -{ - return swiotlb_unmap_single_attrs(hwdev, dev_addr, size, dir, NULL); -} -EXPORT_SYMBOL(swiotlb_unmap_single); +EXPORT_SYMBOL_GPL(swiotlb_unmap_page); /* * Make physical memory consistent for a single streaming mode DMA translation @@ -736,7 +724,7 @@ swiotlb_sync_single(struct device *hwdev, dma_addr_t dev_addr, void swiotlb_sync_single_for_cpu(struct device *hwdev, dma_addr_t dev_addr, - size_t size, int dir) + size_t size, enum dma_data_direction dir) { swiotlb_sync_single(hwdev, dev_addr, size, dir, SYNC_FOR_CPU); } @@ -744,7 +732,7 @@ EXPORT_SYMBOL(swiotlb_sync_single_for_cpu); void swiotlb_sync_single_for_device(struct device *hwdev, dma_addr_t dev_addr, - size_t size, int dir) + size_t size, enum dma_data_direction dir) { swiotlb_sync_single(hwdev, dev_addr, size, dir, SYNC_FOR_DEVICE); } @@ -769,7 +757,8 @@ swiotlb_sync_single_range(struct device *hwdev, dma_addr_t dev_addr, void swiotlb_sync_single_range_for_cpu(struct device *hwdev, dma_addr_t dev_addr, - unsigned long offset, size_t size, int dir) + unsigned long offset, size_t size, + enum dma_data_direction dir) { swiotlb_sync_single_range(hwdev, dev_addr, offset, size, dir, SYNC_FOR_CPU); @@ -778,7 +767,8 @@ EXPORT_SYMBOL_GPL(swiotlb_sync_single_range_for_cpu); void swiotlb_sync_single_range_for_device(struct device *hwdev, dma_addr_t dev_addr, - unsigned long offset, size_t size, int dir) + unsigned long offset, size_t size, + enum dma_data_direction dir) { swiotlb_sync_single_range(hwdev, dev_addr, offset, size, dir, SYNC_FOR_DEVICE); @@ -803,7 +793,7 @@ EXPORT_SYMBOL_GPL(swiotlb_sync_single_range_for_device); */ int swiotlb_map_sg_attrs(struct device *hwdev, struct scatterlist *sgl, int nelems, - int dir, struct dma_attrs *attrs) + enum dma_data_direction dir, struct dma_attrs *attrs) { struct scatterlist *sg; int i; @@ -811,10 +801,10 @@ swiotlb_map_sg_attrs(struct device *hwdev, 
struct scatterlist *sgl, int nelems, BUG_ON(dir == DMA_NONE); for_each_sg(sgl, sg, nelems, i) { - void *addr = sg_virt(sg); - dma_addr_t dev_addr = swiotlb_virt_to_bus(hwdev, addr); + phys_addr_t paddr = sg_phys(sg); + dma_addr_t dev_addr = swiotlb_phys_to_bus(hwdev, paddr); - if (range_needs_mapping(addr, sg->length) || + if (range_needs_mapping(paddr, sg->length) || address_needs_mapping(hwdev, dev_addr, sg->length)) { void *map = map_single(hwdev, sg_phys(sg), sg->length, dir); @@ -850,7 +840,7 @@ EXPORT_SYMBOL(swiotlb_map_sg); */ void swiotlb_unmap_sg_attrs(struct device *hwdev, struct scatterlist *sgl, - int nelems, int dir, struct dma_attrs *attrs) + int nelems, enum dma_data_direction dir, struct dma_attrs *attrs) { struct scatterlist *sg; int i; @@ -858,11 +848,11 @@ swiotlb_unmap_sg_attrs(struct device *hwdev, struct scatterlist *sgl, BUG_ON(dir == DMA_NONE); for_each_sg(sgl, sg, nelems, i) { - if (sg->dma_address != swiotlb_virt_to_bus(hwdev, sg_virt(sg))) + if (sg->dma_address != swiotlb_phys_to_bus(hwdev, sg_phys(sg))) unmap_single(hwdev, swiotlb_bus_to_virt(sg->dma_address), sg->dma_length, dir); else if (dir == DMA_FROM_DEVICE) - dma_mark_clean(sg_virt(sg), sg->dma_length); + dma_mark_clean(swiotlb_bus_to_virt(sg->dma_address), sg->dma_length); } } EXPORT_SYMBOL(swiotlb_unmap_sg_attrs); @@ -892,17 +882,17 @@ swiotlb_sync_sg(struct device *hwdev, struct scatterlist *sgl, BUG_ON(dir == DMA_NONE); for_each_sg(sgl, sg, nelems, i) { - if (sg->dma_address != swiotlb_virt_to_bus(hwdev, sg_virt(sg))) + if (sg->dma_address != swiotlb_phys_to_bus(hwdev, sg_phys(sg))) sync_single(hwdev, swiotlb_bus_to_virt(sg->dma_address), sg->dma_length, dir, target); else if (dir == DMA_FROM_DEVICE) - dma_mark_clean(sg_virt(sg), sg->dma_length); + dma_mark_clean(swiotlb_bus_to_virt(sg->dma_address), sg->dma_length); } } void swiotlb_sync_sg_for_cpu(struct device *hwdev, struct scatterlist *sg, - int nelems, int dir) + int nelems, enum dma_data_direction dir) { swiotlb_sync_sg(hwdev, sg, nelems, dir, SYNC_FOR_CPU); } @@ -910,7 +900,7 @@ EXPORT_SYMBOL(swiotlb_sync_sg_for_cpu); void swiotlb_sync_sg_for_device(struct device *hwdev, struct scatterlist *sg, - int nelems, int dir) + int nelems, enum dma_data_direction dir) { swiotlb_sync_sg(hwdev, sg, nelems, dir, SYNC_FOR_DEVICE); } diff --git a/lib/vsprintf.c b/lib/vsprintf.c index 0fbd0121d91d..be3001f912e4 100644 --- a/lib/vsprintf.c +++ b/lib/vsprintf.c @@ -396,7 +396,38 @@ static noinline char* put_dec(char *buf, unsigned long long num) #define SMALL 32 /* Must be 32 == 0x20 */ #define SPECIAL 64 /* 0x */ -static char *number(char *buf, char *end, unsigned long long num, int base, int size, int precision, int type) +enum format_type { + FORMAT_TYPE_NONE, /* Just a string part */ + FORMAT_TYPE_WIDTH, + FORMAT_TYPE_PRECISION, + FORMAT_TYPE_CHAR, + FORMAT_TYPE_STR, + FORMAT_TYPE_PTR, + FORMAT_TYPE_PERCENT_CHAR, + FORMAT_TYPE_INVALID, + FORMAT_TYPE_LONG_LONG, + FORMAT_TYPE_ULONG, + FORMAT_TYPE_LONG, + FORMAT_TYPE_USHORT, + FORMAT_TYPE_SHORT, + FORMAT_TYPE_UINT, + FORMAT_TYPE_INT, + FORMAT_TYPE_NRCHARS, + FORMAT_TYPE_SIZE_T, + FORMAT_TYPE_PTRDIFF +}; + +struct printf_spec { + enum format_type type; + int flags; /* flags to number() */ + int field_width; /* width of output field */ + int base; + int precision; /* # of digits/chars */ + int qualifier; +}; + +static char *number(char *buf, char *end, unsigned long long num, + struct printf_spec spec) { /* we are called with base 8, 10 or 16, only, thus don't need "G..." 
*/ static const char digits[16] = "0123456789ABCDEF"; /* "GHIJKLMNOPQRSTUVWXYZ"; */ @@ -404,32 +435,32 @@ static char *number(char *buf, char *end, unsigned long long num, int base, int char tmp[66]; char sign; char locase; - int need_pfx = ((type & SPECIAL) && base != 10); + int need_pfx = ((spec.flags & SPECIAL) && spec.base != 10); int i; /* locase = 0 or 0x20. ORing digits or letters with 'locase' * produces same digits or (maybe lowercased) letters */ - locase = (type & SMALL); - if (type & LEFT) - type &= ~ZEROPAD; + locase = (spec.flags & SMALL); + if (spec.flags & LEFT) + spec.flags &= ~ZEROPAD; sign = 0; - if (type & SIGN) { + if (spec.flags & SIGN) { if ((signed long long) num < 0) { sign = '-'; num = - (signed long long) num; - size--; - } else if (type & PLUS) { + spec.field_width--; + } else if (spec.flags & PLUS) { sign = '+'; - size--; - } else if (type & SPACE) { + spec.field_width--; + } else if (spec.flags & SPACE) { sign = ' '; - size--; + spec.field_width--; } } if (need_pfx) { - size--; - if (base == 16) - size--; + spec.field_width--; + if (spec.base == 16) + spec.field_width--; } /* generate full string in tmp[], in reverse order */ @@ -441,10 +472,10 @@ static char *number(char *buf, char *end, unsigned long long num, int base, int tmp[i++] = (digits[do_div(num,base)] | locase); } while (num != 0); */ - else if (base != 10) { /* 8 or 16 */ - int mask = base - 1; + else if (spec.base != 10) { /* 8 or 16 */ + int mask = spec.base - 1; int shift = 3; - if (base == 16) shift = 4; + if (spec.base == 16) shift = 4; do { tmp[i++] = (digits[((unsigned char)num) & mask] | locase); num >>= shift; @@ -454,12 +485,12 @@ static char *number(char *buf, char *end, unsigned long long num, int base, int } /* printing 100 using %2d gives "100", not "00" */ - if (i > precision) - precision = i; + if (i > spec.precision) + spec.precision = i; /* leading space padding */ - size -= precision; - if (!(type & (ZEROPAD+LEFT))) { - while(--size >= 0) { + spec.field_width -= spec.precision; + if (!(spec.flags & (ZEROPAD+LEFT))) { + while(--spec.field_width >= 0) { if (buf < end) *buf = ' '; ++buf; @@ -476,23 +507,23 @@ static char *number(char *buf, char *end, unsigned long long num, int base, int if (buf < end) *buf = '0'; ++buf; - if (base == 16) { + if (spec.base == 16) { if (buf < end) *buf = ('X' | locase); ++buf; } } /* zero or space padding */ - if (!(type & LEFT)) { - char c = (type & ZEROPAD) ? '0' : ' '; - while (--size >= 0) { + if (!(spec.flags & LEFT)) { + char c = (spec.flags & ZEROPAD) ? '0' : ' '; + while (--spec.field_width >= 0) { if (buf < end) *buf = c; ++buf; } } /* hmm even more zero padding? 
*/ - while (i <= --precision) { + while (i <= --spec.precision) { if (buf < end) *buf = '0'; ++buf; @@ -504,7 +535,7 @@ static char *number(char *buf, char *end, unsigned long long num, int base, int ++buf; } /* trailing space padding */ - while (--size >= 0) { + while (--spec.field_width >= 0) { if (buf < end) *buf = ' '; ++buf; @@ -512,17 +543,17 @@ static char *number(char *buf, char *end, unsigned long long num, int base, int return buf; } -static char *string(char *buf, char *end, char *s, int field_width, int precision, int flags) +static char *string(char *buf, char *end, char *s, struct printf_spec spec) { int len, i; if ((unsigned long)s < PAGE_SIZE) s = "<NULL>"; - len = strnlen(s, precision); + len = strnlen(s, spec.precision); - if (!(flags & LEFT)) { - while (len < field_width--) { + if (!(spec.flags & LEFT)) { + while (len < spec.field_width--) { if (buf < end) *buf = ' '; ++buf; @@ -533,7 +564,7 @@ static char *string(char *buf, char *end, char *s, int field_width, int precisio *buf = *s; ++buf; ++s; } - while (len < field_width--) { + while (len < spec.field_width--) { if (buf < end) *buf = ' '; ++buf; @@ -541,21 +572,24 @@ static char *string(char *buf, char *end, char *s, int field_width, int precisio return buf; } -static char *symbol_string(char *buf, char *end, void *ptr, int field_width, int precision, int flags) +static char *symbol_string(char *buf, char *end, void *ptr, + struct printf_spec spec) { unsigned long value = (unsigned long) ptr; #ifdef CONFIG_KALLSYMS char sym[KSYM_SYMBOL_LEN]; sprint_symbol(sym, value); - return string(buf, end, sym, field_width, precision, flags); + return string(buf, end, sym, spec); #else - field_width = 2*sizeof(void *); - flags |= SPECIAL | SMALL | ZEROPAD; - return number(buf, end, value, 16, field_width, precision, flags); + spec.field_width = 2*sizeof(void *); + spec.flags |= SPECIAL | SMALL | ZEROPAD; + spec.base = 16; + return number(buf, end, value, spec); #endif } -static char *resource_string(char *buf, char *end, struct resource *res, int field_width, int precision, int flags) +static char *resource_string(char *buf, char *end, struct resource *res, + struct printf_spec spec) { #ifndef IO_RSRC_PRINTK_SIZE #define IO_RSRC_PRINTK_SIZE 4 @@ -564,7 +598,11 @@ static char *resource_string(char *buf, char *end, struct resource *res, int fie #ifndef MEM_RSRC_PRINTK_SIZE #define MEM_RSRC_PRINTK_SIZE 8 #endif - + struct printf_spec num_spec = { + .base = 16, + .precision = -1, + .flags = SPECIAL | SMALL | ZEROPAD, + }; /* room for the actual numbers, the two "0x", -, [, ] and the final zero */ char sym[4*sizeof(resource_size_t) + 8]; char *p = sym, *pend = sym + sizeof(sym); @@ -576,17 +614,18 @@ static char *resource_string(char *buf, char *end, struct resource *res, int fie size = MEM_RSRC_PRINTK_SIZE; *p++ = '['; - p = number(p, pend, res->start, 16, size, -1, SPECIAL | SMALL | ZEROPAD); + num_spec.field_width = size; + p = number(p, pend, res->start, num_spec); *p++ = '-'; - p = number(p, pend, res->end, 16, size, -1, SPECIAL | SMALL | ZEROPAD); + p = number(p, pend, res->end, num_spec); *p++ = ']'; *p = 0; - return string(buf, end, sym, field_width, precision, flags); + return string(buf, end, sym, spec); } -static char *mac_address_string(char *buf, char *end, u8 *addr, int field_width, - int precision, int flags) +static char *mac_address_string(char *buf, char *end, u8 *addr, + struct printf_spec spec) { char mac_addr[6 * 3]; /* (6 * 2 hex digits), 5 colons and trailing zero */ char *p = mac_addr; @@ -594,16 +633,17 @@ 
static char *mac_address_string(char *buf, char *end, u8 *addr, int field_width, for (i = 0; i < 6; i++) { p = pack_hex_byte(p, addr[i]); - if (!(flags & SPECIAL) && i != 5) + if (!(spec.flags & SPECIAL) && i != 5) *p++ = ':'; } *p = '\0'; + spec.flags &= ~SPECIAL; - return string(buf, end, mac_addr, field_width, precision, flags & ~SPECIAL); + return string(buf, end, mac_addr, spec); } -static char *ip6_addr_string(char *buf, char *end, u8 *addr, int field_width, - int precision, int flags) +static char *ip6_addr_string(char *buf, char *end, u8 *addr, + struct printf_spec spec) { char ip6_addr[8 * 5]; /* (8 * 4 hex digits), 7 colons and trailing zero */ char *p = ip6_addr; @@ -612,16 +652,17 @@ static char *ip6_addr_string(char *buf, char *end, u8 *addr, int field_width, for (i = 0; i < 8; i++) { p = pack_hex_byte(p, addr[2 * i]); p = pack_hex_byte(p, addr[2 * i + 1]); - if (!(flags & SPECIAL) && i != 7) + if (!(spec.flags & SPECIAL) && i != 7) *p++ = ':'; } *p = '\0'; + spec.flags &= ~SPECIAL; - return string(buf, end, ip6_addr, field_width, precision, flags & ~SPECIAL); + return string(buf, end, ip6_addr, spec); } -static char *ip4_addr_string(char *buf, char *end, u8 *addr, int field_width, - int precision, int flags) +static char *ip4_addr_string(char *buf, char *end, u8 *addr, + struct printf_spec spec) { char ip4_addr[4 * 4]; /* (4 * 3 decimal digits), 3 dots and trailing zero */ char temp[3]; /* hold each IP quad in reverse order */ @@ -637,8 +678,9 @@ static char *ip4_addr_string(char *buf, char *end, u8 *addr, int field_width, *p++ = '.'; } *p = '\0'; + spec.flags &= ~SPECIAL; - return string(buf, end, ip4_addr, field_width, precision, flags & ~SPECIAL); + return string(buf, end, ip4_addr, spec); } /* @@ -663,41 +705,233 @@ static char *ip4_addr_string(char *buf, char *end, u8 *addr, int field_width, * function pointers are really function descriptors, which contain a * pointer to the real address. */ -static char *pointer(const char *fmt, char *buf, char *end, void *ptr, int field_width, int precision, int flags) +static char *pointer(const char *fmt, char *buf, char *end, void *ptr, + struct printf_spec spec) { if (!ptr) - return string(buf, end, "(null)", field_width, precision, flags); + return string(buf, end, "(null)", spec); switch (*fmt) { case 'F': ptr = dereference_function_descriptor(ptr); /* Fallthrough */ case 'S': - return symbol_string(buf, end, ptr, field_width, precision, flags); + return symbol_string(buf, end, ptr, spec); case 'R': - return resource_string(buf, end, ptr, field_width, precision, flags); + return resource_string(buf, end, ptr, spec); case 'm': - flags |= SPECIAL; + spec.flags |= SPECIAL; /* Fallthrough */ case 'M': - return mac_address_string(buf, end, ptr, field_width, precision, flags); + return mac_address_string(buf, end, ptr, spec); case 'i': - flags |= SPECIAL; + spec.flags |= SPECIAL; /* Fallthrough */ case 'I': if (fmt[1] == '6') - return ip6_addr_string(buf, end, ptr, field_width, precision, flags); + return ip6_addr_string(buf, end, ptr, spec); if (fmt[1] == '4') - return ip4_addr_string(buf, end, ptr, field_width, precision, flags); - flags &= ~SPECIAL; + return ip4_addr_string(buf, end, ptr, spec); + spec.flags &= ~SPECIAL; + break; + } + spec.flags |= SMALL; + if (spec.field_width == -1) { + spec.field_width = 2*sizeof(void *); + spec.flags |= ZEROPAD; + } + spec.base = 16; + + return number(buf, end, (unsigned long) ptr, spec); +} + +/* + * Helper function to decode printf style format. 
+ * Each call decode a token from the format and return the + * number of characters read (or likely the delta where it wants + * to go on the next call). + * The decoded token is returned through the parameters + * + * 'h', 'l', or 'L' for integer fields + * 'z' support added 23/7/1999 S.H. + * 'z' changed to 'Z' --davidm 1/25/99 + * 't' added for ptrdiff_t + * + * @fmt: the format string + * @type of the token returned + * @flags: various flags such as +, -, # tokens.. + * @field_width: overwritten width + * @base: base of the number (octal, hex, ...) + * @precision: precision of a number + * @qualifier: qualifier of a number (long, size_t, ...) + */ +static int format_decode(const char *fmt, struct printf_spec *spec) +{ + const char *start = fmt; + + /* we finished early by reading the field width */ + if (spec->type == FORMAT_TYPE_WIDTH) { + if (spec->field_width < 0) { + spec->field_width = -spec->field_width; + spec->flags |= LEFT; + } + spec->type = FORMAT_TYPE_NONE; + goto precision; + } + + /* we finished early by reading the precision */ + if (spec->type == FORMAT_TYPE_PRECISION) { + if (spec->precision < 0) + spec->precision = 0; + + spec->type = FORMAT_TYPE_NONE; + goto qualifier; + } + + /* By default */ + spec->type = FORMAT_TYPE_NONE; + + for (; *fmt ; ++fmt) { + if (*fmt == '%') + break; + } + + /* Return the current non-format string */ + if (fmt != start || !*fmt) + return fmt - start; + + /* Process flags */ + spec->flags = 0; + + while (1) { /* this also skips first '%' */ + bool found = true; + + ++fmt; + + switch (*fmt) { + case '-': spec->flags |= LEFT; break; + case '+': spec->flags |= PLUS; break; + case ' ': spec->flags |= SPACE; break; + case '#': spec->flags |= SPECIAL; break; + case '0': spec->flags |= ZEROPAD; break; + default: found = false; + } + + if (!found) + break; + } + + /* get field width */ + spec->field_width = -1; + + if (isdigit(*fmt)) + spec->field_width = skip_atoi(&fmt); + else if (*fmt == '*') { + /* it's the next argument */ + spec->type = FORMAT_TYPE_WIDTH; + return ++fmt - start; + } + +precision: + /* get the precision */ + spec->precision = -1; + if (*fmt == '.') { + ++fmt; + if (isdigit(*fmt)) { + spec->precision = skip_atoi(&fmt); + if (spec->precision < 0) + spec->precision = 0; + } else if (*fmt == '*') { + /* it's the next argument */ + spec->type = FORMAT_TYPE_PRECISION; + return ++fmt - start; + } + } + +qualifier: + /* get the conversion qualifier */ + spec->qualifier = -1; + if (*fmt == 'h' || *fmt == 'l' || *fmt == 'L' || + *fmt == 'Z' || *fmt == 'z' || *fmt == 't') { + spec->qualifier = *fmt; + ++fmt; + if (spec->qualifier == 'l' && *fmt == 'l') { + spec->qualifier = 'L'; + ++fmt; + } + } + + /* default base */ + spec->base = 10; + switch (*fmt) { + case 'c': + spec->type = FORMAT_TYPE_CHAR; + return ++fmt - start; + + case 's': + spec->type = FORMAT_TYPE_STR; + return ++fmt - start; + + case 'p': + spec->type = FORMAT_TYPE_PTR; + return fmt - start; + /* skip alnum */ + + case 'n': + spec->type = FORMAT_TYPE_NRCHARS; + return ++fmt - start; + + case '%': + spec->type = FORMAT_TYPE_PERCENT_CHAR; + return ++fmt - start; + + /* integer number formats - set up the flags and "break" */ + case 'o': + spec->base = 8; break; + + case 'x': + spec->flags |= SMALL; + + case 'X': + spec->base = 16; + break; + + case 'd': + case 'i': + spec->flags |= SIGN; + case 'u': + break; + + default: + spec->type = FORMAT_TYPE_INVALID; + return fmt - start; } - flags |= SMALL; - if (field_width == -1) { - field_width = 2*sizeof(void *); - flags |= 
ZEROPAD; + + if (spec->qualifier == 'L') + spec->type = FORMAT_TYPE_LONG_LONG; + else if (spec->qualifier == 'l') { + if (spec->flags & SIGN) + spec->type = FORMAT_TYPE_LONG; + else + spec->type = FORMAT_TYPE_ULONG; + } else if (spec->qualifier == 'Z' || spec->qualifier == 'z') { + spec->type = FORMAT_TYPE_SIZE_T; + } else if (spec->qualifier == 't') { + spec->type = FORMAT_TYPE_PTRDIFF; + } else if (spec->qualifier == 'h') { + if (spec->flags & SIGN) + spec->type = FORMAT_TYPE_SHORT; + else + spec->type = FORMAT_TYPE_USHORT; + } else { + if (spec->flags & SIGN) + spec->type = FORMAT_TYPE_INT; + else + spec->type = FORMAT_TYPE_UINT; } - return number(buf, end, (unsigned long) ptr, 16, field_width, precision, flags); + + return ++fmt - start; } /** @@ -726,18 +960,9 @@ static char *pointer(const char *fmt, char *buf, char *end, void *ptr, int field int vsnprintf(char *buf, size_t size, const char *fmt, va_list args) { unsigned long long num; - int base; char *str, *end, c; - - int flags; /* flags to number() */ - - int field_width; /* width of output field */ - int precision; /* min. # of digits for integers; max - number of chars for from string */ - int qualifier; /* 'h', 'l', or 'L' for integer fields */ - /* 'z' support added 23/7/1999 S.H. */ - /* 'z' changed to 'Z' --davidm 1/25/99 */ - /* 't' added for ptrdiff_t */ + int read; + struct printf_spec spec = {0}; /* Reject out-of-range values early. Large positive sizes are used for unknown buffer sizes. */ @@ -758,184 +983,144 @@ int vsnprintf(char *buf, size_t size, const char *fmt, va_list args) size = end - buf; } - for (; *fmt ; ++fmt) { - if (*fmt != '%') { - if (str < end) - *str = *fmt; - ++str; - continue; - } + while (*fmt) { + const char *old_fmt = fmt; - /* process flags */ - flags = 0; - repeat: - ++fmt; /* this also skips first '%' */ - switch (*fmt) { - case '-': flags |= LEFT; goto repeat; - case '+': flags |= PLUS; goto repeat; - case ' ': flags |= SPACE; goto repeat; - case '#': flags |= SPECIAL; goto repeat; - case '0': flags |= ZEROPAD; goto repeat; - } + read = format_decode(fmt, &spec); - /* get field width */ - field_width = -1; - if (isdigit(*fmt)) - field_width = skip_atoi(&fmt); - else if (*fmt == '*') { - ++fmt; - /* it's the next argument */ - field_width = va_arg(args, int); - if (field_width < 0) { - field_width = -field_width; - flags |= LEFT; - } - } + fmt += read; - /* get the precision */ - precision = -1; - if (*fmt == '.') { - ++fmt; - if (isdigit(*fmt)) - precision = skip_atoi(&fmt); - else if (*fmt == '*') { - ++fmt; - /* it's the next argument */ - precision = va_arg(args, int); + switch (spec.type) { + case FORMAT_TYPE_NONE: { + int copy = read; + if (str < end) { + if (copy > end - str) + copy = end - str; + memcpy(str, old_fmt, copy); } - if (precision < 0) - precision = 0; + str += read; + break; } - /* get the conversion qualifier */ - qualifier = -1; - if (*fmt == 'h' || *fmt == 'l' || *fmt == 'L' || - *fmt =='Z' || *fmt == 'z' || *fmt == 't') { - qualifier = *fmt; - ++fmt; - if (qualifier == 'l' && *fmt == 'l') { - qualifier = 'L'; - ++fmt; - } - } + case FORMAT_TYPE_WIDTH: + spec.field_width = va_arg(args, int); + break; - /* default base */ - base = 10; + case FORMAT_TYPE_PRECISION: + spec.precision = va_arg(args, int); + break; - switch (*fmt) { - case 'c': - if (!(flags & LEFT)) { - while (--field_width > 0) { - if (str < end) - *str = ' '; - ++str; - } - } - c = (unsigned char) va_arg(args, int); - if (str < end) - *str = c; - ++str; - while (--field_width > 0) { + case FORMAT_TYPE_CHAR: 
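			/*
			 * %c: the character arrives as the next va_arg (promoted
			 * to int); spaces pad up to spec.field_width, before the
			 * character unless LEFT justification was requested,
			 * after it otherwise.
			 */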
+ if (!(spec.flags & LEFT)) { + while (--spec.field_width > 0) { if (str < end) *str = ' '; ++str; - } - continue; - - case 's': - str = string(str, end, va_arg(args, char *), field_width, precision, flags); - continue; - - case 'p': - str = pointer(fmt+1, str, end, - va_arg(args, void *), - field_width, precision, flags); - /* Skip all alphanumeric pointer suffixes */ - while (isalnum(fmt[1])) - fmt++; - continue; - - case 'n': - /* FIXME: - * What does C99 say about the overflow case here? */ - if (qualifier == 'l') { - long * ip = va_arg(args, long *); - *ip = (str - buf); - } else if (qualifier == 'Z' || qualifier == 'z') { - size_t * ip = va_arg(args, size_t *); - *ip = (str - buf); - } else { - int * ip = va_arg(args, int *); - *ip = (str - buf); - } - continue; - case '%': + } + } + c = (unsigned char) va_arg(args, int); + if (str < end) + *str = c; + ++str; + while (--spec.field_width > 0) { if (str < end) - *str = '%'; + *str = ' '; ++str; - continue; + } + break; - /* integer number formats - set up the flags and "break" */ - case 'o': - base = 8; - break; + case FORMAT_TYPE_STR: + str = string(str, end, va_arg(args, char *), spec); + break; - case 'x': - flags |= SMALL; - case 'X': - base = 16; - break; + case FORMAT_TYPE_PTR: + str = pointer(fmt+1, str, end, va_arg(args, void *), + spec); + while (isalnum(*fmt)) + fmt++; + break; - case 'd': - case 'i': - flags |= SIGN; - case 'u': - break; + case FORMAT_TYPE_PERCENT_CHAR: + if (str < end) + *str = '%'; + ++str; + break; - default: + case FORMAT_TYPE_INVALID: + if (str < end) + *str = '%'; + ++str; + if (*fmt) { if (str < end) - *str = '%'; + *str = *fmt; ++str; - if (*fmt) { - if (str < end) - *str = *fmt; - ++str; - } else { - --fmt; - } - continue; + } else { + --fmt; + } + break; + + case FORMAT_TYPE_NRCHARS: { + int qualifier = spec.qualifier; + + if (qualifier == 'l') { + long *ip = va_arg(args, long *); + *ip = (str - buf); + } else if (qualifier == 'Z' || + qualifier == 'z') { + size_t *ip = va_arg(args, size_t *); + *ip = (str - buf); + } else { + int *ip = va_arg(args, int *); + *ip = (str - buf); + } + break; } - if (qualifier == 'L') - num = va_arg(args, long long); - else if (qualifier == 'l') { - num = va_arg(args, unsigned long); - if (flags & SIGN) - num = (signed long) num; - } else if (qualifier == 'Z' || qualifier == 'z') { - num = va_arg(args, size_t); - } else if (qualifier == 't') { - num = va_arg(args, ptrdiff_t); - } else if (qualifier == 'h') { - num = (unsigned short) va_arg(args, int); - if (flags & SIGN) - num = (signed short) num; - } else { - num = va_arg(args, unsigned int); - if (flags & SIGN) - num = (signed int) num; + + default: + switch (spec.type) { + case FORMAT_TYPE_LONG_LONG: + num = va_arg(args, long long); + break; + case FORMAT_TYPE_ULONG: + num = va_arg(args, unsigned long); + break; + case FORMAT_TYPE_LONG: + num = va_arg(args, long); + break; + case FORMAT_TYPE_SIZE_T: + num = va_arg(args, size_t); + break; + case FORMAT_TYPE_PTRDIFF: + num = va_arg(args, ptrdiff_t); + break; + case FORMAT_TYPE_USHORT: + num = (unsigned short) va_arg(args, int); + break; + case FORMAT_TYPE_SHORT: + num = (short) va_arg(args, int); + break; + case FORMAT_TYPE_INT: + num = (int) va_arg(args, int); + break; + default: + num = va_arg(args, unsigned int); + } + + str = number(str, end, num, spec); } - str = number(str, end, num, base, - field_width, precision, flags); } + if (size > 0) { if (str < end) *str = '\0'; else end[-1] = '\0'; } + /* the trailing null byte doesn't count towards the total */ 
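	/* a result >= @size means the output did not fit and was truncated */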
return str-buf; + } EXPORT_SYMBOL(vsnprintf); @@ -1058,6 +1243,372 @@ int sprintf(char * buf, const char *fmt, ...) } EXPORT_SYMBOL(sprintf); +#ifdef CONFIG_BINARY_PRINTF +/* + * bprintf service: + * vbin_printf() - VA arguments to binary data + * bstr_printf() - Binary data to text string + */ + +/** + * vbin_printf - Parse a format string and place args' binary value in a buffer + * @bin_buf: The buffer to place args' binary value + * @size: The size of the buffer(by words(32bits), not characters) + * @fmt: The format string to use + * @args: Arguments for the format string + * + * The format follows C99 vsnprintf, except %n is ignored, and its argument + * is skiped. + * + * The return value is the number of words(32bits) which would be generated for + * the given input. + * + * NOTE: + * If the return value is greater than @size, the resulting bin_buf is NOT + * valid for bstr_printf(). + */ +int vbin_printf(u32 *bin_buf, size_t size, const char *fmt, va_list args) +{ + struct printf_spec spec = {0}; + char *str, *end; + int read; + + str = (char *)bin_buf; + end = (char *)(bin_buf + size); + +#define save_arg(type) \ +do { \ + if (sizeof(type) == 8) { \ + unsigned long long value; \ + str = PTR_ALIGN(str, sizeof(u32)); \ + value = va_arg(args, unsigned long long); \ + if (str + sizeof(type) <= end) { \ + *(u32 *)str = *(u32 *)&value; \ + *(u32 *)(str + 4) = *((u32 *)&value + 1); \ + } \ + } else { \ + unsigned long value; \ + str = PTR_ALIGN(str, sizeof(type)); \ + value = va_arg(args, int); \ + if (str + sizeof(type) <= end) \ + *(typeof(type) *)str = (type)value; \ + } \ + str += sizeof(type); \ +} while (0) + + + while (*fmt) { + read = format_decode(fmt, &spec); + + fmt += read; + + switch (spec.type) { + case FORMAT_TYPE_NONE: + break; + + case FORMAT_TYPE_WIDTH: + case FORMAT_TYPE_PRECISION: + save_arg(int); + break; + + case FORMAT_TYPE_CHAR: + save_arg(char); + break; + + case FORMAT_TYPE_STR: { + const char *save_str = va_arg(args, char *); + size_t len; + if ((unsigned long)save_str > (unsigned long)-PAGE_SIZE + || (unsigned long)save_str < PAGE_SIZE) + save_str = "<NULL>"; + len = strlen(save_str); + if (str + len + 1 < end) + memcpy(str, save_str, len + 1); + str += len + 1; + break; + } + + case FORMAT_TYPE_PTR: + save_arg(void *); + /* skip all alphanumeric pointer suffixes */ + while (isalnum(*fmt)) + fmt++; + break; + + case FORMAT_TYPE_PERCENT_CHAR: + break; + + case FORMAT_TYPE_INVALID: + if (!*fmt) + --fmt; + break; + + case FORMAT_TYPE_NRCHARS: { + /* skip %n 's argument */ + int qualifier = spec.qualifier; + void *skip_arg; + if (qualifier == 'l') + skip_arg = va_arg(args, long *); + else if (qualifier == 'Z' || qualifier == 'z') + skip_arg = va_arg(args, size_t *); + else + skip_arg = va_arg(args, int *); + break; + } + + default: + switch (spec.type) { + + case FORMAT_TYPE_LONG_LONG: + save_arg(long long); + break; + case FORMAT_TYPE_ULONG: + case FORMAT_TYPE_LONG: + save_arg(unsigned long); + break; + case FORMAT_TYPE_SIZE_T: + save_arg(size_t); + break; + case FORMAT_TYPE_PTRDIFF: + save_arg(ptrdiff_t); + break; + case FORMAT_TYPE_USHORT: + case FORMAT_TYPE_SHORT: + save_arg(short); + break; + default: + save_arg(int); + } + } + } + return (u32 *)(PTR_ALIGN(str, sizeof(u32))) - bin_buf; + +#undef save_arg +} +EXPORT_SYMBOL_GPL(vbin_printf); + +/** + * bstr_printf - Format a string from binary arguments and place it in a buffer + * @buf: The buffer to place the result into + * @size: The size of the buffer, including the trailing null space + * @fmt: The format 
string to use + * @bin_buf: Binary arguments for the format string + * + * This function like C99 vsnprintf, but the difference is that vsnprintf gets + * arguments from stack, and bstr_printf gets arguments from @bin_buf which is + * a binary buffer that generated by vbin_printf. + * + * The format follows C99 vsnprintf, but has some extensions: + * %pS output the name of a text symbol + * %pF output the name of a function pointer + * %pR output the address range in a struct resource + * %n is ignored + * + * The return value is the number of characters which would + * be generated for the given input, excluding the trailing + * '\0', as per ISO C99. If you want to have the exact + * number of characters written into @buf as return value + * (not including the trailing '\0'), use vscnprintf(). If the + * return is greater than or equal to @size, the resulting + * string is truncated. + */ +int bstr_printf(char *buf, size_t size, const char *fmt, const u32 *bin_buf) +{ + unsigned long long num; + char *str, *end, c; + const char *args = (const char *)bin_buf; + + struct printf_spec spec = {0}; + + if (unlikely((int) size < 0)) { + /* There can be only one.. */ + static char warn = 1; + WARN_ON(warn); + warn = 0; + return 0; + } + + str = buf; + end = buf + size; + +#define get_arg(type) \ +({ \ + typeof(type) value; \ + if (sizeof(type) == 8) { \ + args = PTR_ALIGN(args, sizeof(u32)); \ + *(u32 *)&value = *(u32 *)args; \ + *((u32 *)&value + 1) = *(u32 *)(args + 4); \ + } else { \ + args = PTR_ALIGN(args, sizeof(type)); \ + value = *(typeof(type) *)args; \ + } \ + args += sizeof(type); \ + value; \ +}) + + /* Make sure end is always >= buf */ + if (end < buf) { + end = ((void *)-1); + size = end - buf; + } + + while (*fmt) { + int read; + const char *old_fmt = fmt; + + read = format_decode(fmt, &spec); + + fmt += read; + + switch (spec.type) { + case FORMAT_TYPE_NONE: { + int copy = read; + if (str < end) { + if (copy > end - str) + copy = end - str; + memcpy(str, old_fmt, copy); + } + str += read; + break; + } + + case FORMAT_TYPE_WIDTH: + spec.field_width = get_arg(int); + break; + + case FORMAT_TYPE_PRECISION: + spec.precision = get_arg(int); + break; + + case FORMAT_TYPE_CHAR: + if (!(spec.flags & LEFT)) { + while (--spec.field_width > 0) { + if (str < end) + *str = ' '; + ++str; + } + } + c = (unsigned char) get_arg(char); + if (str < end) + *str = c; + ++str; + while (--spec.field_width > 0) { + if (str < end) + *str = ' '; + ++str; + } + break; + + case FORMAT_TYPE_STR: { + const char *str_arg = args; + size_t len = strlen(str_arg); + args += len + 1; + str = string(str, end, (char *)str_arg, spec); + break; + } + + case FORMAT_TYPE_PTR: + str = pointer(fmt+1, str, end, get_arg(void *), spec); + while (isalnum(*fmt)) + fmt++; + break; + + case FORMAT_TYPE_PERCENT_CHAR: + if (str < end) + *str = '%'; + ++str; + break; + + case FORMAT_TYPE_INVALID: + if (str < end) + *str = '%'; + ++str; + if (*fmt) { + if (str < end) + *str = *fmt; + ++str; + } else { + --fmt; + } + break; + + case FORMAT_TYPE_NRCHARS: + /* skip */ + break; + + default: + switch (spec.type) { + + case FORMAT_TYPE_LONG_LONG: + num = get_arg(long long); + break; + case FORMAT_TYPE_ULONG: + num = get_arg(unsigned long); + break; + case FORMAT_TYPE_LONG: + num = get_arg(unsigned long); + break; + case FORMAT_TYPE_SIZE_T: + num = get_arg(size_t); + break; + case FORMAT_TYPE_PTRDIFF: + num = get_arg(ptrdiff_t); + break; + case FORMAT_TYPE_USHORT: + num = get_arg(unsigned short); + break; + case FORMAT_TYPE_SHORT: + num = 
get_arg(short); + break; + case FORMAT_TYPE_UINT: + num = get_arg(unsigned int); + break; + default: + num = get_arg(int); + } + + str = number(str, end, num, spec); + } + } + + if (size > 0) { + if (str < end) + *str = '\0'; + else + end[-1] = '\0'; + } + +#undef get_arg + + /* the trailing null byte doesn't count towards the total */ + return str - buf; +} +EXPORT_SYMBOL_GPL(bstr_printf); + +/** + * bprintf - Parse a format string and place args' binary value in a buffer + * @bin_buf: The buffer to place args' binary value + * @size: The size of the buffer(by words(32bits), not characters) + * @fmt: The format string to use + * @...: Arguments for the format string + * + * The function returns the number of words(u32) written + * into @bin_buf. + */ +int bprintf(u32 *bin_buf, size_t size, const char *fmt, ...) +{ + va_list args; + int ret; + + va_start(args, fmt); + ret = vbin_printf(bin_buf, size, fmt, args); + va_end(args); + return ret; +} +EXPORT_SYMBOL_GPL(bprintf); + +#endif /* CONFIG_BINARY_PRINTF */ + /** * vsscanf - Unformat a buffer into a list of arguments * @buf: input buffer |
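For reference, decoding a compound specifier with the new format_decode()/printf_spec machinery proceeds as traced below. This is a hand trace that follows the decoder logic above, not program output, and spec is assumed to start out zeroed exactly as vsnprintf() initializes it:

	struct printf_spec spec = {0};
	int read;

	read = format_decode("%-8.2lx", &spec);
	/*
	 * read             == 7                (whole specifier consumed)
	 * spec.flags       == LEFT | SMALL
	 * spec.field_width == 8
	 * spec.precision   == 2
	 * spec.qualifier   == 'l'
	 * spec.base        == 16
	 * spec.type        == FORMAT_TYPE_ULONG
	 */

A '*' width or precision is the only case that takes two calls: the first call returns early with spec.type set to FORMAT_TYPE_WIDTH (or FORMAT_TYPE_PRECISION), the caller pulls the value from the argument list, and the next call resumes at the precision/qualifier stage, which is exactly how the rewritten vsnprintf() above drives it.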
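The CONFIG_BINARY_PRINTF helpers let a caller separate argument capture (vbin_printf()/bprintf(), cheap) from the actual text formatting (bstr_printf(), done later against the same format string). A minimal usage sketch under the semantics documented above; the buffer sizes and the log_region() name are illustrative only, and the snippet assumes the declarations added by this patch are in scope. Note that %s arguments are copied into the binary buffer, while %p arguments are stored as bare pointers and only dereferenced by bstr_printf(), so the pointed-to objects must still be valid at that time.

static u32 bin_buf[64];			/* sized in 32-bit words, not bytes */
static char txt_buf[128];

static int log_region(const char *name, unsigned long base)
{
	static const char fmt[] = "%s: base %08lx\n";
	int words, len;

	/* cheap step: record the raw argument values, no text formatting yet */
	words = bprintf(bin_buf, ARRAY_SIZE(bin_buf), fmt, name, base);
	if (words > ARRAY_SIZE(bin_buf))
		return -ENOSPC;		/* bin_buf too small; contents unusable */

	/* expensive step, may run much later: expand the captured args to text */
	len = bstr_printf(txt_buf, sizeof(txt_buf), fmt, bin_buf);

	return len;
}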