Diffstat (limited to 'include/linux')
214 files changed, 5610 insertions, 1815 deletions
diff --git a/include/linux/acpi.h b/include/linux/acpi.h index 41f7ce7edd7a..a17177639376 100644 --- a/include/linux/acpi.h +++ b/include/linux/acpi.h @@ -82,6 +82,7 @@ char * __acpi_map_table (unsigned long phys_addr, unsigned long size); int early_acpi_boot_init(void); int acpi_boot_init (void); int acpi_boot_table_init (void); +int acpi_mps_check (void); int acpi_numa_init (void); int acpi_table_init (void); @@ -234,6 +235,9 @@ int acpi_check_region(resource_size_t start, resource_size_t n, int acpi_check_mem_region(resource_size_t start, resource_size_t n, const char *name); +#ifdef CONFIG_PM_SLEEP +void __init acpi_old_suspend_ordering(void); +#endif /* CONFIG_PM_SLEEP */ #else /* CONFIG_ACPI */ static inline int early_acpi_boot_init(void) @@ -250,6 +254,11 @@ static inline int acpi_boot_table_init(void) return 0; } +static inline int acpi_mps_check(void) +{ + return 0; +} + static inline int acpi_check_resource_conflict(struct resource *res) { return 0; diff --git a/include/linux/adb.h b/include/linux/adb.h index 64d8878e1444..63bca502fa55 100644 --- a/include/linux/adb.h +++ b/include/linux/adb.h @@ -84,7 +84,6 @@ enum adb_message { ADB_MSG_PRE_RESET, /* Called before resetting the bus */ ADB_MSG_POST_RESET /* Called after resetting the bus (re-do init & register) */ }; -extern struct adb_driver *adb_controller; extern struct blocking_notifier_head adb_client_list; int adb_request(struct adb_request *req, void (*done)(struct adb_request *), diff --git a/include/linux/async_tx.h b/include/linux/async_tx.h index eb640f0acfac..0f50d4cc4360 100644 --- a/include/linux/async_tx.h +++ b/include/linux/async_tx.h @@ -101,21 +101,14 @@ async_tx_find_channel(struct dma_async_tx_descriptor *depend_tx, /** * async_tx_sync_epilog - actions to take if an operation is run synchronously - * @flags: async_tx flags - * @depend_tx: transaction depends on depend_tx * @cb_fn: function to call when the transaction completes * @cb_fn_param: parameter to pass to the callback routine */ static inline void -async_tx_sync_epilog(unsigned long flags, - struct dma_async_tx_descriptor *depend_tx, - dma_async_tx_callback cb_fn, void *cb_fn_param) +async_tx_sync_epilog(dma_async_tx_callback cb_fn, void *cb_fn_param) { if (cb_fn) cb_fn(cb_fn_param); - - if (depend_tx && (flags & ASYNC_TX_DEP_ACK)) - async_tx_ack(depend_tx); } void @@ -152,4 +145,6 @@ struct dma_async_tx_descriptor * async_trigger_callback(enum async_tx_flags flags, struct dma_async_tx_descriptor *depend_tx, dma_async_tx_callback cb_fn, void *cb_fn_param); + +void async_tx_quiesce(struct dma_async_tx_descriptor **tx); #endif /* _ASYNC_TX_H_ */ diff --git a/include/linux/auxvec.h b/include/linux/auxvec.h index ad895455ab72..0da17d14fd13 100644 --- a/include/linux/auxvec.h +++ b/include/linux/auxvec.h @@ -26,8 +26,10 @@ #define AT_SECURE 23 /* secure mode boolean */ +#define AT_EXECFN 31 /* filename of program */ #ifdef __KERNEL__ -#define AT_VECTOR_SIZE_BASE (14 + 2) /* NEW_AUX_ENT entries in auxiliary table */ +#define AT_VECTOR_SIZE_BASE 17 /* NEW_AUX_ENT entries in auxiliary table */ + /* number of "#define AT_.*" above, minus {AT_NULL, AT_IGNORE, AT_NOTELF} */ #endif #endif /* _LINUX_AUXVEC_H */ diff --git a/include/linux/bio.h b/include/linux/bio.h index 61c15eaf3fb3..0933a14e6414 100644 --- a/include/linux/bio.h +++ b/include/linux/bio.h @@ -64,6 +64,7 @@ struct bio_vec { struct bio_set; struct bio; +struct bio_integrity_payload; typedef void (bio_end_io_t) (struct bio *, int); typedef void (bio_destructor_t) (struct bio *); @@ -112,6 +113,9 @@ 
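The async_tx.h hunks above drop the flags/depend_tx arguments from async_tx_sync_epilog() and export a new async_tx_quiesce() helper. A minimal sketch (not part of the patch) of how a synchronous fallback path combines the two under the new signatures; the function name is a placeholder:

#include <linux/async_tx.h>

static void example_sync_fallback(struct dma_async_tx_descriptor *depend_tx,
                                  dma_async_tx_callback cb_fn, void *cb_param)
{
        /* wait for (and ack) any outstanding dependency descriptor */
        async_tx_quiesce(&depend_tx);

        /* ... perform the operation on the CPU here ... */

        /* new two-argument form: just invoke the completion callback */
        async_tx_sync_epilog(cb_fn, cb_param);
}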
struct bio { atomic_t bi_cnt; /* pin count */ void *bi_private; +#if defined(CONFIG_BLK_DEV_INTEGRITY) + struct bio_integrity_payload *bi_integrity; /* data integrity */ +#endif bio_destructor_t *bi_destructor; /* destructor */ }; @@ -271,6 +275,29 @@ static inline void *bio_data(struct bio *bio) */ #define bio_get(bio) atomic_inc(&(bio)->bi_cnt) +#if defined(CONFIG_BLK_DEV_INTEGRITY) +/* + * bio integrity payload + */ +struct bio_integrity_payload { + struct bio *bip_bio; /* parent bio */ + struct bio_vec *bip_vec; /* integrity data vector */ + + sector_t bip_sector; /* virtual start sector */ + + void *bip_buf; /* generated integrity data */ + bio_end_io_t *bip_end_io; /* saved I/O completion fn */ + + int bip_error; /* saved I/O error */ + unsigned int bip_size; + + unsigned short bip_pool; /* pool the ivec came from */ + unsigned short bip_vcnt; /* # of integrity bio_vecs */ + unsigned short bip_idx; /* current bip_vec index */ + + struct work_struct bip_work; /* I/O completion */ +}; +#endif /* CONFIG_BLK_DEV_INTEGRITY */ /* * A bio_pair is used when we need to split a bio. @@ -283,10 +310,14 @@ static inline void *bio_data(struct bio *bio) * in bio2.bi_private */ struct bio_pair { - struct bio bio1, bio2; - struct bio_vec bv1, bv2; - atomic_t cnt; - int error; + struct bio bio1, bio2; + struct bio_vec bv1, bv2; +#if defined(CONFIG_BLK_DEV_INTEGRITY) + struct bio_integrity_payload bip1, bip2; + struct bio_vec iv1, iv2; +#endif + atomic_t cnt; + int error; }; extern struct bio_pair *bio_split(struct bio *bi, mempool_t *pool, int first_sectors); @@ -333,6 +364,39 @@ extern struct bio *bio_copy_user_iov(struct request_queue *, struct sg_iovec *, int, int); extern int bio_uncopy_user(struct bio *); void zero_fill_bio(struct bio *bio); +extern struct bio_vec *bvec_alloc_bs(gfp_t, int, unsigned long *, struct bio_set *); +extern unsigned int bvec_nr_vecs(unsigned short idx); + +/* + * bio_set is used to allow other portions of the IO system to + * allocate their own private memory pools for bio and iovec structures. + * These memory pools in turn all allocate from the bio_slab + * and the bvec_slabs[]. + */ +#define BIO_POOL_SIZE 2 +#define BIOVEC_NR_POOLS 6 + +struct bio_set { + mempool_t *bio_pool; +#if defined(CONFIG_BLK_DEV_INTEGRITY) + mempool_t *bio_integrity_pool; +#endif + mempool_t *bvec_pools[BIOVEC_NR_POOLS]; +}; + +struct biovec_slab { + int nr_vecs; + char *name; + struct kmem_cache *slab; +}; + +extern struct bio_set *fs_bio_set; + +/* + * a small number of entries is fine, not going to be performance critical. 
+ * basically we just need to survive + */ +#define BIO_SPLIT_ENTRIES 2 #ifdef CONFIG_HIGHMEM /* @@ -381,5 +445,63 @@ static inline char *__bio_kmap_irq(struct bio *bio, unsigned short idx, __bio_kmap_irq((bio), (bio)->bi_idx, (flags)) #define bio_kunmap_irq(buf,flags) __bio_kunmap_irq(buf, flags) +#if defined(CONFIG_BLK_DEV_INTEGRITY) + +#define bip_vec_idx(bip, idx) (&(bip->bip_vec[(idx)])) +#define bip_vec(bip) bip_vec_idx(bip, 0) + +#define __bip_for_each_vec(bvl, bip, i, start_idx) \ + for (bvl = bip_vec_idx((bip), (start_idx)), i = (start_idx); \ + i < (bip)->bip_vcnt; \ + bvl++, i++) + +#define bip_for_each_vec(bvl, bip, i) \ + __bip_for_each_vec(bvl, bip, i, (bip)->bip_idx) + +static inline int bio_integrity(struct bio *bio) +{ +#if defined(CONFIG_BLK_DEV_INTEGRITY) + return bio->bi_integrity != NULL; +#else + return 0; +#endif +} + +extern struct bio_integrity_payload *bio_integrity_alloc_bioset(struct bio *, gfp_t, unsigned int, struct bio_set *); +extern struct bio_integrity_payload *bio_integrity_alloc(struct bio *, gfp_t, unsigned int); +extern void bio_integrity_free(struct bio *, struct bio_set *); +extern int bio_integrity_add_page(struct bio *, struct page *, unsigned int, unsigned int); +extern int bio_integrity_enabled(struct bio *bio); +extern int bio_integrity_set_tag(struct bio *, void *, unsigned int); +extern int bio_integrity_get_tag(struct bio *, void *, unsigned int); +extern int bio_integrity_prep(struct bio *); +extern void bio_integrity_endio(struct bio *, int); +extern void bio_integrity_advance(struct bio *, unsigned int); +extern void bio_integrity_trim(struct bio *, unsigned int, unsigned int); +extern void bio_integrity_split(struct bio *, struct bio_pair *, int); +extern int bio_integrity_clone(struct bio *, struct bio *, struct bio_set *); +extern int bioset_integrity_create(struct bio_set *, int); +extern void bioset_integrity_free(struct bio_set *); +extern void bio_integrity_init_slab(void); + +#else /* CONFIG_BLK_DEV_INTEGRITY */ + +#define bio_integrity(a) (0) +#define bioset_integrity_create(a, b) (0) +#define bio_integrity_prep(a) (0) +#define bio_integrity_enabled(a) (0) +#define bio_integrity_clone(a, b, c) (0) +#define bioset_integrity_free(a) do { } while (0) +#define bio_integrity_free(a, b) do { } while (0) +#define bio_integrity_endio(a, b) do { } while (0) +#define bio_integrity_advance(a, b) do { } while (0) +#define bio_integrity_trim(a, b, c) do { } while (0) +#define bio_integrity_split(a, b, c) do { } while (0) +#define bio_integrity_set_tag(a, b, c) do { } while (0) +#define bio_integrity_get_tag(a, b, c) do { } while (0) +#define bio_integrity_init_slab(a) do { } while (0) + +#endif /* CONFIG_BLK_DEV_INTEGRITY */ + #endif /* CONFIG_BLOCK */ #endif /* __LINUX_BIO_H */ diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h index d2a1b71e93c3..88d68081a0f1 100644 --- a/include/linux/blkdev.h +++ b/include/linux/blkdev.h @@ -23,7 +23,6 @@ struct scsi_ioctl_command; struct request_queue; -typedef struct request_queue request_queue_t __deprecated; struct elevator_queue; typedef struct elevator_queue elevator_t; struct request_pm_state; @@ -34,12 +33,6 @@ struct sg_io_hdr; #define BLKDEV_MIN_RQ 4 #define BLKDEV_MAX_RQ 128 /* Default maximum */ -int put_io_context(struct io_context *ioc); -void exit_io_context(void); -struct io_context *get_io_context(gfp_t gfp_flags, int node); -struct io_context *alloc_io_context(gfp_t gfp_flags, int node); -void copy_io_context(struct io_context **pdst, struct io_context **psrc); - struct request; 
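The bio integrity hooks declared above (bio_integrity_enabled(), bio_integrity_prep(), and the !CONFIG_BLK_DEV_INTEGRITY stubs) are intended for the submission path. A hedged sketch of a caller, assuming bio_integrity_prep() returns nonzero on failure:

#include <linux/bio.h>
#include <linux/fs.h>

static void example_submit_with_integrity(int rw, struct bio *bio)
{
        /*
         * Generate and attach protection information if the target is
         * integrity-capable; with integrity compiled out, the stubs
         * above reduce this to a plain submit_bio().
         */
        if (bio_integrity_enabled(bio) && bio_integrity_prep(bio)) {
                bio_endio(bio, -EIO);   /* assumed error convention */
                return;
        }

        submit_bio(rw, bio);
}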
typedef void (rq_end_io_fn)(struct request *, int); @@ -113,6 +106,7 @@ enum rq_flag_bits { __REQ_ALLOCED, /* request came from our alloc pool */ __REQ_RW_META, /* metadata io request */ __REQ_COPY_USER, /* contains copies of user pages */ + __REQ_INTEGRITY, /* integrity metadata has been remapped */ __REQ_NR_BITS, /* stops here */ }; @@ -135,6 +129,7 @@ enum rq_flag_bits { #define REQ_ALLOCED (1 << __REQ_ALLOCED) #define REQ_RW_META (1 << __REQ_RW_META) #define REQ_COPY_USER (1 << __REQ_COPY_USER) +#define REQ_INTEGRITY (1 << __REQ_INTEGRITY) #define BLK_MAX_CDB 16 @@ -259,7 +254,14 @@ typedef int (prep_rq_fn) (struct request_queue *, struct request *); typedef void (unplug_fn) (struct request_queue *); struct bio_vec; -typedef int (merge_bvec_fn) (struct request_queue *, struct bio *, struct bio_vec *); +struct bvec_merge_data { + struct block_device *bi_bdev; + sector_t bi_sector; + unsigned bi_size; + unsigned long bi_rw; +}; +typedef int (merge_bvec_fn) (struct request_queue *, struct bvec_merge_data *, + struct bio_vec *); typedef void (prepare_flush_fn) (struct request_queue *, struct request *); typedef void (softirq_done_fn)(struct request *); typedef int (dma_drain_needed_fn)(struct request *); @@ -426,6 +428,32 @@ static inline void queue_flag_set_unlocked(unsigned int flag, __set_bit(flag, &q->queue_flags); } +static inline int queue_flag_test_and_clear(unsigned int flag, + struct request_queue *q) +{ + WARN_ON_ONCE(!queue_is_locked(q)); + + if (test_bit(flag, &q->queue_flags)) { + __clear_bit(flag, &q->queue_flags); + return 1; + } + + return 0; +} + +static inline int queue_flag_test_and_set(unsigned int flag, + struct request_queue *q) +{ + WARN_ON_ONCE(!queue_is_locked(q)); + + if (!test_bit(flag, &q->queue_flags)) { + __set_bit(flag, &q->queue_flags); + return 0; + } + + return 1; +} + static inline void queue_flag_set(unsigned int flag, struct request_queue *q) { WARN_ON_ONCE(!queue_is_locked(q)); @@ -623,7 +651,6 @@ extern void generic_make_request(struct bio *bio); extern void blk_rq_init(struct request_queue *q, struct request *rq); extern void blk_put_request(struct request *); extern void __blk_put_request(struct request_queue *, struct request *); -extern void blk_end_sync_rq(struct request *rq, int error); extern struct request *blk_get_request(struct request_queue *, int, gfp_t); extern void blk_insert_request(struct request_queue *, struct request *, int, void *); extern void blk_requeue_request(struct request_queue *, struct request *); @@ -676,7 +703,6 @@ extern int blk_execute_rq(struct request_queue *, struct gendisk *, struct request *, int); extern void blk_execute_rq_nowait(struct request_queue *, struct gendisk *, struct request *, int, rq_end_io_fn *); -extern int blk_verify_command(unsigned char *, int); extern void blk_unplug(struct request_queue *q); static inline struct request_queue *bdev_get_queue(struct block_device *bdev) @@ -749,6 +775,7 @@ extern void blk_queue_max_segment_size(struct request_queue *, unsigned int); extern void blk_queue_hardsect_size(struct request_queue *, unsigned short); extern void blk_queue_stack_limits(struct request_queue *t, struct request_queue *b); extern void blk_queue_dma_pad(struct request_queue *, unsigned int); +extern void blk_queue_update_dma_pad(struct request_queue *, unsigned int); extern int blk_queue_dma_drain(struct request_queue *q, dma_drain_needed_fn *dma_drain_needed, void *buf, unsigned int size); @@ -802,6 +829,15 @@ static inline struct request *blk_map_queue_find_tag(struct blk_queue_tag *bqt, 
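merge_bvec_fn now receives a struct bvec_merge_data instead of a struct bio, so callbacks only see the prospective bio's device, start sector, current size, and rw flags. A sketch of a callback under the new signature; the 64 KiB boundary rule and the function name are invented for illustration:

#include <linux/blkdev.h>

static int example_merge_bvec(struct request_queue *q,
                              struct bvec_merge_data *bvm,
                              struct bio_vec *biovec)
{
        unsigned int boundary = 64 * 1024;
        unsigned int offset, room;

        /* bytes already used within the current 64 KiB window */
        offset = ((bvm->bi_sector << 9) + bvm->bi_size) & (boundary - 1);
        room = boundary - offset;

        /* return how many bytes of this biovec we are willing to accept */
        return room < biovec->bv_len ? room : biovec->bv_len;
}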
extern int blkdev_issue_flush(struct block_device *, sector_t *); +/* +* command filter functions +*/ +extern int blk_verify_command(struct file *file, unsigned char *cmd); +extern int blk_cmd_filter_verify_command(struct blk_scsi_cmd_filter *filter, + unsigned char *cmd, mode_t *f_mode); +extern int blk_register_filter(struct gendisk *disk); +extern void blk_unregister_filter(struct gendisk *disk); + #define MAX_PHYS_SEGMENTS 128 #define MAX_HW_SEGMENTS 128 #define SAFE_MAX_SECTORS 255 @@ -865,28 +901,119 @@ void kblockd_flush_work(struct work_struct *work); #define MODULE_ALIAS_BLOCKDEV_MAJOR(major) \ MODULE_ALIAS("block-major-" __stringify(major) "-*") +#if defined(CONFIG_BLK_DEV_INTEGRITY) -#else /* CONFIG_BLOCK */ -/* - * stubs for when the block layer is configured out - */ -#define buffer_heads_over_limit 0 +#define INTEGRITY_FLAG_READ 2 /* verify data integrity on read */ +#define INTEGRITY_FLAG_WRITE 4 /* generate data integrity on write */ -static inline long nr_blockdev_pages(void) +struct blk_integrity_exchg { + void *prot_buf; + void *data_buf; + sector_t sector; + unsigned int data_size; + unsigned short sector_size; + const char *disk_name; +}; + +typedef void (integrity_gen_fn) (struct blk_integrity_exchg *); +typedef int (integrity_vrfy_fn) (struct blk_integrity_exchg *); +typedef void (integrity_set_tag_fn) (void *, void *, unsigned int); +typedef void (integrity_get_tag_fn) (void *, void *, unsigned int); + +struct blk_integrity { + integrity_gen_fn *generate_fn; + integrity_vrfy_fn *verify_fn; + integrity_set_tag_fn *set_tag_fn; + integrity_get_tag_fn *get_tag_fn; + + unsigned short flags; + unsigned short tuple_size; + unsigned short sector_size; + unsigned short tag_size; + + const char *name; + + struct kobject kobj; +}; + +extern int blk_integrity_register(struct gendisk *, struct blk_integrity *); +extern void blk_integrity_unregister(struct gendisk *); +extern int blk_integrity_compare(struct block_device *, struct block_device *); +extern int blk_rq_map_integrity_sg(struct request *, struct scatterlist *); +extern int blk_rq_count_integrity_sg(struct request *); + +static inline unsigned short blk_integrity_tuple_size(struct blk_integrity *bi) { + if (bi) + return bi->tuple_size; + return 0; } -static inline void exit_io_context(void) +static inline struct blk_integrity *bdev_get_integrity(struct block_device *bdev) { + return bdev->bd_disk->integrity; } -struct io_context; -static inline int put_io_context(struct io_context *ioc) +static inline unsigned int bdev_get_tag_size(struct block_device *bdev) { - return 1; + struct blk_integrity *bi = bdev_get_integrity(bdev); + + if (bi) + return bi->tag_size; + + return 0; +} + +static inline int bdev_integrity_enabled(struct block_device *bdev, int rw) +{ + struct blk_integrity *bi = bdev_get_integrity(bdev); + + if (bi == NULL) + return 0; + + if (rw == READ && bi->verify_fn != NULL && + (bi->flags & INTEGRITY_FLAG_READ)) + return 1; + + if (rw == WRITE && bi->generate_fn != NULL && + (bi->flags & INTEGRITY_FLAG_WRITE)) + return 1; + + return 0; } +static inline int blk_integrity_rq(struct request *rq) +{ + if (rq->bio == NULL) + return 0; + + return bio_integrity(rq->bio); +} + +#else /* CONFIG_BLK_DEV_INTEGRITY */ + +#define blk_integrity_rq(rq) (0) +#define blk_rq_count_integrity_sg(a) (0) +#define blk_rq_map_integrity_sg(a, b) (0) +#define bdev_get_integrity(a) (0) +#define bdev_get_tag_size(a) (0) +#define blk_integrity_compare(a, b) (0) +#define blk_integrity_register(a, b) (0) +#define 
blk_integrity_unregister(a) do { } while (0); + +#endif /* CONFIG_BLK_DEV_INTEGRITY */ + +#else /* CONFIG_BLOCK */ +/* + * stubs for when the block layer is configured out + */ +#define buffer_heads_over_limit 0 + +static inline long nr_blockdev_pages(void) +{ + return 0; +} #endif /* CONFIG_BLOCK */ diff --git a/include/linux/blktrace_api.h b/include/linux/blktrace_api.h index e3ef903aae88..d084b8d227a5 100644 --- a/include/linux/blktrace_api.h +++ b/include/linux/blktrace_api.h @@ -129,6 +129,7 @@ struct blk_trace { u32 dev; struct dentry *dir; struct dentry *dropped_file; + struct dentry *msg_file; atomic_t dropped; }; diff --git a/include/linux/bootmem.h b/include/linux/bootmem.h index 686895bacd9d..a1d9b79078ea 100644 --- a/include/linux/bootmem.h +++ b/include/linux/bootmem.h @@ -84,6 +84,8 @@ extern int reserve_bootmem(unsigned long addr, unsigned long size, int flags); __alloc_bootmem_low(x, PAGE_SIZE, 0) #endif /* !CONFIG_HAVE_ARCH_BOOTMEM_NODE */ +extern int reserve_bootmem_generic(unsigned long addr, unsigned long size, + int flags); extern unsigned long free_all_bootmem(void); extern unsigned long free_all_bootmem_node(pg_data_t *pgdat); extern void *__alloc_bootmem_node(pg_data_t *pgdat, diff --git a/include/linux/brcmphy.h b/include/linux/brcmphy.h new file mode 100644 index 000000000000..9b64b6d67873 --- /dev/null +++ b/include/linux/brcmphy.h @@ -0,0 +1,6 @@ +#define PHY_BRCM_WIRESPEED_ENABLE 0x00000001 +#define PHY_BRCM_AUTO_PWRDWN_ENABLE 0x00000002 +#define PHY_BRCM_APD_CLK125_ENABLE 0x00000004 +#define PHY_BRCM_STD_IBND_DISABLE 0x00000008 +#define PHY_BRCM_EXT_IBND_RX_ENABLE 0x00000010 +#define PHY_BRCM_EXT_IBND_TX_ENABLE 0x00000020 diff --git a/include/linux/configfs.h b/include/linux/configfs.h index 3ae65b1bf90f..d62c19ff041c 100644 --- a/include/linux/configfs.h +++ b/include/linux/configfs.h @@ -148,7 +148,8 @@ struct configfs_attribute { * items. If the item is a group, it may support mkdir(2). * Groups supply one of make_group() and make_item(). If the * group supports make_group(), one can create group children. If it - * supports make_item(), one can create config_item children. If it has + * supports make_item(), one can create config_item children. make_group() + * and make_item() return ERR_PTR() on errors. If it has * default_groups on group->default_groups, it has automatically created * group children. default_groups may coexist alongsize make_group() or * make_item(), but if the group wishes to have only default_groups diff --git a/include/linux/console.h b/include/linux/console.h index a4f27fbdf549..248e6e3b9b73 100644 --- a/include/linux/console.h +++ b/include/linux/console.h @@ -108,6 +108,8 @@ struct console { struct console *next; }; +extern int console_set_on_cmdline; + extern int add_preferred_console(char *name, int idx, char *options); extern int update_console_cmdline(char *name, int idx, char *name_new, int idx_new, char *options); extern void register_console(struct console *); diff --git a/include/linux/cpufreq.h b/include/linux/cpufreq.h index e7e91dbfde0f..2270ca5ec631 100644 --- a/include/linux/cpufreq.h +++ b/include/linux/cpufreq.h @@ -4,9 +4,6 @@ * Copyright (C) 2001 Russell King * (C) 2002 - 2003 Dominik Brodowski <linux@brodo.de> * - * - * $Id: cpufreq.h,v 1.36 2003/01/20 17:31:48 db Exp $ - * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. 
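The blk_integrity interface added to blkdev.h above lets a disk driver attach generate/verify callbacks to its gendisk. A hedged sketch of a registration using only the fields shown; the callbacks, sizes, and the "EXAMPLE-DIF" name are placeholders:

#include <linux/blkdev.h>
#include <linux/genhd.h>

static void example_generate_fn(struct blk_integrity_exchg *bix)
{
        /* fill bix->prot_buf from bix->data_buf starting at bix->sector */
}

static int example_verify_fn(struct blk_integrity_exchg *bix)
{
        return 0;       /* 0 = protection information verified OK (assumed) */
}

static struct blk_integrity example_integrity = {
        .name           = "EXAMPLE-DIF",
        .generate_fn    = example_generate_fn,
        .verify_fn      = example_verify_fn,
        .flags          = INTEGRITY_FLAG_READ | INTEGRITY_FLAG_WRITE,
        .tuple_size     = 8,            /* bytes of metadata per sector */
        .sector_size    = 512,
        .tag_size       = 0,
};

static int example_register(struct gendisk *disk)
{
        return blk_integrity_register(disk, &example_integrity);
}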
diff --git a/include/linux/cpumask.h b/include/linux/cpumask.h index c24875bd9c5b..1b5c98e7fef7 100644 --- a/include/linux/cpumask.h +++ b/include/linux/cpumask.h @@ -17,6 +17,20 @@ * For details of cpus_onto(), see bitmap_onto in lib/bitmap.c. * For details of cpus_fold(), see bitmap_fold in lib/bitmap.c. * + * . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . + * Note: The alternate operations with the suffix "_nr" are used + * to limit the range of the loop to nr_cpu_ids instead of + * NR_CPUS when NR_CPUS > 64 for performance reasons. + * If NR_CPUS is <= 64 then most assembler bitmask + * operators execute faster with a constant range, so + * the operator will continue to use NR_CPUS. + * + * Another consideration is that nr_cpu_ids is initialized + * to NR_CPUS and isn't lowered until the possible cpus are + * discovered (including any disabled cpus). So early uses + * will span the entire range of NR_CPUS. + * . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . + * * The available cpumask operations are: * * void cpu_set(cpu, mask) turn on bit 'cpu' in mask @@ -38,18 +52,60 @@ * int cpus_empty(mask) Is mask empty (no bits sets)? * int cpus_full(mask) Is mask full (all bits sets)? * int cpus_weight(mask) Hamming weigh - number of set bits + * int cpus_weight_nr(mask) Same using nr_cpu_ids instead of NR_CPUS * * void cpus_shift_right(dst, src, n) Shift right * void cpus_shift_left(dst, src, n) Shift left * * int first_cpu(mask) Number lowest set bit, or NR_CPUS * int next_cpu(cpu, mask) Next cpu past 'cpu', or NR_CPUS + * int next_cpu_nr(cpu, mask) Next cpu past 'cpu', or nr_cpu_ids * * cpumask_t cpumask_of_cpu(cpu) Return cpumask with bit 'cpu' set + *ifdef CONFIG_HAS_CPUMASK_OF_CPU + * cpumask_of_cpu_ptr_declare(v) Declares cpumask_t *v + * cpumask_of_cpu_ptr_next(v, cpu) Sets v = &cpumask_of_cpu_map[cpu] + * cpumask_of_cpu_ptr(v, cpu) Combines above two operations + *else + * cpumask_of_cpu_ptr_declare(v) Declares cpumask_t _v and *v = &_v + * cpumask_of_cpu_ptr_next(v, cpu) Sets _v = cpumask_of_cpu(cpu) + * cpumask_of_cpu_ptr(v, cpu) Combines above two operations + *endif * CPU_MASK_ALL Initializer - all bits set * CPU_MASK_NONE Initializer - no bits set * unsigned long *cpus_addr(mask) Array of unsigned long's in mask * + * CPUMASK_ALLOC kmalloc's a structure that is a composite of many cpumask_t + * variables, and CPUMASK_PTR provides pointers to each field. + * + * The structure should be defined something like this: + * struct my_cpumasks { + * cpumask_t mask1; + * cpumask_t mask2; + * }; + * + * Usage is then: + * CPUMASK_ALLOC(my_cpumasks); + * CPUMASK_PTR(mask1, my_cpumasks); + * CPUMASK_PTR(mask2, my_cpumasks); + * + * --- DO NOT reference cpumask_t pointers until this check --- + * if (my_cpumasks == NULL) + * "kmalloc failed"... + * + * References are now pointers to the cpumask_t variables (*mask1, ...) 
+ * + *if NR_CPUS > BITS_PER_LONG + * CPUMASK_ALLOC(m) Declares and allocates struct m *m = + * kmalloc(sizeof(*m), GFP_KERNEL) + * CPUMASK_FREE(m) Macro for kfree(m) + *else + * CPUMASK_ALLOC(m) Declares struct m _m, *m = &_m + * CPUMASK_FREE(m) Nop + *endif + * CPUMASK_PTR(v, m) Declares cpumask_t *v = &(m->v) + * ------------------------------------------------------------------------ + * * int cpumask_scnprintf(buf, len, mask) Format cpumask for printing * int cpumask_parse_user(ubuf, ulen, mask) Parse ascii string as cpumask * int cpulist_scnprintf(buf, len, mask) Format cpumask as list for printing @@ -59,7 +115,8 @@ * void cpus_onto(dst, orig, relmap) *dst = orig relative to relmap * void cpus_fold(dst, orig, sz) dst bits = orig bits mod sz * - * for_each_cpu_mask(cpu, mask) for-loop cpu over mask + * for_each_cpu_mask(cpu, mask) for-loop cpu over mask using NR_CPUS + * for_each_cpu_mask_nr(cpu, mask) for-loop cpu over mask using nr_cpu_ids * * int num_online_cpus() Number of online CPUs * int num_possible_cpus() Number of all possible CPUs @@ -216,23 +273,19 @@ static inline void __cpus_shift_left(cpumask_t *dstp, bitmap_shift_left(dstp->bits, srcp->bits, n, nbits); } -#ifdef CONFIG_SMP -int __first_cpu(const cpumask_t *srcp); -#define first_cpu(src) __first_cpu(&(src)) -int __next_cpu(int n, const cpumask_t *srcp); -#define next_cpu(n, src) __next_cpu((n), &(src)) -#else -#define first_cpu(src) ({ (void)(src); 0; }) -#define next_cpu(n, src) ({ (void)(src); 1; }) -#endif #ifdef CONFIG_HAVE_CPUMASK_OF_CPU_MAP extern cpumask_t *cpumask_of_cpu_map; -#define cpumask_of_cpu(cpu) (cpumask_of_cpu_map[cpu]) - +#define cpumask_of_cpu(cpu) (cpumask_of_cpu_map[cpu]) +#define cpumask_of_cpu_ptr(v, cpu) \ + const cpumask_t *v = &cpumask_of_cpu(cpu) +#define cpumask_of_cpu_ptr_declare(v) \ + const cpumask_t *v +#define cpumask_of_cpu_ptr_next(v, cpu) \ + v = &cpumask_of_cpu(cpu) #else #define cpumask_of_cpu(cpu) \ -(*({ \ +({ \ typeof(_unused_cpumask_arg_) m; \ if (sizeof(m) == sizeof(unsigned long)) { \ m.bits[0] = 1UL<<(cpu); \ @@ -240,8 +293,16 @@ extern cpumask_t *cpumask_of_cpu_map; cpus_clear(m); \ cpu_set((cpu), m); \ } \ - &m; \ -})) + m; \ +}) +#define cpumask_of_cpu_ptr(v, cpu) \ + cpumask_t _##v = cpumask_of_cpu(cpu); \ + const cpumask_t *v = &_##v +#define cpumask_of_cpu_ptr_declare(v) \ + cpumask_t _##v; \ + const cpumask_t *v = &_##v +#define cpumask_of_cpu_ptr_next(v, cpu) \ + _##v = cpumask_of_cpu(cpu) #endif #define CPU_MASK_LAST_WORD BITMAP_LAST_WORD_MASK(NR_CPUS) @@ -281,6 +342,15 @@ extern cpumask_t cpu_mask_all; #define cpus_addr(src) ((src).bits) +#if NR_CPUS > BITS_PER_LONG +#define CPUMASK_ALLOC(m) struct m *m = kmalloc(sizeof(*m), GFP_KERNEL) +#define CPUMASK_FREE(m) kfree(m) +#else +#define CPUMASK_ALLOC(m) struct m _m, *m = &_m +#define CPUMASK_FREE(m) +#endif +#define CPUMASK_PTR(v, m) cpumask_t *v = &(m->v) + #define cpumask_scnprintf(buf, len, src) \ __cpumask_scnprintf((buf), (len), &(src), NR_CPUS) static inline int __cpumask_scnprintf(char *buf, int len, @@ -343,29 +413,59 @@ static inline void __cpus_fold(cpumask_t *dstp, const cpumask_t *origp, bitmap_fold(dstp->bits, origp->bits, sz, nbits); } -#if NR_CPUS > 1 -#define for_each_cpu_mask(cpu, mask) \ - for ((cpu) = first_cpu(mask); \ - (cpu) < NR_CPUS; \ - (cpu) = next_cpu((cpu), (mask))) -#else /* NR_CPUS == 1 */ -#define for_each_cpu_mask(cpu, mask) \ +#if NR_CPUS == 1 + +#define nr_cpu_ids 1 +#define first_cpu(src) ({ (void)(src); 0; }) +#define next_cpu(n, src) ({ (void)(src); 1; }) +#define 
any_online_cpu(mask) 0 +#define for_each_cpu_mask(cpu, mask) \ for ((cpu) = 0; (cpu) < 1; (cpu)++, (void)mask) -#endif /* NR_CPUS */ + +#else /* NR_CPUS > 1 */ + +extern int nr_cpu_ids; +int __first_cpu(const cpumask_t *srcp); +int __next_cpu(int n, const cpumask_t *srcp); +int __any_online_cpu(const cpumask_t *mask); + +#define first_cpu(src) __first_cpu(&(src)) +#define next_cpu(n, src) __next_cpu((n), &(src)) +#define any_online_cpu(mask) __any_online_cpu(&(mask)) +#define for_each_cpu_mask(cpu, mask) \ + for ((cpu) = -1; \ + (cpu) = next_cpu((cpu), (mask)), \ + (cpu) < NR_CPUS; ) +#endif + +#if NR_CPUS <= 64 #define next_cpu_nr(n, src) next_cpu(n, src) #define cpus_weight_nr(cpumask) cpus_weight(cpumask) #define for_each_cpu_mask_nr(cpu, mask) for_each_cpu_mask(cpu, mask) +#else /* NR_CPUS > 64 */ + +int __next_cpu_nr(int n, const cpumask_t *srcp); +#define next_cpu_nr(n, src) __next_cpu_nr((n), &(src)) +#define cpus_weight_nr(cpumask) __cpus_weight(&(cpumask), nr_cpu_ids) +#define for_each_cpu_mask_nr(cpu, mask) \ + for ((cpu) = -1; \ + (cpu) = next_cpu_nr((cpu), (mask)), \ + (cpu) < nr_cpu_ids; ) + +#endif /* NR_CPUS > 64 */ + /* * The following particular system cpumasks and operations manage - * possible, present and online cpus. Each of them is a fixed size + * possible, present, active and online cpus. Each of them is a fixed size * bitmap of size NR_CPUS. * * #ifdef CONFIG_HOTPLUG_CPU * cpu_possible_map - has bit 'cpu' set iff cpu is populatable * cpu_present_map - has bit 'cpu' set iff cpu is populated * cpu_online_map - has bit 'cpu' set iff cpu available to scheduler + * cpu_active_map - has bit 'cpu' set iff cpu available to migration * #else * cpu_possible_map - has bit 'cpu' set iff cpu is populated * cpu_present_map - copy of cpu_possible_map @@ -416,14 +516,16 @@ static inline void __cpus_fold(cpumask_t *dstp, const cpumask_t *origp, extern cpumask_t cpu_possible_map; extern cpumask_t cpu_online_map; extern cpumask_t cpu_present_map; +extern cpumask_t cpu_active_map; #if NR_CPUS > 1 -#define num_online_cpus() cpus_weight(cpu_online_map) -#define num_possible_cpus() cpus_weight(cpu_possible_map) -#define num_present_cpus() cpus_weight(cpu_present_map) +#define num_online_cpus() cpus_weight_nr(cpu_online_map) +#define num_possible_cpus() cpus_weight_nr(cpu_possible_map) +#define num_present_cpus() cpus_weight_nr(cpu_present_map) #define cpu_online(cpu) cpu_isset((cpu), cpu_online_map) #define cpu_possible(cpu) cpu_isset((cpu), cpu_possible_map) #define cpu_present(cpu) cpu_isset((cpu), cpu_present_map) +#define cpu_active(cpu) cpu_isset((cpu), cpu_active_map) #else #define num_online_cpus() 1 #define num_possible_cpus() 1 @@ -431,21 +533,13 @@ extern cpumask_t cpu_present_map; #define cpu_online(cpu) ((cpu) == 0) #define cpu_possible(cpu) ((cpu) == 0) #define cpu_present(cpu) ((cpu) == 0) +#define cpu_active(cpu) ((cpu) == 0) #endif #define cpu_is_offline(cpu) unlikely(!cpu_online(cpu)) -#ifdef CONFIG_SMP -extern int nr_cpu_ids; -#define any_online_cpu(mask) __any_online_cpu(&(mask)) -int __any_online_cpu(const cpumask_t *mask); -#else -#define nr_cpu_ids 1 -#define any_online_cpu(mask) 0 -#endif - -#define for_each_possible_cpu(cpu) for_each_cpu_mask((cpu), cpu_possible_map) -#define for_each_online_cpu(cpu) for_each_cpu_mask((cpu), cpu_online_map) -#define for_each_present_cpu(cpu) for_each_cpu_mask((cpu), cpu_present_map) +#define for_each_possible_cpu(cpu) for_each_cpu_mask_nr((cpu), cpu_possible_map) +#define for_each_online_cpu(cpu) for_each_cpu_mask_nr((cpu), 
cpu_online_map) +#define for_each_present_cpu(cpu) for_each_cpu_mask_nr((cpu), cpu_present_map) #endif /* __LINUX_CPUMASK_H */ diff --git a/include/linux/cpuset.h b/include/linux/cpuset.h index 038578362b47..e8f450c499b0 100644 --- a/include/linux/cpuset.h +++ b/include/linux/cpuset.h @@ -78,6 +78,8 @@ extern void cpuset_track_online_nodes(void); extern int current_cpuset_is_being_rebound(void); +extern void rebuild_sched_domains(void); + #else /* !CONFIG_CPUSETS */ static inline int cpuset_init_early(void) { return 0; } @@ -156,6 +158,11 @@ static inline int current_cpuset_is_being_rebound(void) return 0; } +static inline void rebuild_sched_domains(void) +{ + partition_sched_domains(0, NULL, NULL); +} + #endif /* !CONFIG_CPUSETS */ #endif /* _LINUX_CPUSET_H */ diff --git a/include/linux/crc-t10dif.h b/include/linux/crc-t10dif.h new file mode 100644 index 000000000000..a9c96d865ee7 --- /dev/null +++ b/include/linux/crc-t10dif.h @@ -0,0 +1,8 @@ +#ifndef _LINUX_CRC_T10DIF_H +#define _LINUX_CRC_T10DIF_H + +#include <linux/types.h> + +__u16 crc_t10dif(unsigned char const *, size_t); + +#endif diff --git a/include/linux/crypto.h b/include/linux/crypto.h index 425824bd49f3..c43dc47fdf75 100644 --- a/include/linux/crypto.h +++ b/include/linux/crypto.h @@ -30,15 +30,17 @@ */ #define CRYPTO_ALG_TYPE_MASK 0x0000000f #define CRYPTO_ALG_TYPE_CIPHER 0x00000001 -#define CRYPTO_ALG_TYPE_DIGEST 0x00000002 -#define CRYPTO_ALG_TYPE_HASH 0x00000003 +#define CRYPTO_ALG_TYPE_COMPRESS 0x00000002 +#define CRYPTO_ALG_TYPE_AEAD 0x00000003 #define CRYPTO_ALG_TYPE_BLKCIPHER 0x00000004 #define CRYPTO_ALG_TYPE_ABLKCIPHER 0x00000005 #define CRYPTO_ALG_TYPE_GIVCIPHER 0x00000006 -#define CRYPTO_ALG_TYPE_COMPRESS 0x00000008 -#define CRYPTO_ALG_TYPE_AEAD 0x00000009 +#define CRYPTO_ALG_TYPE_DIGEST 0x00000008 +#define CRYPTO_ALG_TYPE_HASH 0x00000009 +#define CRYPTO_ALG_TYPE_AHASH 0x0000000a #define CRYPTO_ALG_TYPE_HASH_MASK 0x0000000e +#define CRYPTO_ALG_TYPE_AHASH_MASK 0x0000000c #define CRYPTO_ALG_TYPE_BLKCIPHER_MASK 0x0000000c #define CRYPTO_ALG_LARVAL 0x00000010 @@ -102,6 +104,7 @@ struct crypto_async_request; struct crypto_aead; struct crypto_blkcipher; struct crypto_hash; +struct crypto_ahash; struct crypto_tfm; struct crypto_type; struct aead_givcrypt_request; @@ -131,6 +134,16 @@ struct ablkcipher_request { void *__ctx[] CRYPTO_MINALIGN_ATTR; }; +struct ahash_request { + struct crypto_async_request base; + + unsigned int nbytes; + struct scatterlist *src; + u8 *result; + + void *__ctx[] CRYPTO_MINALIGN_ATTR; +}; + /** * struct aead_request - AEAD request * @base: Common attributes for async crypto requests @@ -195,6 +208,17 @@ struct ablkcipher_alg { unsigned int ivsize; }; +struct ahash_alg { + int (*init)(struct ahash_request *req); + int (*update)(struct ahash_request *req); + int (*final)(struct ahash_request *req); + int (*digest)(struct ahash_request *req); + int (*setkey)(struct crypto_ahash *tfm, const u8 *key, + unsigned int keylen); + + unsigned int digestsize; +}; + struct aead_alg { int (*setkey)(struct crypto_aead *tfm, const u8 *key, unsigned int keylen); @@ -272,6 +296,7 @@ struct compress_alg { #define cra_cipher cra_u.cipher #define cra_digest cra_u.digest #define cra_hash cra_u.hash +#define cra_ahash cra_u.ahash #define cra_compress cra_u.compress struct crypto_alg { @@ -298,6 +323,7 @@ struct crypto_alg { struct cipher_alg cipher; struct digest_alg digest; struct hash_alg hash; + struct ahash_alg ahash; struct compress_alg compress; } cra_u; @@ -383,6 +409,18 @@ struct hash_tfm { unsigned int 
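The CPUMASK_ALLOC/CPUMASK_PTR helpers and the _nr iterators documented in the cpumask.h comment above are used roughly as follows (a sketch following the usage spelled out in that comment; the structure and mask names are arbitrary):

#include <linux/cpumask.h>
#include <linux/slab.h>

struct example_cpumasks {
        cpumask_t mask1;
        cpumask_t mask2;
};

static void example_walk_online(void)
{
        int cpu;
        CPUMASK_ALLOC(example_cpumasks);        /* kmalloc'd if NR_CPUS > BITS_PER_LONG */
        CPUMASK_PTR(mask1, example_cpumasks);

        if (example_cpumasks == NULL)
                return;                         /* kmalloc failed */

        cpus_clear(*mask1);
        for_each_cpu_mask_nr(cpu, cpu_online_map)       /* bounded by nr_cpu_ids */
                cpu_set(cpu, *mask1);

        CPUMASK_FREE(example_cpumasks);
}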
digestsize; }; +struct ahash_tfm { + int (*init)(struct ahash_request *req); + int (*update)(struct ahash_request *req); + int (*final)(struct ahash_request *req); + int (*digest)(struct ahash_request *req); + int (*setkey)(struct crypto_ahash *tfm, const u8 *key, + unsigned int keylen); + + unsigned int digestsize; + unsigned int reqsize; +}; + struct compress_tfm { int (*cot_compress)(struct crypto_tfm *tfm, const u8 *src, unsigned int slen, @@ -397,6 +435,7 @@ struct compress_tfm { #define crt_blkcipher crt_u.blkcipher #define crt_cipher crt_u.cipher #define crt_hash crt_u.hash +#define crt_ahash crt_u.ahash #define crt_compress crt_u.compress struct crypto_tfm { @@ -409,6 +448,7 @@ struct crypto_tfm { struct blkcipher_tfm blkcipher; struct cipher_tfm cipher; struct hash_tfm hash; + struct ahash_tfm ahash; struct compress_tfm compress; } crt_u; diff --git a/include/linux/cyclades.h b/include/linux/cyclades.h index 504cb2c3fa9a..2d3d1e04ba92 100644 --- a/include/linux/cyclades.h +++ b/include/linux/cyclades.h @@ -550,11 +550,11 @@ struct cyclades_icount { struct cyclades_port { int magic; + struct tty_port port; struct cyclades_card *card; int line; int flags; /* defined in tty.h */ int type; /* UART type */ - struct tty_struct *tty; int read_status_mask; int ignore_status_mask; int timeout; @@ -567,13 +567,8 @@ struct cyclades_port { int chip_rev; int custom_divisor; u8 x_char; /* to be pushed out ASAP */ - int close_delay; - unsigned short closing_wait; - int count; /* # of fd on device */ int breakon; int breakoff; - int blocked_open; /* # of blocked opens */ - unsigned char *xmit_buf; int xmit_head; int xmit_tail; int xmit_cnt; @@ -583,16 +578,14 @@ struct cyclades_port { struct cyclades_monitor mon; struct cyclades_idle_stats idle_stats; struct cyclades_icount icount; - wait_queue_head_t open_wait; - wait_queue_head_t close_wait; struct completion shutdown_wait; wait_queue_head_t delta_msr_wait; int throttle; }; #define CLOSING_WAIT_DELAY 30*HZ -#define CY_CLOSING_WAIT_NONE 65535 -#define CY_CLOSING_WAIT_INF 0 +#define CY_CLOSING_WAIT_NONE ASYNC_CLOSING_WAIT_NONE +#define CY_CLOSING_WAIT_INF ASYNC_CLOSING_WAIT_INF #define CyMAX_CHIPS_PER_CARD 8 diff --git a/include/linux/dca.h b/include/linux/dca.h index af61cd1f37e9..b00a753eda53 100644 --- a/include/linux/dca.h +++ b/include/linux/dca.h @@ -10,6 +10,7 @@ void dca_unregister_notify(struct notifier_block *nb); #define DCA_PROVIDER_REMOVE 0x0002 struct dca_provider { + struct list_head node; struct dca_ops *ops; struct device *cd; int id; @@ -18,7 +19,9 @@ struct dca_provider { struct dca_ops { int (*add_requester) (struct dca_provider *, struct device *); int (*remove_requester) (struct dca_provider *, struct device *); - u8 (*get_tag) (struct dca_provider *, int cpu); + u8 (*get_tag) (struct dca_provider *, struct device *, + int cpu); + int (*dev_managed) (struct dca_provider *, struct device *); }; struct dca_provider *alloc_dca_provider(struct dca_ops *ops, int priv_size); @@ -32,9 +35,11 @@ static inline void *dca_priv(struct dca_provider *dca) } /* Requester API */ +#define DCA_GET_TAG_TWO_ARGS int dca_add_requester(struct device *dev); int dca_remove_requester(struct device *dev); u8 dca_get_tag(int cpu); +u8 dca3_get_tag(struct device *dev, int cpu); /* internal stuff */ int __init dca_sysfs_init(void); diff --git a/include/linux/dcache.h b/include/linux/dcache.h index d982eb89c77d..98202c672fde 100644 --- a/include/linux/dcache.h +++ b/include/linux/dcache.h @@ -3,6 +3,7 @@ #include <asm/atomic.h> #include <linux/list.h> 
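crypto.h above grows an asynchronous hash type (CRYPTO_ALG_TYPE_AHASH, struct ahash_request, struct ahash_alg). A skeleton of the operations an ahash provider fills in, matching the fields shown; the names are placeholders and the registration glue is omitted:

#include <linux/crypto.h>

static int example_ahash_init(struct ahash_request *req)
{
        /* reset per-request state kept in req->__ctx */
        return 0;
}

static int example_ahash_update(struct ahash_request *req)
{
        /* hash req->nbytes bytes from the req->src scatterlist, or
         * return -EINPROGRESS and complete asynchronously */
        return 0;
}

static int example_ahash_final(struct ahash_request *req)
{
        /* write the digest to req->result */
        return 0;
}

static int example_ahash_digest(struct ahash_request *req)
{
        return example_ahash_init(req) ?:
               example_ahash_update(req) ?:
               example_ahash_final(req);
}

static struct ahash_alg example_ahash = {
        .init           = example_ahash_init,
        .update         = example_ahash_update,
        .final          = example_ahash_final,
        .digest         = example_ahash_digest,
        .digestsize     = 20,   /* e.g. a SHA-1-sized output */
};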
+#include <linux/rculist.h> #include <linux/spinlock.h> #include <linux/cache.h> #include <linux/rcupdate.h> diff --git a/include/linux/dccp.h b/include/linux/dccp.h index aa0737019e37..6080449fbec9 100644 --- a/include/linux/dccp.h +++ b/include/linux/dccp.h @@ -364,8 +364,6 @@ static inline unsigned int dccp_hdr_len(const struct sk_buff *skb) /* FIXME: for now we're default to 1 but it should really be 0 */ #define DCCPF_INITIAL_SEND_NDP_COUNT 1 -#define DCCP_NDP_LIMIT 0xFFFFFF - /** * struct dccp_minisock - Minimal DCCP connection representation * @@ -437,7 +435,7 @@ extern int dccp_parse_options(struct sock *sk, struct dccp_request_sock *dreq, struct sk_buff *skb); struct dccp_options_received { - u32 dccpor_ndp; /* only 24 bits */ + u64 dccpor_ndp:48; u32 dccpor_timestamp; u32 dccpor_timestamp_echo; u32 dccpor_elapsed_time; @@ -533,7 +531,7 @@ struct dccp_sock { __u16 dccps_r_ack_ratio; __u16 dccps_pcslen; __u16 dccps_pcrlen; - unsigned long dccps_ndp_count; + __u64 dccps_ndp_count:48; unsigned long dccps_rate_last; struct dccp_minisock dccps_minisock; struct dccp_ackvec *dccps_hc_rx_ackvec; diff --git a/include/linux/debugfs.h b/include/linux/debugfs.h index 7266124361b4..e1a6c046cea3 100644 --- a/include/linux/debugfs.h +++ b/include/linux/debugfs.h @@ -26,6 +26,8 @@ struct debugfs_blob_wrapper { unsigned long size; }; +extern struct dentry *arch_debugfs_dir; + #if defined(CONFIG_DEBUG_FS) /* declared over in file.c */ @@ -42,6 +44,7 @@ struct dentry *debugfs_create_symlink(const char *name, struct dentry *parent, const char *dest); void debugfs_remove(struct dentry *dentry); +void debugfs_remove_recursive(struct dentry *dentry); struct dentry *debugfs_rename(struct dentry *old_dir, struct dentry *old_dentry, struct dentry *new_dir, const char *new_name); @@ -99,6 +102,9 @@ static inline struct dentry *debugfs_create_symlink(const char *name, static inline void debugfs_remove(struct dentry *dentry) { } +static inline void debugfs_remove_recursive(struct dentry *dentry) +{ } + static inline struct dentry *debugfs_rename(struct dentry *old_dir, struct dentry *old_dentry, struct dentry *new_dir, char *new_name) { diff --git a/include/linux/delay.h b/include/linux/delay.h index 54552d21296e..fd832c6d419e 100644 --- a/include/linux/delay.h +++ b/include/linux/delay.h @@ -41,6 +41,7 @@ static inline void ndelay(unsigned long x) #define ndelay(x) ndelay(x) #endif +extern unsigned long lpj_fine; void calibrate_delay(void); void msleep(unsigned int msecs); unsigned long msleep_interruptible(unsigned int msecs); diff --git a/include/linux/device-mapper.h b/include/linux/device-mapper.h index 0d8d419d191a..a90222e3297d 100644 --- a/include/linux/device-mapper.h +++ b/include/linux/device-mapper.h @@ -9,11 +9,13 @@ #define _LINUX_DEVICE_MAPPER_H #include <linux/bio.h> +#include <linux/blkdev.h> struct dm_target; struct dm_table; struct dm_dev; struct mapped_device; +struct bio_vec; typedef enum { STATUSTYPE_INFO, STATUSTYPE_TABLE } status_type_t; @@ -72,6 +74,9 @@ typedef int (*dm_ioctl_fn) (struct dm_target *ti, struct inode *inode, struct file *filp, unsigned int cmd, unsigned long arg); +typedef int (*dm_merge_fn) (struct dm_target *ti, struct bvec_merge_data *bvm, + struct bio_vec *biovec, int max_size); + void dm_error(const char *message); /* @@ -107,6 +112,7 @@ struct target_type { dm_status_fn status; dm_message_fn message; dm_ioctl_fn ioctl; + dm_merge_fn merge; }; struct io_restrictions { diff --git a/include/linux/device.h b/include/linux/device.h index 6a2d04c011bc..d24a47f80f9c 
100644 --- a/include/linux/device.h +++ b/include/linux/device.h @@ -16,6 +16,7 @@ #include <linux/kobject.h> #include <linux/klist.h> #include <linux/list.h> +#include <linux/lockdep.h> #include <linux/compiler.h> #include <linux/types.h> #include <linux/module.h> @@ -24,17 +25,13 @@ #include <asm/atomic.h> #include <asm/device.h> -#define DEVICE_NAME_SIZE 50 -/* DEVICE_NAME_HALF is really less than half to accommodate slop */ -#define DEVICE_NAME_HALF __stringify(20) -#define DEVICE_ID_SIZE 32 -#define BUS_ID_SIZE KOBJ_NAME_LEN - +#define BUS_ID_SIZE 20 struct device; struct device_driver; struct driver_private; struct class; +struct class_private; struct bus_type; struct bus_type_private; @@ -68,6 +65,8 @@ struct bus_type { int (*resume_early)(struct device *dev); int (*resume)(struct device *dev); + struct pm_ext_ops *pm; + struct bus_type_private *p; }; @@ -131,6 +130,8 @@ struct device_driver { int (*resume) (struct device *dev); struct attribute_group **groups; + struct pm_ops *pm; + struct driver_private *p; }; @@ -182,13 +183,9 @@ struct class { const char *name; struct module *owner; - struct kset subsys; - struct list_head devices; - struct list_head interfaces; - struct kset class_dirs; - struct semaphore sem; /* locks children, devices, interfaces */ struct class_attribute *class_attrs; struct device_attribute *dev_attrs; + struct kobject *dev_kobj; int (*dev_uevent)(struct device *dev, struct kobj_uevent_env *env); @@ -197,13 +194,30 @@ struct class { int (*suspend)(struct device *dev, pm_message_t state); int (*resume)(struct device *dev); + + struct pm_ops *pm; + struct class_private *p; }; -extern int __must_check class_register(struct class *class); +extern struct kobject *sysfs_dev_block_kobj; +extern struct kobject *sysfs_dev_char_kobj; +extern int __must_check __class_register(struct class *class, + struct lock_class_key *key); extern void class_unregister(struct class *class); -extern int class_for_each_device(struct class *class, void *data, + +/* This is a #define to keep the compiler from merging different + * instances of the __key variable */ +#define class_register(class) \ +({ \ + static struct lock_class_key __key; \ + __class_register(class, &__key); \ +}) + +extern int class_for_each_device(struct class *class, struct device *start, + void *data, int (*fn)(struct device *dev, void *data)); -extern struct device *class_find_device(struct class *class, void *data, +extern struct device *class_find_device(struct class *class, + struct device *start, void *data, int (*match)(struct device *, void *)); struct class_attribute { @@ -231,9 +245,19 @@ struct class_interface { extern int __must_check class_interface_register(struct class_interface *); extern void class_interface_unregister(struct class_interface *); -extern struct class *class_create(struct module *owner, const char *name); +extern struct class * __must_check __class_create(struct module *owner, + const char *name, + struct lock_class_key *key); extern void class_destroy(struct class *cls); +/* This is a #define to keep the compiler from merging different + * instances of the __key variable */ +#define class_create(owner, name) \ +({ \ + static struct lock_class_key __key; \ + __class_create(owner, name, &__key); \ +}) + /* * The type of device, "struct device" is embedded in. 
A class * or bus can contain devices of different types @@ -248,8 +272,11 @@ struct device_type { struct attribute_group **groups; int (*uevent)(struct device *dev, struct kobj_uevent_env *env); void (*release)(struct device *dev); + int (*suspend)(struct device *dev, pm_message_t state); int (*resume)(struct device *dev); + + struct pm_ops *pm; }; /* interface for exporting device attributes */ @@ -459,14 +486,10 @@ extern struct device *device_create_vargs(struct class *cls, const char *fmt, va_list vargs); extern struct device *device_create(struct class *cls, struct device *parent, - dev_t devt, const char *fmt, ...) - __attribute__((format(printf, 4, 5))); -extern struct device *device_create_drvdata(struct class *cls, - struct device *parent, - dev_t devt, - void *drvdata, - const char *fmt, ...) + dev_t devt, void *drvdata, + const char *fmt, ...) __attribute__((format(printf, 5, 6))); +#define device_create_drvdata device_create extern void device_destroy(struct class *cls, dev_t devt); /* diff --git a/include/linux/dm-ioctl.h b/include/linux/dm-ioctl.h index b03c41bbfa14..28c2940eb30d 100644 --- a/include/linux/dm-ioctl.h +++ b/include/linux/dm-ioctl.h @@ -256,9 +256,9 @@ enum { #define DM_DEV_SET_GEOMETRY _IOWR(DM_IOCTL, DM_DEV_SET_GEOMETRY_CMD, struct dm_ioctl) #define DM_VERSION_MAJOR 4 -#define DM_VERSION_MINOR 13 +#define DM_VERSION_MINOR 14 #define DM_VERSION_PATCHLEVEL 0 -#define DM_VERSION_EXTRA "-ioctl (2007-10-18)" +#define DM_VERSION_EXTRA "-ioctl (2008-04-23)" /* Status bits */ #define DM_READONLY_FLAG (1 << 0) /* In/Out */ diff --git a/include/linux/dm9000.h b/include/linux/dm9000.h index a3750462f9e3..fc82446b6425 100644 --- a/include/linux/dm9000.h +++ b/include/linux/dm9000.h @@ -21,6 +21,7 @@ #define DM9000_PLATF_32BITONLY (0x0004) #define DM9000_PLATF_EXT_PHY (0x0008) #define DM9000_PLATF_NO_EEPROM (0x0010) +#define DM9000_PLATF_SIMPLE_PHY (0x0020) /* Use NSR to find LinkStatus */ /* platfrom data for platfrom device structure's platfrom_data field */ diff --git a/include/linux/dma-attrs.h b/include/linux/dma-attrs.h index 1677e2bfa00c..71ad34eca6e3 100644 --- a/include/linux/dma-attrs.h +++ b/include/linux/dma-attrs.h @@ -12,6 +12,7 @@ */ enum dma_attr { DMA_ATTR_WRITE_BARRIER, + DMA_ATTR_WEAK_ORDERING, DMA_ATTR_MAX, }; diff --git a/include/linux/dmaengine.h b/include/linux/dmaengine.h index d08a5c5eb928..adb0b084eb5a 100644 --- a/include/linux/dmaengine.h +++ b/include/linux/dmaengine.h @@ -89,10 +89,23 @@ enum dma_transaction_type { DMA_MEMSET, DMA_MEMCPY_CRC32C, DMA_INTERRUPT, + DMA_SLAVE, }; /* last transaction type for creation of the capabilities mask */ -#define DMA_TX_TYPE_END (DMA_INTERRUPT + 1) +#define DMA_TX_TYPE_END (DMA_SLAVE + 1) + +/** + * enum dma_slave_width - DMA slave register access width. + * @DMA_SLAVE_WIDTH_8BIT: Do 8-bit slave register accesses + * @DMA_SLAVE_WIDTH_16BIT: Do 16-bit slave register accesses + * @DMA_SLAVE_WIDTH_32BIT: Do 32-bit slave register accesses + */ +enum dma_slave_width { + DMA_SLAVE_WIDTH_8BIT, + DMA_SLAVE_WIDTH_16BIT, + DMA_SLAVE_WIDTH_32BIT, +}; /** * enum dma_ctrl_flags - DMA flags to augment operation preparation, @@ -102,10 +115,14 @@ enum dma_transaction_type { * @DMA_CTRL_ACK - the descriptor cannot be reused until the client * acknowledges receipt, i.e. 
has has a chance to establish any * dependency chains + * @DMA_COMPL_SKIP_SRC_UNMAP - set to disable dma-unmapping the source buffer(s) + * @DMA_COMPL_SKIP_DEST_UNMAP - set to disable dma-unmapping the destination(s) */ enum dma_ctrl_flags { DMA_PREP_INTERRUPT = (1 << 0), DMA_CTRL_ACK = (1 << 1), + DMA_COMPL_SKIP_SRC_UNMAP = (1 << 2), + DMA_COMPL_SKIP_DEST_UNMAP = (1 << 3), }; /** @@ -115,6 +132,32 @@ enum dma_ctrl_flags { typedef struct { DECLARE_BITMAP(bits, DMA_TX_TYPE_END); } dma_cap_mask_t; /** + * struct dma_slave - Information about a DMA slave + * @dev: device acting as DMA slave + * @dma_dev: required DMA master device. If non-NULL, the client can not be + * bound to other masters than this. + * @tx_reg: physical address of data register used for + * memory-to-peripheral transfers + * @rx_reg: physical address of data register used for + * peripheral-to-memory transfers + * @reg_width: peripheral register width + * + * If dma_dev is non-NULL, the client can not be bound to other DMA + * masters than the one corresponding to this device. The DMA master + * driver may use this to determine if there is controller-specific + * data wrapped around this struct. Drivers of platform code that sets + * the dma_dev field must therefore make sure to use an appropriate + * controller-specific dma slave structure wrapping this struct. + */ +struct dma_slave { + struct device *dev; + struct device *dma_dev; + dma_addr_t tx_reg; + dma_addr_t rx_reg; + enum dma_slave_width reg_width; +}; + +/** * struct dma_chan_percpu - the per-CPU part of struct dma_chan * @refcount: local_t used for open-coded "bigref" counting * @memcpy_count: transaction counter @@ -139,6 +182,7 @@ struct dma_chan_percpu { * @rcu: the DMA channel's RCU head * @device_node: used to add this to the device chan list * @local: per-cpu pointer to a struct dma_chan_percpu + * @client-count: how many clients are using this channel */ struct dma_chan { struct dma_device *device; @@ -154,6 +198,7 @@ struct dma_chan { struct list_head device_node; struct dma_chan_percpu *local; + int client_count; }; #define to_dma_chan(p) container_of(p, struct dma_chan, dev) @@ -202,11 +247,14 @@ typedef enum dma_state_client (*dma_event_callback) (struct dma_client *client, * @event_callback: func ptr to call when something happens * @cap_mask: only return channels that satisfy the requested capabilities * a value of zero corresponds to any capability + * @slave: data for preparing slave transfer. Must be non-NULL iff the + * DMA_SLAVE capability is requested. 
* @global_node: list_head for global dma_client_list */ struct dma_client { dma_event_callback event_callback; dma_cap_mask_t cap_mask; + struct dma_slave *slave; struct list_head global_node; }; @@ -263,6 +311,8 @@ struct dma_async_tx_descriptor { * @device_prep_dma_zero_sum: prepares a zero_sum operation * @device_prep_dma_memset: prepares a memset operation * @device_prep_dma_interrupt: prepares an end of chain interrupt operation + * @device_prep_slave_sg: prepares a slave dma operation + * @device_terminate_all: terminate all pending operations * @device_issue_pending: push pending transactions to hardware */ struct dma_device { @@ -279,7 +329,8 @@ struct dma_device { int dev_id; struct device *dev; - int (*device_alloc_chan_resources)(struct dma_chan *chan); + int (*device_alloc_chan_resources)(struct dma_chan *chan, + struct dma_client *client); void (*device_free_chan_resources)(struct dma_chan *chan); struct dma_async_tx_descriptor *(*device_prep_dma_memcpy)( @@ -297,6 +348,12 @@ struct dma_device { struct dma_async_tx_descriptor *(*device_prep_dma_interrupt)( struct dma_chan *chan, unsigned long flags); + struct dma_async_tx_descriptor *(*device_prep_slave_sg)( + struct dma_chan *chan, struct scatterlist *sgl, + unsigned int sg_len, enum dma_data_direction direction, + unsigned long flags); + void (*device_terminate_all)(struct dma_chan *chan); + enum dma_status (*device_is_tx_complete)(struct dma_chan *chan, dma_cookie_t cookie, dma_cookie_t *last, dma_cookie_t *used); @@ -318,16 +375,14 @@ dma_cookie_t dma_async_memcpy_pg_to_pg(struct dma_chan *chan, void dma_async_tx_descriptor_init(struct dma_async_tx_descriptor *tx, struct dma_chan *chan); -static inline void -async_tx_ack(struct dma_async_tx_descriptor *tx) +static inline void async_tx_ack(struct dma_async_tx_descriptor *tx) { tx->flags |= DMA_CTRL_ACK; } -static inline int -async_tx_test_ack(struct dma_async_tx_descriptor *tx) +static inline bool async_tx_test_ack(struct dma_async_tx_descriptor *tx) { - return tx->flags & DMA_CTRL_ACK; + return (tx->flags & DMA_CTRL_ACK) == DMA_CTRL_ACK; } #define first_dma_cap(mask) __first_dma_cap(&(mask)) diff --git a/include/linux/dw_dmac.h b/include/linux/dw_dmac.h new file mode 100644 index 000000000000..04d217b442bf --- /dev/null +++ b/include/linux/dw_dmac.h @@ -0,0 +1,62 @@ +/* + * Driver for the Synopsys DesignWare DMA Controller (aka DMACA on + * AVR32 systems.) + * + * Copyright (C) 2007 Atmel Corporation + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
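dmaengine.h above adds DMA_SLAVE, struct dma_slave, and a slave pointer in struct dma_client. A hedged sketch of a peripheral driver describing its FIFO registers and asking for a slave channel; the addresses, names, and trivial event callback are placeholders, and the dma_async_client_* calls are the pre-existing client API:

#include <linux/dmaengine.h>

static struct dma_slave example_slave;
static struct dma_client example_client;

static enum dma_state_client example_dma_event(struct dma_client *client,
                                               struct dma_chan *chan,
                                               enum dma_state state)
{
        /* a real driver would remember the channel here */
        return DMA_ACK;
}

static void example_request_slave_channel(struct device *dev,
                                          dma_addr_t tx_fifo,
                                          dma_addr_t rx_fifo)
{
        example_slave.dev = dev;
        example_slave.tx_reg = tx_fifo;
        example_slave.rx_reg = rx_fifo;
        example_slave.reg_width = DMA_SLAVE_WIDTH_32BIT;

        example_client.event_callback = example_dma_event;
        example_client.slave = &example_slave;
        dma_cap_set(DMA_SLAVE, example_client.cap_mask);

        dma_async_client_register(&example_client);
        dma_async_client_chan_request(&example_client);
}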
+ */ +#ifndef DW_DMAC_H +#define DW_DMAC_H + +#include <linux/dmaengine.h> + +/** + * struct dw_dma_platform_data - Controller configuration parameters + * @nr_channels: Number of channels supported by hardware (max 8) + */ +struct dw_dma_platform_data { + unsigned int nr_channels; +}; + +/** + * struct dw_dma_slave - Controller-specific information about a slave + * @slave: Generic information about the slave + * @ctl_lo: Platform-specific initializer for the CTL_LO register + * @cfg_hi: Platform-specific initializer for the CFG_HI register + * @cfg_lo: Platform-specific initializer for the CFG_LO register + */ +struct dw_dma_slave { + struct dma_slave slave; + u32 cfg_hi; + u32 cfg_lo; +}; + +/* Platform-configurable bits in CFG_HI */ +#define DWC_CFGH_FCMODE (1 << 0) +#define DWC_CFGH_FIFO_MODE (1 << 1) +#define DWC_CFGH_PROTCTL(x) ((x) << 2) +#define DWC_CFGH_SRC_PER(x) ((x) << 7) +#define DWC_CFGH_DST_PER(x) ((x) << 11) + +/* Platform-configurable bits in CFG_LO */ +#define DWC_CFGL_PRIO(x) ((x) << 5) /* priority */ +#define DWC_CFGL_LOCK_CH_XFER (0 << 12) /* scope of LOCK_CH */ +#define DWC_CFGL_LOCK_CH_BLOCK (1 << 12) +#define DWC_CFGL_LOCK_CH_XACT (2 << 12) +#define DWC_CFGL_LOCK_BUS_XFER (0 << 14) /* scope of LOCK_BUS */ +#define DWC_CFGL_LOCK_BUS_BLOCK (1 << 14) +#define DWC_CFGL_LOCK_BUS_XACT (2 << 14) +#define DWC_CFGL_LOCK_CH (1 << 15) /* channel lockout */ +#define DWC_CFGL_LOCK_BUS (1 << 16) /* busmaster lockout */ +#define DWC_CFGL_HS_DST_POL (1 << 18) /* dst handshake active low */ +#define DWC_CFGL_HS_SRC_POL (1 << 19) /* src handshake active low */ + +static inline struct dw_dma_slave *to_dw_dma_slave(struct dma_slave *slave) +{ + return container_of(slave, struct dw_dma_slave, slave); +} + +#endif /* DW_DMAC_H */ diff --git a/include/linux/efi.h b/include/linux/efi.h index a5f359a7ad0e..807373d467f7 100644 --- a/include/linux/efi.h +++ b/include/linux/efi.h @@ -287,7 +287,6 @@ efi_guid_unparse(efi_guid_t *guid, char *out) extern void efi_init (void); extern void *efi_get_pal_addr (void); extern void efi_map_pal_code (void); -extern void efi_map_memmap(void); extern void efi_memmap_walk (efi_freemem_callback_t callback, void *arg); extern void efi_gettimeofday (struct timespec *ts); extern void efi_enter_virtual_mode (void); /* switch EFI to virtual mode, if possible */ @@ -295,14 +294,11 @@ extern u64 efi_get_iobase (void); extern u32 efi_mem_type (unsigned long phys_addr); extern u64 efi_mem_attributes (unsigned long phys_addr); extern u64 efi_mem_attribute (unsigned long phys_addr, unsigned long size); -extern int efi_mem_attribute_range (unsigned long phys_addr, unsigned long size, - u64 attr); extern int __init efi_uart_console_only (void); extern void efi_initialize_iomem_resources(struct resource *code_resource, struct resource *data_resource, struct resource *bss_resource); extern unsigned long efi_get_time(void); extern int efi_set_rtc_mmss(unsigned long nowtime); -extern int is_available_memory(efi_memory_desc_t * md); extern struct efi_memory_map memmap; /** diff --git a/include/linux/eisa.h b/include/linux/eisa.h index fe806b6f030d..e61c0be2a459 100644 --- a/include/linux/eisa.h +++ b/include/linux/eisa.h @@ -40,7 +40,7 @@ struct eisa_device { u64 dma_mask; struct device dev; /* generic device */ #ifdef CONFIG_EISA_NAMES - char pretty_name[DEVICE_NAME_SIZE]; + char pretty_name[50]; #endif }; diff --git a/include/linux/elf.h b/include/linux/elf.h index ff9fbed90123..edc3dac3f02f 100644 --- a/include/linux/elf.h +++ b/include/linux/elf.h @@ -358,6 +358,7 @@ 
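The new dw_dmac.h wraps the generic dma_slave in a controller-specific dw_dma_slave with CFG_HI/CFG_LO initializers. A sketch of board code filling one in; the handshake interface numbers are placeholders for a particular SoC:

#include <linux/dw_dmac.h>

static struct dw_dma_slave example_dw_slave = {
        /* hardware handshake interfaces 1 (mem->per) and 2 (per->mem);
         * the values are SoC-specific placeholders */
        .cfg_hi = DWC_CFGH_SRC_PER(1) | DWC_CFGH_DST_PER(2),
        .cfg_lo = DWC_CFGL_PRIO(0),
};

/* the embedded generic part (dev, tx_reg, rx_reg, reg_width) is filled
 * in at probe time and example_dw_slave.slave is what gets handed to
 * the dma_client, as in the dma_slave sketch above */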
typedef struct elf64_shdr { #define NT_PRXFPREG 0x46e62b7f /* copied from gdb5.1/include/elf/common.h */ #define NT_PPC_VMX 0x100 /* PowerPC Altivec/VMX registers */ #define NT_PPC_SPE 0x101 /* PowerPC SPE/EVR registers */ +#define NT_PPC_VSX 0x102 /* PowerPC VSX registers */ #define NT_386_TLS 0x200 /* i386 TLS slots (struct user_desc) */ diff --git a/include/linux/ethtool.h b/include/linux/ethtool.h index c8d216357865..8bb5e87df365 100644 --- a/include/linux/ethtool.h +++ b/include/linux/ethtool.h @@ -272,6 +272,12 @@ enum ethtool_flags { ETH_FLAG_LRO = (1 << 15), /* LRO is enabled */ }; +struct ethtool_rxnfc { + __u32 cmd; + __u32 flow_type; + __u64 data; +}; + #ifdef __KERNEL__ struct net_device; @@ -396,6 +402,8 @@ struct ethtool_ops { /* the following hooks are obsolete */ int (*self_test_count)(struct net_device *);/* use get_sset_count */ int (*get_stats_count)(struct net_device *);/* use get_sset_count */ + int (*get_rxhash)(struct net_device *, struct ethtool_rxnfc *); + int (*set_rxhash)(struct net_device *, struct ethtool_rxnfc *); }; #endif /* __KERNEL__ */ @@ -442,6 +450,9 @@ struct ethtool_ops { #define ETHTOOL_GPFLAGS 0x00000027 /* Get driver-private flags bitmap */ #define ETHTOOL_SPFLAGS 0x00000028 /* Set driver-private flags bitmap */ +#define ETHTOOL_GRXFH 0x00000029 /* Get RX flow hash configuration */ +#define ETHTOOL_SRXFH 0x0000002a /* Set RX flow hash configuration */ + /* compatibility with older code */ #define SPARC_ETH_GSET ETHTOOL_GSET #define SPARC_ETH_SSET ETHTOOL_SSET @@ -528,4 +539,26 @@ struct ethtool_ops { #define WAKE_MAGIC (1 << 5) #define WAKE_MAGICSECURE (1 << 6) /* only meaningful if WAKE_MAGIC */ +/* L3-L4 network traffic flow types */ +#define TCP_V4_FLOW 0x01 +#define UDP_V4_FLOW 0x02 +#define SCTP_V4_FLOW 0x03 +#define AH_ESP_V4_FLOW 0x04 +#define TCP_V6_FLOW 0x05 +#define UDP_V6_FLOW 0x06 +#define SCTP_V6_FLOW 0x07 +#define AH_ESP_V6_FLOW 0x08 + +/* L3-L4 network traffic flow hash options */ +#define RXH_DEV_PORT (1 << 0) +#define RXH_L2DA (1 << 1) +#define RXH_VLAN (1 << 2) +#define RXH_L3_PROTO (1 << 3) +#define RXH_IP_SRC (1 << 4) +#define RXH_IP_DST (1 << 5) +#define RXH_L4_B_0_1 (1 << 6) /* src port in case of TCP/UDP/SCTP */ +#define RXH_L4_B_2_3 (1 << 7) /* dst port in case of TCP/UDP/SCTP */ +#define RXH_DISCARD (1 << 31) + + #endif /* _LINUX_ETHTOOL_H */ diff --git a/include/linux/firmware-map.h b/include/linux/firmware-map.h new file mode 100644 index 000000000000..acbdbcc16051 --- /dev/null +++ b/include/linux/firmware-map.h @@ -0,0 +1,74 @@ +/* + * include/linux/firmware-map.h: + * Copyright (C) 2008 SUSE LINUX Products GmbH + * by Bernhard Walle <bwalle@suse.de> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License v2.0 as published by + * the Free Software Foundation + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + */ +#ifndef _LINUX_FIRMWARE_MAP_H +#define _LINUX_FIRMWARE_MAP_H + +#include <linux/list.h> +#include <linux/kobject.h> + +/* + * provide a dummy interface if CONFIG_FIRMWARE_MEMMAP is disabled + */ +#ifdef CONFIG_FIRMWARE_MEMMAP + +/** + * Adds a firmware mapping entry. This function uses kmalloc() for memory + * allocation. Use firmware_map_add_early() if you want to use the bootmem + * allocator. 
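ethtool.h above adds ETHTOOL_GRXFH/SRXFH, struct ethtool_rxnfc, and the RXH_* / *_FLOW constants for RX flow hash configuration. A sketch of a driver-side get_rxhash handler (wired up via ethtool_ops) reporting a 4-tuple hash for TCP over IPv4; the function name is made up:

#include <linux/ethtool.h>
#include <linux/errno.h>

static int example_get_rxhash(struct net_device *dev,
                              struct ethtool_rxnfc *info)
{
        switch (info->flow_type) {
        case TCP_V4_FLOW:
                /* hash on src/dst IP address and src/dst TCP port */
                info->data = RXH_IP_SRC | RXH_IP_DST |
                             RXH_L4_B_0_1 | RXH_L4_B_2_3;
                return 0;
        default:
                return -EOPNOTSUPP;
        }
}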
+ * + * That function must be called before late_initcall. + * + * @start: Start of the memory range. + * @end: End of the memory range (inclusive). + * @type: Type of the memory range. + * + * Returns 0 on success, or -ENOMEM if no memory could be allocated. + */ +int firmware_map_add(resource_size_t start, resource_size_t end, + const char *type); + +/** + * Adds a firmware mapping entry. This function uses the bootmem allocator + * for memory allocation. Use firmware_map_add() if you want to use kmalloc(). + * + * That function must be called before late_initcall. + * + * @start: Start of the memory range. + * @end: End of the memory range (inclusive). + * @type: Type of the memory range. + * + * Returns 0 on success, or -ENOMEM if no memory could be allocated. + */ +int firmware_map_add_early(resource_size_t start, resource_size_t end, + const char *type); + +#else /* CONFIG_FIRMWARE_MEMMAP */ + +static inline int firmware_map_add(resource_size_t start, resource_size_t end, + const char *type) +{ + return 0; +} + +static inline int firmware_map_add_early(resource_size_t start, + resource_size_t end, const char *type) +{ + return 0; +} + +#endif /* CONFIG_FIRMWARE_MEMMAP */ + +#endif /* _LINUX_FIRMWARE_MAP_H */ diff --git a/include/linux/firmware.h b/include/linux/firmware.h index 6c7eff2ebada..c8ecf5b2a207 100644 --- a/include/linux/firmware.h +++ b/include/linux/firmware.h @@ -1,18 +1,39 @@ #ifndef _LINUX_FIRMWARE_H #define _LINUX_FIRMWARE_H + #include <linux/module.h> #include <linux/types.h> +#include <linux/compiler.h> + #define FIRMWARE_NAME_MAX 30 #define FW_ACTION_NOHOTPLUG 0 #define FW_ACTION_HOTPLUG 1 struct firmware { size_t size; - u8 *data; + const u8 *data; }; struct device; +struct builtin_fw { + char *name; + void *data; + unsigned long size; +}; + +/* We have to play tricks here much like stringify() to get the + __COUNTER__ macro to be expanded as we want it */ +#define __fw_concat1(x, y) x##y +#define __fw_concat(x, y) __fw_concat1(x, y) + +#define DECLARE_BUILTIN_FIRMWARE(name, blob) \ + DECLARE_BUILTIN_FIRMWARE_SIZE(name, &(blob), sizeof(blob)) + +#define DECLARE_BUILTIN_FIRMWARE_SIZE(name, blob, size) \ + static const struct builtin_fw __fw_concat(__builtin_fw,__COUNTER__) \ + __used __section(.builtin_fw) = { name, blob, size } + #if defined(CONFIG_FW_LOADER) || (defined(CONFIG_FW_LOADER_MODULE) && defined(MODULE)) int request_firmware(const struct firmware **fw, const char *name, struct device *device); diff --git a/include/linux/freezer.h b/include/linux/freezer.h index 08934995c7ab..deddeedf3257 100644 --- a/include/linux/freezer.h +++ b/include/linux/freezer.h @@ -128,6 +128,15 @@ static inline void set_freezable(void) } /* + * Tell the freezer that the current task should be frozen by it and that it + * should send a fake signal to the task to freeze it. 
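The struct builtin_fw and DECLARE_BUILTIN_FIRMWARE() additions shown above let an image linked into the kernel be served by request_firmware() without a userspace round trip. A minimal sketch of the intended use; the blob contents and the "foo/ucode.bin" name are illustrative:

	#include <linux/firmware.h>

	static u8 foo_ucode[] = {
		0x55, 0xaa, 0x01, 0x02,		/* device microcode, abbreviated */
	};

	/* the string must match the name a driver passes to request_firmware() */
	DECLARE_BUILTIN_FIRMWARE("foo/ucode.bin", foo_ucode);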
+ */ +static inline void set_freezable_with_signal(void) +{ + current->flags &= ~(PF_NOFREEZE | PF_FREEZER_NOSIG); +} + +/* * Freezer-friendly wrappers around wait_event_interruptible() and * wait_event_interruptible_timeout(), originally defined in <linux/wait.h> */ @@ -174,6 +183,7 @@ static inline void freezer_do_not_count(void) {} static inline void freezer_count(void) {} static inline int freezer_should_skip(struct task_struct *p) { return 0; } static inline void set_freezable(void) {} +static inline void set_freezable_with_signal(void) {} #define wait_event_freezable(wq, condition) \ wait_event_interruptible(wq, condition) diff --git a/include/linux/fs.h b/include/linux/fs.h index d8e2762ed14d..9c2ac5c0ef5c 100644 --- a/include/linux/fs.h +++ b/include/linux/fs.h @@ -918,12 +918,12 @@ struct file_lock { struct list_head fl_link; /* doubly linked list of all locks */ struct list_head fl_block; /* circular list of blocked processes */ fl_owner_t fl_owner; + unsigned char fl_flags; + unsigned char fl_type; unsigned int fl_pid; struct pid *fl_nspid; wait_queue_head_t fl_wait; struct file *fl_file; - unsigned char fl_flags; - unsigned char fl_type; loff_t fl_start; loff_t fl_end; @@ -1729,6 +1729,8 @@ static inline void invalidate_remote_inode(struct inode *inode) extern int invalidate_inode_pages2(struct address_space *mapping); extern int invalidate_inode_pages2_range(struct address_space *mapping, pgoff_t start, pgoff_t end); +extern void generic_sync_sb_inodes(struct super_block *sb, + struct writeback_control *wbc); extern int write_inode_now(struct inode *, int); extern int filemap_fdatawrite(struct address_space *); extern int filemap_flush(struct address_space *); @@ -1740,6 +1742,8 @@ extern int wait_on_page_writeback_range(struct address_space *mapping, pgoff_t start, pgoff_t end); extern int __filemap_fdatawrite_range(struct address_space *mapping, loff_t start, loff_t end, int sync_mode); +extern int filemap_fdatawrite_range(struct address_space *mapping, + loff_t start, loff_t end); extern long do_fsync(struct file *file, int datasync); extern void sync_supers(void); @@ -1870,7 +1874,8 @@ extern void file_ra_state_init(struct file_ra_state *ra, struct address_space *mapping); extern loff_t no_llseek(struct file *file, loff_t offset, int origin); extern loff_t generic_file_llseek(struct file *file, loff_t offset, int origin); -extern loff_t remote_llseek(struct file *file, loff_t offset, int origin); +extern loff_t generic_file_llseek_unlocked(struct file *file, loff_t offset, + int origin); extern int generic_file_open(struct inode * inode, struct file * filp); extern int nonseekable_open(struct inode * inode, struct file * filp); diff --git a/include/linux/fs_enet_pd.h b/include/linux/fs_enet_pd.h index 9bc045b8c478..8300cab30f9a 100644 --- a/include/linux/fs_enet_pd.h +++ b/include/linux/fs_enet_pd.h @@ -103,10 +103,6 @@ struct fs_mii_bb_platform_info { struct fs_mii_bit mdio_dir; struct fs_mii_bit mdio_dat; struct fs_mii_bit mdc_dat; - int mdio_port; /* port & bit for MDIO */ - int mdio_bit; - int mdc_port; /* port & bit for MDC */ - int mdc_bit; int delay; /* delay in us */ int irq[32]; /* irqs per phy's */ }; @@ -135,11 +131,7 @@ struct fs_platform_info { u32 device_flags; int phy_addr; /* the phy address (-1 no phy) */ -#ifdef CONFIG_PPC_CPM_NEW_BINDING char bus_id[16]; -#else - const char* bus_id; -#endif int phy_irq; /* the phy irq (if it exists) */ const struct fs_mii_bus_info *bus_info; diff --git a/include/linux/fsl_devices.h b/include/linux/fsl_devices.h index 
c415a496de3a..4e625e0094c8 100644 --- a/include/linux/fsl_devices.h +++ b/include/linux/fsl_devices.h @@ -69,6 +69,7 @@ struct gianfar_mdio_data { #define FSL_GIANFAR_DEV_HAS_VLAN 0x00000020 #define FSL_GIANFAR_DEV_HAS_EXTENDED_HASH 0x00000040 #define FSL_GIANFAR_DEV_HAS_PADDING 0x00000080 +#define FSL_GIANFAR_DEV_HAS_MAGIC_PACKET 0x00000100 /* Flags in gianfar_platform_data */ #define FSL_GIANFAR_BRD_HAS_PHY_INTR 0x00000001 /* set or use a timer */ @@ -125,4 +126,10 @@ struct mpc8xx_pcmcia_ops { int(*voltage_set)(int slot, int vcc, int vpp); }; +/* Returns non-zero if the current suspend operation would + * lead to a deep sleep (i.e. power removed from the core, + * instead of just the clock). + */ +int fsl_deep_sleep(void); + #endif /* _FSL_DEVICE_H_ */ diff --git a/include/linux/ftrace.h b/include/linux/ftrace.h new file mode 100644 index 000000000000..f368d041e02d --- /dev/null +++ b/include/linux/ftrace.h @@ -0,0 +1,144 @@ +#ifndef _LINUX_FTRACE_H +#define _LINUX_FTRACE_H + +#ifdef CONFIG_FTRACE + +#include <linux/linkage.h> +#include <linux/fs.h> + +extern int ftrace_enabled; +extern int +ftrace_enable_sysctl(struct ctl_table *table, int write, + struct file *filp, void __user *buffer, size_t *lenp, + loff_t *ppos); + +typedef void (*ftrace_func_t)(unsigned long ip, unsigned long parent_ip); + +struct ftrace_ops { + ftrace_func_t func; + struct ftrace_ops *next; +}; + +/* + * The ftrace_ops must be a static and should also + * be read_mostly. These functions do modify read_mostly variables + * so use them sparely. Never free an ftrace_op or modify the + * next pointer after it has been registered. Even after unregistering + * it, the next pointer may still be used internally. + */ +int register_ftrace_function(struct ftrace_ops *ops); +int unregister_ftrace_function(struct ftrace_ops *ops); +void clear_ftrace_function(void); + +extern void ftrace_stub(unsigned long a0, unsigned long a1); + +#else /* !CONFIG_FTRACE */ +# define register_ftrace_function(ops) do { } while (0) +# define unregister_ftrace_function(ops) do { } while (0) +# define clear_ftrace_function(ops) do { } while (0) +#endif /* CONFIG_FTRACE */ + +#ifdef CONFIG_DYNAMIC_FTRACE +# define FTRACE_HASHBITS 10 +# define FTRACE_HASHSIZE (1<<FTRACE_HASHBITS) + +enum { + FTRACE_FL_FREE = (1 << 0), + FTRACE_FL_FAILED = (1 << 1), + FTRACE_FL_FILTER = (1 << 2), + FTRACE_FL_ENABLED = (1 << 3), + FTRACE_FL_NOTRACE = (1 << 4), + FTRACE_FL_CONVERTED = (1 << 5), + FTRACE_FL_FROZEN = (1 << 6), +}; + +struct dyn_ftrace { + struct hlist_node node; + unsigned long ip; /* address of mcount call-site */ + unsigned long flags; +}; + +int ftrace_force_update(void); +void ftrace_set_filter(unsigned char *buf, int len, int reset); + +/* defined in arch */ +extern int ftrace_ip_converted(unsigned long ip); +extern unsigned char *ftrace_nop_replace(void); +extern unsigned char *ftrace_call_replace(unsigned long ip, unsigned long addr); +extern int ftrace_dyn_arch_init(void *data); +extern int ftrace_mcount_set(unsigned long *data); +extern int ftrace_modify_code(unsigned long ip, unsigned char *old_code, + unsigned char *new_code); +extern int ftrace_update_ftrace_func(ftrace_func_t func); +extern void ftrace_caller(void); +extern void ftrace_call(void); +extern void mcount_call(void); + +extern int skip_trace(unsigned long ip); + +void ftrace_disable_daemon(void); +void ftrace_enable_daemon(void); + +#else +# define skip_trace(ip) ({ 0; }) +# define ftrace_force_update() ({ 0; }) +# define ftrace_set_filter(buf, len, reset) do { } while (0) +# 
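Per the ftrace_ops comment above, the ops structure is expected to be static, read-mostly and effectively immortal once registered. A sketch of the registration pattern under CONFIG_FTRACE, with the callback body left empty and the foo_ names invented for the example:

	#include <linux/ftrace.h>
	#include <linux/cache.h>
	#include <linux/init.h>

	static void notrace foo_trace_call(unsigned long ip, unsigned long parent_ip)
	{
		/* runs at every traced mcount site: keep it short and reentrant */
	}

	static struct ftrace_ops foo_trace_ops __read_mostly = {
		.func	= foo_trace_call,
	};

	static int __init foo_trace_init(void)
	{
		return register_ftrace_function(&foo_trace_ops);
	}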
define ftrace_disable_daemon() do { } while (0) +# define ftrace_enable_daemon() do { } while (0) +#endif /* CONFIG_DYNAMIC_FTRACE */ + +/* totally disable ftrace - can not re-enable after this */ +void ftrace_kill(void); +void ftrace_kill_atomic(void); + +static inline void tracer_disable(void) +{ +#ifdef CONFIG_FTRACE + ftrace_enabled = 0; +#endif +} + +#ifdef CONFIG_FRAME_POINTER +/* TODO: need to fix this for ARM */ +# define CALLER_ADDR0 ((unsigned long)__builtin_return_address(0)) +# define CALLER_ADDR1 ((unsigned long)__builtin_return_address(1)) +# define CALLER_ADDR2 ((unsigned long)__builtin_return_address(2)) +# define CALLER_ADDR3 ((unsigned long)__builtin_return_address(3)) +# define CALLER_ADDR4 ((unsigned long)__builtin_return_address(4)) +# define CALLER_ADDR5 ((unsigned long)__builtin_return_address(5)) +# define CALLER_ADDR6 ((unsigned long)__builtin_return_address(6)) +#else +# define CALLER_ADDR0 ((unsigned long)__builtin_return_address(0)) +# define CALLER_ADDR1 0UL +# define CALLER_ADDR2 0UL +# define CALLER_ADDR3 0UL +# define CALLER_ADDR4 0UL +# define CALLER_ADDR5 0UL +# define CALLER_ADDR6 0UL +#endif + +#ifdef CONFIG_IRQSOFF_TRACER + extern void time_hardirqs_on(unsigned long a0, unsigned long a1); + extern void time_hardirqs_off(unsigned long a0, unsigned long a1); +#else +# define time_hardirqs_on(a0, a1) do { } while (0) +# define time_hardirqs_off(a0, a1) do { } while (0) +#endif + +#ifdef CONFIG_PREEMPT_TRACER + extern void trace_preempt_on(unsigned long a0, unsigned long a1); + extern void trace_preempt_off(unsigned long a0, unsigned long a1); +#else +# define trace_preempt_on(a0, a1) do { } while (0) +# define trace_preempt_off(a0, a1) do { } while (0) +#endif + +#ifdef CONFIG_TRACING +extern void +ftrace_special(unsigned long arg1, unsigned long arg2, unsigned long arg3); +#else +static inline void +ftrace_special(unsigned long arg1, unsigned long arg2, unsigned long arg3) { } +#endif + +#endif /* _LINUX_FTRACE_H */ diff --git a/include/linux/gameport.h b/include/linux/gameport.h index afad95272841..f64e29c0ef3f 100644 --- a/include/linux/gameport.h +++ b/include/linux/gameport.h @@ -68,7 +68,6 @@ struct gameport_driver { int gameport_open(struct gameport *gameport, struct gameport_driver *drv, int mode); void gameport_close(struct gameport *gameport); -void gameport_rescan(struct gameport *gameport); #if defined(CONFIG_GAMEPORT) || (defined(MODULE) && defined(CONFIG_GAMEPORT_MODULE)) diff --git a/include/linux/generic_serial.h b/include/linux/generic_serial.h index 110833666e37..4cc913939817 100644 --- a/include/linux/generic_serial.h +++ b/include/linux/generic_serial.h @@ -14,6 +14,7 @@ #ifdef __KERNEL__ #include <linux/mutex.h> +#include <linux/tty.h> struct real_driver { void (*disable_tx_interrupts) (void *); @@ -33,17 +34,12 @@ struct real_driver { struct gs_port { int magic; + struct tty_port port; unsigned char *xmit_buf; int xmit_head; int xmit_tail; int xmit_cnt; struct mutex port_write_mutex; - int flags; - wait_queue_head_t open_wait; - wait_queue_head_t close_wait; - int count; - int blocked_open; - struct tty_struct *tty; unsigned long event; unsigned short closing_wait; int close_delay; diff --git a/include/linux/genhd.h b/include/linux/genhd.h index ae7aec3cabee..e8787417f65a 100644 --- a/include/linux/genhd.h +++ b/include/linux/genhd.h @@ -110,6 +110,14 @@ struct hd_struct { #define GENHD_FL_SUPPRESS_PARTITION_INFO 32 #define GENHD_FL_FAIL 64 +#define BLK_SCSI_MAX_CMDS (256) +#define BLK_SCSI_CMD_PER_LONG (BLK_SCSI_MAX_CMDS / 
(sizeof(long) * 8)) + +struct blk_scsi_cmd_filter { + unsigned long read_ok[BLK_SCSI_CMD_PER_LONG]; + unsigned long write_ok[BLK_SCSI_CMD_PER_LONG]; + struct kobject kobj; +}; struct gendisk { int major; /* major number of driver */ @@ -120,6 +128,7 @@ struct gendisk { struct hd_struct **part; /* [indexed by minor] */ struct block_device_operations *fops; struct request_queue *queue; + struct blk_scsi_cmd_filter cmd_filter; void *private_data; sector_t capacity; @@ -141,6 +150,9 @@ struct gendisk { struct disk_stats dkstats; #endif struct work_struct async_notify; +#ifdef CONFIG_BLK_DEV_INTEGRITY + struct blk_integrity *integrity; +#endif }; /* diff --git a/include/linux/gpio_keys.h b/include/linux/gpio_keys.h index c6d3a9de5634..ec6ecd74781d 100644 --- a/include/linux/gpio_keys.h +++ b/include/linux/gpio_keys.h @@ -9,6 +9,7 @@ struct gpio_keys_button { char *desc; int type; /* input event type (EV_KEY, EV_SW) */ int wakeup; /* configure the button as a wake-up source */ + int debounce_interval; /* debounce ticks interval in msecs */ }; struct gpio_keys_platform_data { diff --git a/include/linux/hayesesp.h b/include/linux/hayesesp.h index 2177ee5b2fe2..940aeb51d53f 100644 --- a/include/linux/hayesesp.h +++ b/include/linux/hayesesp.h @@ -76,11 +76,10 @@ struct hayes_esp_config { struct esp_struct { int magic; + struct tty_port port; spinlock_t lock; - int port; + int io_port; int irq; - int flags; /* defined in tty.h */ - struct tty_struct *tty; int read_status_mask; int ignore_status_mask; int timeout; @@ -93,14 +92,10 @@ struct esp_struct { int MCR; /* Modem control register */ unsigned long last_active; int line; - int count; /* # of fd on device */ - int blocked_open; /* # of blocked opens */ unsigned char *xmit_buf; int xmit_head; int xmit_tail; int xmit_cnt; - wait_queue_head_t open_wait; - wait_queue_head_t close_wait; wait_queue_head_t delta_msr_wait; wait_queue_head_t break_wait; struct async_icount icount; /* kernel counters for the 4 input interrupts */ diff --git a/include/linux/hdlc.h b/include/linux/hdlc.h index 6115545a5b9c..c59769693bee 100644 --- a/include/linux/hdlc.h +++ b/include/linux/hdlc.h @@ -45,7 +45,6 @@ struct hdlc_proto { /* Pointed to by dev->priv */ typedef struct hdlc_device { - struct net_device_stats stats; /* used by HDLC layer to take control over HDLC device from hw driver*/ int (*attach)(struct net_device *dev, unsigned short encoding, unsigned short parity); @@ -109,12 +108,6 @@ int attach_hdlc_protocol(struct net_device *dev, struct hdlc_proto *proto, /* May be used by hardware driver to gain control over HDLC device */ void detach_hdlc_protocol(struct net_device *dev); -static __inline__ struct net_device_stats *hdlc_stats(struct net_device *dev) -{ - return &dev_to_hdlc(dev)->stats; -} - - static __inline__ __be16 hdlc_type_trans(struct sk_buff *skb, struct net_device *dev) { diff --git a/include/linux/hid.h b/include/linux/hid.h index fe56b86f2c67..ac4e678a04ed 100644 --- a/include/linux/hid.h +++ b/include/linux/hid.h @@ -512,7 +512,7 @@ struct hid_descriptor { /* Applications from HID Usage Tables 4/8/99 Version 1.1 */ /* We ignore a few input applications that are not widely used */ -#define IS_INPUT_APPLICATION(a) (((a >= 0x00010000) && (a <= 0x00010008)) || (a == 0x00010080) || (a == 0x000c0001)) +#define IS_INPUT_APPLICATION(a) (((a >= 0x00010000) && (a <= 0x00010008)) || (a == 0x00010080) || (a == 0x000c0001) || (a == 0x000d0002)) /* HID core API */ diff --git a/include/linux/i2c-algo-pcf.h b/include/linux/i2c-algo-pcf.h index 
77afbb60fd11..0177d280f733 100644 --- a/include/linux/i2c-algo-pcf.h +++ b/include/linux/i2c-algo-pcf.h @@ -33,9 +33,11 @@ struct i2c_algo_pcf_data { int (*getclock) (void *data); void (*waitforpin) (void); - /* local settings */ - int udelay; - int timeout; + /* Multi-master lost arbitration back-off delay (msecs) + * This should be set by the bus adapter or knowledgable client + * if bus is multi-mastered, else zero + */ + unsigned long lab_mdelay; }; int i2c_pcf_add_bus(struct i2c_adapter *); diff --git a/include/linux/i2c-id.h b/include/linux/i2c-id.h index 580acc93903e..4862398e05bf 100644 --- a/include/linux/i2c-id.h +++ b/include/linux/i2c-id.h @@ -33,15 +33,11 @@ #define I2C_DRIVERID_MSP3400 1 #define I2C_DRIVERID_TUNER 2 -#define I2C_DRIVERID_TDA8425 4 /* stereo sound processor */ #define I2C_DRIVERID_TEA6420 5 /* audio matrix switch */ #define I2C_DRIVERID_TEA6415C 6 /* video matrix switch */ #define I2C_DRIVERID_TDA9840 7 /* stereo sound processor */ #define I2C_DRIVERID_SAA7111A 8 /* video input processor */ #define I2C_DRIVERID_SAA7185B 13 /* video encoder */ -#define I2C_DRIVERID_TEA6300 18 /* audio mixer */ -#define I2C_DRIVERID_TDA9850 20 /* audio mixer */ -#define I2C_DRIVERID_TDA9855 21 /* audio mixer */ #define I2C_DRIVERID_SAA7110 22 /* video decoder */ #define I2C_DRIVERID_MGATVO 23 /* Matrox TVOut */ #define I2C_DRIVERID_SAA5249 24 /* SAA5249 and compatibles */ @@ -50,9 +46,7 @@ #define I2C_DRIVERID_TDA7432 27 /* Stereo sound processor */ #define I2C_DRIVERID_TVMIXER 28 /* Mixer driver for tv cards */ #define I2C_DRIVERID_TVAUDIO 29 /* Generic TV sound driver */ -#define I2C_DRIVERID_TDA9873 31 /* TV sound decoder chip */ #define I2C_DRIVERID_TDA9875 32 /* TV sound decoder chip */ -#define I2C_DRIVERID_PIC16C54_PV9 33 /* Audio mux/ir receiver */ #define I2C_DRIVERID_BT819 40 /* video decoder */ #define I2C_DRIVERID_BT856 41 /* video encoder */ #define I2C_DRIVERID_VPX3220 42 /* video decoder+vbi/vtxt */ @@ -63,7 +57,6 @@ #define I2C_DRIVERID_INDYCAM 58 /* SGI IndyCam */ #define I2C_DRIVERID_OVCAMCHIP 61 /* OmniVision CMOS image sens. 
*/ #define I2C_DRIVERID_MAX6900 63 /* MAX6900 real-time clock */ -#define I2C_DRIVERID_TDA9874 66 /* TV sound decoder */ #define I2C_DRIVERID_SAA6752HS 67 /* MPEG2 encoder */ #define I2C_DRIVERID_TVEEPROM 68 /* TV EEPROM */ #define I2C_DRIVERID_WM8775 69 /* wm8775 audio processor */ @@ -91,8 +84,6 @@ #define I2C_DRIVERID_M52790 95 /* Mitsubishi M52790SP/FP AV switch */ #define I2C_DRIVERID_CS5345 96 /* cs5345 audio processor */ -#define I2C_DRIVERID_I2CDEV 900 - #define I2C_DRIVERID_OV7670 1048 /* Omnivision 7670 camera */ /* @@ -111,7 +102,6 @@ #define I2C_HW_B_RIVA 0x010010 /* Riva based graphics cards */ #define I2C_HW_B_IOC 0x010011 /* IOC bit-wiggling */ #define I2C_HW_B_IXP2000 0x010016 /* GPIO on IXP2000 systems */ -#define I2C_HW_B_S3VIA 0x010018 /* S3Via ProSavage adapter */ #define I2C_HW_B_ZR36067 0x010019 /* Zoran-36057/36067 based boards */ #define I2C_HW_B_PCILYNX 0x01001a /* TI PCILynx I2C adapter */ #define I2C_HW_B_CX2388x 0x01001b /* connexant 2388x based tv cards */ @@ -161,7 +151,6 @@ #define I2C_HW_SMBUS_W9968CF 0x04000d #define I2C_HW_SMBUS_OV511 0x04000e /* OV511(+) USB 1.1 webcam ICs */ #define I2C_HW_SMBUS_OV518 0x04000f /* OV518(+) USB 1.1 webcam ICs */ -#define I2C_HW_SMBUS_OVFX2 0x040011 /* Cypress/OmniVision FX2 webcam */ #define I2C_HW_SMBUS_CAFE 0x040012 /* Marvell 88ALP01 "CAFE" cam */ #define I2C_HW_SMBUS_ALI1563 0x040013 diff --git a/include/linux/i2c.h b/include/linux/i2c.h index 8dc730132192..08be0d21864c 100644 --- a/include/linux/i2c.h +++ b/include/linux/i2c.h @@ -35,6 +35,8 @@ #include <linux/sched.h> /* for completion */ #include <linux/mutex.h> +extern struct bus_type i2c_bus_type; + /* --- General options ------------------------------------------------ */ struct i2c_msg; @@ -43,6 +45,7 @@ struct i2c_adapter; struct i2c_client; struct i2c_driver; union i2c_smbus_data; +struct i2c_board_info; /* * The master routines are the ones normally used to transmit data to devices @@ -69,9 +72,8 @@ extern s32 i2c_smbus_xfer (struct i2c_adapter * adapter, u16 addr, union i2c_smbus_data * data); /* Now follow the 'nice' access routines. These also document the calling - conventions of smbus_access. */ + conventions of i2c_smbus_xfer. */ -extern s32 i2c_smbus_write_quick(struct i2c_client * client, u8 value); extern s32 i2c_smbus_read_byte(struct i2c_client * client); extern s32 i2c_smbus_write_byte(struct i2c_client * client, u8 value); extern s32 i2c_smbus_read_byte_data(struct i2c_client * client, u8 command); @@ -93,15 +95,33 @@ extern s32 i2c_smbus_write_i2c_block_data(struct i2c_client * client, u8 command, u8 length, const u8 *values); -/* - * A driver is capable of handling one or more physical devices present on - * I2C adapters. This information is used to inform the driver of adapter - * events. +/** + * struct i2c_driver - represent an I2C device driver + * @class: What kind of i2c device we instantiate (for detect) + * @detect: Callback for device detection + * @address_data: The I2C addresses to probe, ignore or force (for detect) + * @clients: List of detected clients we created (for i2c-core use only) * * The driver.owner field should be set to the module owner of this driver. * The driver.name field should be set to the name of this driver. + * + * For automatic device detection, both @detect and @address_data must + * be defined. @class should also be set, otherwise only devices forced + * with module parameters will be created. 
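As a concrete illustration of the class/address_data/detect trio, a driver-side hookup can look roughly like the sketch below. The ID-register probe and all foo_ names are hypothetical; addr_data is assumed to be the table generated by the usual I2C_CLIENT_INSMOD_1 helper from normal_i2c[]:

	#include <linux/module.h>
	#include <linux/string.h>
	#include <linux/i2c.h>

	static const unsigned short normal_i2c[] = { 0x48, 0x49, I2C_CLIENT_END };
	I2C_CLIENT_INSMOD_1(foo);

	static int foo_detect(struct i2c_client *client, int kind,
			      struct i2c_board_info *info)
	{
		if (!i2c_check_functionality(client->adapter,
					     I2C_FUNC_SMBUS_BYTE_DATA))
			return -ENODEV;

		/* kind < 0 means "not forced": read a (hypothetical) ID register */
		if (kind < 0 && i2c_smbus_read_byte_data(client, 0x3e) != 0x42)
			return -ENODEV;

		strlcpy(info->type, "foo", I2C_NAME_SIZE);
		return 0;
	}

	static struct i2c_driver foo_driver = {
		.driver		= { .name = "foo" },
		.class		= I2C_CLASS_HWMON,
		.detect		= foo_detect,
		.address_data	= &addr_data,
	};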
The detect function must + * fill at least the name field of the i2c_board_info structure it is + * handed upon successful detection, and possibly also the flags field. + * + * If @detect is missing, the driver will still work fine for enumerated + * devices. Detected devices simply won't be supported. This is expected + * for the many I2C/SMBus devices which can't be detected reliably, and + * the ones which can always be enumerated in practice. + * + * The i2c_client structure which is handed to the @detect callback is + * not a real i2c_client. It is initialized just enough so that you can + * call i2c_smbus_read_byte_data and friends on it. Don't do anything + * else with it. In particular, calling dev_dbg and friends on it is + * not allowed. */ - struct i2c_driver { int id; unsigned int class; @@ -141,6 +161,11 @@ struct i2c_driver { struct device_driver driver; const struct i2c_device_id *id_table; + + /* Device detection callback for automatic device creation */ + int (*detect)(struct i2c_client *, int kind, struct i2c_board_info *); + const struct i2c_client_address_data *address_data; + struct list_head clients; }; #define to_i2c_driver(d) container_of(d, struct i2c_driver, driver) @@ -156,6 +181,7 @@ struct i2c_driver { * @dev: Driver model device node for the slave. * @irq: indicates the IRQ generated by this device (if any) * @list: list of active/busy clients (DEPRECATED) + * @detected: member of an i2c_driver.clients list * @released: used to synchronize client releases & detaches and references * * An i2c_client identifies a single device (i.e. chip) connected to an @@ -173,6 +199,7 @@ struct i2c_client { struct device dev; /* the device structure */ int irq; /* irq issued by device */ struct list_head list; /* DEPRECATED */ + struct list_head detected; struct completion released; }; #define to_i2c_client(d) container_of(d, struct i2c_client, dev) @@ -350,10 +377,11 @@ static inline void i2c_set_adapdata (struct i2c_adapter *dev, void *data) #define I2C_CLASS_HWMON (1<<0) /* lm_sensors, ... */ #define I2C_CLASS_TV_ANALOG (1<<1) /* bttv + friends */ #define I2C_CLASS_TV_DIGITAL (1<<2) /* dvb cards */ -#define I2C_CLASS_DDC (1<<3) /* i2c-matroxfb ? */ +#define I2C_CLASS_DDC (1<<3) /* DDC bus on graphics adapters */ #define I2C_CLASS_CAM_ANALOG (1<<4) /* camera with analog CCD */ #define I2C_CLASS_CAM_DIGITAL (1<<5) /* most webcams */ #define I2C_CLASS_SOUND (1<<6) /* sound devices */ +#define I2C_CLASS_SPD (1<<7) /* SPD EEPROMs and similar */ #define I2C_CLASS_ALL (UINT_MAX) /* all of the above */ /* i2c_client_address_data is the struct for holding default client @@ -537,7 +565,7 @@ union i2c_smbus_data { /* and one more for user-space compatibility */ }; -/* smbus_access read or write markers */ +/* i2c_smbus_xfer read or write markers */ #define I2C_SMBUS_READ 1 #define I2C_SMBUS_WRITE 0 diff --git a/include/linux/i2c/at24.h b/include/linux/i2c/at24.h new file mode 100644 index 000000000000..f6edd522a929 --- /dev/null +++ b/include/linux/i2c/at24.h @@ -0,0 +1,28 @@ +#ifndef _LINUX_AT24_H +#define _LINUX_AT24_H + +#include <linux/types.h> + +/* + * As seen through Linux I2C, differences between the most common types of I2C + * memory include: + * - How much memory is available (usually specified in bit)? + * - What write page size does it support? + * - Special flags (16 bit addresses, read_only, world readable...)? + * + * If you set up a custom eeprom type, please double-check the parameters. 
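Wiring the at24 platform data to a device goes through the usual board-info path. A sketch for a 24c64 part at address 0x50 on bus 0; the bus number, address and foo_ names are examples, not defaults:

	#include <linux/i2c.h>
	#include <linux/i2c/at24.h>

	static struct at24_platform_data foo_eeprom_pdata = {
		.byte_len	= 8192,			/* 24c64 holds 64 kbit */
		.page_size	= 32,			/* write page size */
		.flags		= AT24_FLAG_ADDR16,	/* 16-bit address pointer */
	};

	static struct i2c_board_info foo_i2c_devices[] __initdata = {
		{
			I2C_BOARD_INFO("24c64", 0x50),
			.platform_data	= &foo_eeprom_pdata,
		},
	};

	/* from board init code:
	 * i2c_register_board_info(0, foo_i2c_devices, ARRAY_SIZE(foo_i2c_devices));
	 */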
+ * Especially page_size needs extra care, as you risk data loss if your value + * is bigger than what the chip actually supports! + */ + +struct at24_platform_data { + u32 byte_len; /* size (sum of all addr) */ + u16 page_size; /* for writes */ + u8 flags; +#define AT24_FLAG_ADDR16 0x80 /* address pointer is 16 bit */ +#define AT24_FLAG_READONLY 0x40 /* sysfs-entry will be read-only */ +#define AT24_FLAG_IRUGO 0x20 /* sysfs-entry will be world-readable */ +#define AT24_FLAG_TAKE8ADDR 0x10 /* take always 8 addresses (24c00) */ +}; + +#endif /* _LINUX_AT24_H */ diff --git a/include/linux/ide.h b/include/linux/ide.h index eddb6daadf4a..d67ccca2b964 100644 --- a/include/linux/ide.h +++ b/include/linux/ide.h @@ -139,6 +139,12 @@ struct ide_io_ports { #define WAIT_MIN_SLEEP (2*HZ/100) /* 20msec - minimum sleep time */ /* + * Op codes for special requests to be handled by ide_special_rq(). + * Values should be in the range of 0x20 to 0x3f. + */ +#define REQ_DRIVE_RESET 0x20 + +/* * Check for an interrupt and acknowledge the interrupt status */ struct hwif_s; @@ -171,7 +177,8 @@ typedef struct hw_regs_s { int irq; /* our irq number */ ide_ack_intr_t *ack_intr; /* acknowledge interrupt */ hwif_chipset_t chipset; - struct device *dev; + struct device *dev, *parent; + unsigned long config; } hw_regs_t; void ide_init_port_data(struct hwif_s *, unsigned int); @@ -301,7 +308,65 @@ struct ide_acpi_drive_link; struct ide_acpi_hwif_link; #endif -typedef struct ide_drive_s { +/* ATAPI device flags */ +enum { + IDE_AFLAG_DRQ_INTERRUPT = (1 << 0), + IDE_AFLAG_MEDIA_CHANGED = (1 << 1), + + /* ide-cd */ + /* Drive cannot lock the door. */ + IDE_AFLAG_NO_DOORLOCK = (1 << 2), + /* Drive cannot eject the disc. */ + IDE_AFLAG_NO_EJECT = (1 << 3), + /* Drive is a pre ATAPI 1.2 drive. */ + IDE_AFLAG_PRE_ATAPI12 = (1 << 4), + /* TOC addresses are in BCD. */ + IDE_AFLAG_TOCADDR_AS_BCD = (1 << 5), + /* TOC track numbers are in BCD. */ + IDE_AFLAG_TOCTRACKS_AS_BCD = (1 << 6), + /* + * Drive does not provide data in multiples of SECTOR_SIZE + * when more than one interrupt is needed. + */ + IDE_AFLAG_LIMIT_NFRAMES = (1 << 7), + /* Seeking in progress. */ + IDE_AFLAG_SEEKING = (1 << 8), + /* Saved TOC information is current. */ + IDE_AFLAG_TOC_VALID = (1 << 9), + /* We think that the drive door is locked. */ + IDE_AFLAG_DOOR_LOCKED = (1 << 10), + /* SET_CD_SPEED command is unsupported. 
*/ + IDE_AFLAG_NO_SPEED_SELECT = (1 << 11), + IDE_AFLAG_VERTOS_300_SSD = (1 << 12), + IDE_AFLAG_VERTOS_600_ESD = (1 << 13), + IDE_AFLAG_SANYO_3CD = (1 << 14), + IDE_AFLAG_FULL_CAPS_PAGE = (1 << 15), + IDE_AFLAG_PLAY_AUDIO_OK = (1 << 16), + IDE_AFLAG_LE_SPEED_FIELDS = (1 << 17), + + /* ide-floppy */ + /* Format in progress */ + IDE_AFLAG_FORMAT_IN_PROGRESS = (1 << 18), + /* Avoid commands not supported in Clik drive */ + IDE_AFLAG_CLIK_DRIVE = (1 << 19), + /* Requires BH algorithm for packets */ + IDE_AFLAG_ZIP_DRIVE = (1 << 20), + + /* ide-tape */ + IDE_AFLAG_IGNORE_DSC = (1 << 21), + /* 0 When the tape position is unknown */ + IDE_AFLAG_ADDRESS_VALID = (1 << 22), + /* Device already opened */ + IDE_AFLAG_BUSY = (1 << 23), + /* Attempt to auto-detect the current user block size */ + IDE_AFLAG_DETECT_BS = (1 << 24), + /* Currently on a filemark */ + IDE_AFLAG_FILEMARK = (1 << 25), + /* 0 = no tape is loaded, so we don't rewind after ejecting */ + IDE_AFLAG_MEDIUM_PRESENT = (1 << 26) +}; + +struct ide_drive_s { char name[4]; /* drive name, such as "hda" */ char driver_req[10]; /* requests specific driver */ @@ -349,7 +414,6 @@ typedef struct ide_drive_s { unsigned nodma : 1; /* disallow DMA */ unsigned remap_0_to_1 : 1; /* 0=noremap, 1=remap 0->1 (for EZDrive) */ unsigned blocked : 1; /* 1=powermanagment told us not to do anything, so sleep nicely */ - unsigned vdma : 1; /* 1=doing PIO over DMA 0=doing normal DMA */ unsigned scsi : 1; /* 0=default, 1=ide-scsi emulation */ unsigned sleeping : 1; /* 1=sleeping & sleep field valid */ unsigned post_reset : 1; @@ -364,7 +428,6 @@ typedef struct ide_drive_s { u8 wcache; /* status of write cache */ u8 acoustic; /* acoustic management */ u8 media; /* disk, cdrom, tape, floppy, ... */ - u8 ctl; /* "normal" value for Control register */ u8 ready_stat; /* min status value for drive ready */ u8 mult_count; /* current multiple sector setting */ u8 mult_req; /* requested multiple sector setting */ @@ -395,7 +458,14 @@ typedef struct ide_drive_s { struct list_head list; struct device gendev; struct completion gendev_rel_comp; /* to deal with device release() */ -} ide_drive_t; + + /* callback for packet commands */ + void (*pc_callback)(struct ide_drive_s *); + + unsigned long atapi_flags; +}; + +typedef struct ide_drive_s ide_drive_t; #define to_ide_device(dev)container_of(dev, ide_drive_t, gendev) @@ -403,11 +473,31 @@ typedef struct ide_drive_s { ((1<<ide_pci)|(1<<ide_cmd646)|(1<<ide_ali14xx)) #define IDE_CHIPSET_IS_PCI(c) ((IDE_CHIPSET_PCI_MASK >> (c)) & 1) +struct ide_task_s; struct ide_port_info; +struct ide_tp_ops { + void (*exec_command)(struct hwif_s *, u8); + u8 (*read_status)(struct hwif_s *); + u8 (*read_altstatus)(struct hwif_s *); + u8 (*read_sff_dma_status)(struct hwif_s *); + + void (*set_irq)(struct hwif_s *, int); + + void (*tf_load)(ide_drive_t *, struct ide_task_s *); + void (*tf_read)(ide_drive_t *, struct ide_task_s *); + + void (*input_data)(ide_drive_t *, struct request *, void *, + unsigned int); + void (*output_data)(ide_drive_t *, struct request *, void *, + unsigned int); +}; + +extern const struct ide_tp_ops default_tp_ops; + struct ide_port_ops { - /* host specific initialization of devices on a port */ - void (*port_init_devs)(struct hwif_s *); + /* host specific initialization of a device */ + void (*init_dev)(ide_drive_t *); /* routine to program host for PIO mode */ void (*set_pio_mode)(ide_drive_t *, const u8); /* routine to program host for DMA mode */ @@ -442,8 +532,6 @@ struct ide_dma_ops { void (*dma_timeout)(struct 
ide_drive_s *); }; -struct ide_task_s; - typedef struct hwif_s { struct hwif_s *next; /* for linked-list in ide_hwgroup_t */ struct hwif_s *mate; /* other hwif from same PCI chip */ @@ -481,22 +569,12 @@ typedef struct hwif_s { void (*rw_disk)(ide_drive_t *, struct request *); + const struct ide_tp_ops *tp_ops; const struct ide_port_ops *port_ops; const struct ide_dma_ops *dma_ops; - void (*tf_load)(ide_drive_t *, struct ide_task_s *); - void (*tf_read)(ide_drive_t *, struct ide_task_s *); - - void (*input_data)(ide_drive_t *, struct request *, void *, unsigned); - void (*output_data)(ide_drive_t *, struct request *, void *, unsigned); - void (*ide_dma_clear_irq)(ide_drive_t *drive); - void (*OUTB)(u8 addr, unsigned long port); - void (*OUTBSYNC)(ide_drive_t *drive, u8 addr, unsigned long port); - - u8 (*INB)(unsigned long port); - /* dma physical region descriptor table (cpu view) */ unsigned int *dmatable_cpu; /* dma physical region descriptor table (dma view) */ @@ -519,8 +597,6 @@ typedef struct hwif_s { int irq; /* our irq number */ unsigned long dma_base; /* base addr for dma ports */ - unsigned long dma_command; /* dma command register */ - unsigned long dma_status; /* dma status register */ unsigned long config_data; /* for use by chipset-specific code */ unsigned long select_data; /* for use by chipset-specific code */ @@ -532,7 +608,6 @@ typedef struct hwif_s { unsigned serialized : 1; /* serialized all channel operation */ unsigned sharing_irq: 1; /* 1 = sharing irq with another hwif */ unsigned sg_mapped : 1; /* sg_table and sg_nents are ready */ - unsigned mmio : 1; /* host uses MMIO */ struct device gendev; struct device *portdev; @@ -548,6 +623,11 @@ typedef struct hwif_s { #endif } ____cacheline_internodealigned_in_smp ide_hwif_t; +struct ide_host { + ide_hwif_t *ports[MAX_HWIFS]; + unsigned int n_ports; +}; + /* * internal ide interrupt handler type */ @@ -567,8 +647,6 @@ typedef struct hwgroup_s { unsigned int sleeping : 1; /* BOOL: polling active & poll_timeout field valid */ unsigned int polling : 1; - /* BOOL: in a polling reset situation. Must not trigger another reset yet */ - unsigned int resetting : 1; /* current drive */ ide_drive_t *drive; @@ -604,12 +682,11 @@ enum { PC_FLAG_SUPPRESS_ERROR = (1 << 1), PC_FLAG_WAIT_FOR_DSC = (1 << 2), PC_FLAG_DMA_OK = (1 << 3), - PC_FLAG_DMA_RECOMMENDED = (1 << 4), - PC_FLAG_DMA_IN_PROGRESS = (1 << 5), - PC_FLAG_DMA_ERROR = (1 << 6), - PC_FLAG_WRITING = (1 << 7), + PC_FLAG_DMA_IN_PROGRESS = (1 << 4), + PC_FLAG_DMA_ERROR = (1 << 5), + PC_FLAG_WRITING = (1 << 6), /* command timed out */ - PC_FLAG_TIMEDOUT = (1 << 8), + PC_FLAG_TIMEDOUT = (1 << 7), }; struct ide_atapi_pc { @@ -642,8 +719,6 @@ struct ide_atapi_pc { * to change/removal later. */ u8 pc_buf[256]; - void (*idefloppy_callback) (ide_drive_t *); - ide_startstop_t (*idetape_callback) (ide_drive_t *); /* idetape only */ struct idetape_bh *bh; @@ -787,7 +862,6 @@ struct ide_driver_s { ide_startstop_t (*do_request)(ide_drive_t *, struct request *, sector_t); int (*end_request)(ide_drive_t *, int, int); ide_startstop_t (*error)(ide_drive_t *, struct request *rq, u8, u8); - ide_startstop_t (*abort)(ide_drive_t *, struct request *rq); struct device_driver gen_driver; int (*probe)(ide_drive_t *); void (*remove)(ide_drive_t *); @@ -802,32 +876,9 @@ struct ide_driver_s { int generic_ide_ioctl(ide_drive_t *, struct file *, struct block_device *, unsigned, unsigned long); -/* - * ide_hwifs[] is the master data structure used to keep track - * of just about everything in ide.c. 
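The net effect of the hwif_s changes above is that the per-port I/O accessors (tf_load/tf_read, input_data/output_data, the old OUTB/INB pointers) now live behind a single const ide_tp_ops reachable as hwif->tp_ops. A sketch of the adjusted call style in a host driver, with default_tp_ops standing in for hosts that override nothing and the foo_ names invented for the example:

	#include <linux/ide.h>

	/* read the status register through the consolidated taskfile ops */
	static u8 foo_read_status(ide_hwif_t *hwif)
	{
		/* previously: hwif->INB(hwif->io_ports.status_addr) */
		return hwif->tp_ops->read_status(hwif);
	}

	/* hosts that need nothing special simply keep the defaults */
	static const struct ide_port_info foo_port_info = {
		.name	= "foo-ide",
		.tp_ops	= &default_tp_ops,
	};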
Whenever possible, routines - * should be using pointers to a drive (ide_drive_t *) or - * pointers to a hwif (ide_hwif_t *), rather than indexing this - * structure directly (the allocation/layout may change!). - * - */ -#ifndef _IDE_C -extern ide_hwif_t ide_hwifs[]; /* master data repository */ -#endif -extern int ide_noacpi; -extern int ide_acpigtf; -extern int ide_acpionboot; -extern int noautodma; - extern int ide_vlb_clk; extern int ide_pci_clk; -ide_hwif_t *ide_find_port_slot(const struct ide_port_info *); - -static inline ide_hwif_t *ide_find_port(void) -{ - return ide_find_port_slot(NULL); -} - extern int ide_end_request (ide_drive_t *drive, int uptodate, int nrsecs); int ide_end_dequeued_request(ide_drive_t *drive, struct request *rq, int uptodate, int nr_sectors); @@ -845,10 +896,6 @@ ide_startstop_t __ide_error(ide_drive_t *, struct request *, u8, u8); ide_startstop_t ide_error (ide_drive_t *drive, const char *msg, byte stat); -ide_startstop_t __ide_abort(ide_drive_t *, struct request *); - -extern ide_startstop_t ide_abort(ide_drive_t *, const char *); - extern void ide_fix_driveid(struct hd_driveid *); extern void ide_fixstring(u8 *, const int, const int); @@ -857,25 +904,12 @@ int ide_wait_stat(ide_startstop_t *, ide_drive_t *, u8, u8, unsigned long); extern ide_startstop_t ide_do_reset (ide_drive_t *); -extern void ide_init_drive_cmd (struct request *rq); - -/* - * "action" parameter type for ide_do_drive_cmd() below. - */ -typedef enum { - ide_wait, /* insert rq at end of list, and wait for it */ - ide_preempt, /* insert rq in front of current request */ - ide_head_wait, /* insert rq in front of current request and wait for it */ - ide_end /* insert rq at end of list, but don't wait for it */ -} ide_action_t; - -extern int ide_do_drive_cmd(ide_drive_t *, struct request *, ide_action_t); +extern void ide_do_drive_cmd(ide_drive_t *, struct request *); extern void ide_end_drive_cmd(ide_drive_t *, u8, u8); enum { IDE_TFLAG_LBA48 = (1 << 0), - IDE_TFLAG_NO_SELECT_MASK = (1 << 1), IDE_TFLAG_FLAGGED = (1 << 2), IDE_TFLAG_OUT_DATA = (1 << 3), IDE_TFLAG_OUT_HOB_FEATURE = (1 << 4), @@ -915,6 +949,7 @@ enum { IDE_TFLAG_IN_HOB = IDE_TFLAG_IN_HOB_FEATURE | IDE_TFLAG_IN_HOB_NSECT | IDE_TFLAG_IN_HOB_LBA, + IDE_TFLAG_IN_FEATURE = (1 << 1), IDE_TFLAG_IN_NSECT = (1 << 25), IDE_TFLAG_IN_LBAL = (1 << 26), IDE_TFLAG_IN_LBAM = (1 << 27), @@ -979,12 +1014,40 @@ typedef struct ide_task_s { void ide_tf_dump(const char *, struct ide_taskfile *); +void ide_exec_command(ide_hwif_t *, u8); +u8 ide_read_status(ide_hwif_t *); +u8 ide_read_altstatus(ide_hwif_t *); +u8 ide_read_sff_dma_status(ide_hwif_t *); + +void ide_set_irq(ide_hwif_t *, int); + +void ide_tf_load(ide_drive_t *, ide_task_t *); +void ide_tf_read(ide_drive_t *, ide_task_t *); + +void ide_input_data(ide_drive_t *, struct request *, void *, unsigned int); +void ide_output_data(ide_drive_t *, struct request *, void *, unsigned int); + extern void SELECT_DRIVE(ide_drive_t *); +void SELECT_MASK(ide_drive_t *, int); + +u8 ide_read_error(ide_drive_t *); +void ide_read_bcount_and_ireason(ide_drive_t *, u16 *, u8 *); extern int drive_is_ready(ide_drive_t *); void ide_pktcmd_tf_load(ide_drive_t *, u32, u16, u8); +ide_startstop_t ide_pc_intr(ide_drive_t *drive, struct ide_atapi_pc *pc, + ide_handler_t *handler, unsigned int timeout, ide_expiry_t *expiry, + void (*update_buffers)(ide_drive_t *, struct ide_atapi_pc *), + void (*retry_pc)(ide_drive_t *), void (*dsc_handle)(ide_drive_t *), + void (*io_buffers)(ide_drive_t *, struct ide_atapi_pc *, 
unsigned int, + int)); +ide_startstop_t ide_transfer_pc(ide_drive_t *, struct ide_atapi_pc *, + ide_handler_t *, unsigned int, ide_expiry_t *); +ide_startstop_t ide_issue_pc(ide_drive_t *, struct ide_atapi_pc *, + ide_handler_t *, unsigned int, ide_expiry_t *); + ide_startstop_t do_rw_taskfile(ide_drive_t *, ide_task_t *); void task_end_request(ide_drive_t *, struct request *, u8); @@ -996,8 +1059,6 @@ int ide_taskfile_ioctl(ide_drive_t *, unsigned int, unsigned long); int ide_cmd_ioctl(ide_drive_t *, unsigned int, unsigned long); int ide_task_ioctl(ide_drive_t *, unsigned int, unsigned long); -extern int system_bus_clock(void); - extern int ide_driveid_update(ide_drive_t *); extern int ide_config_drive_speed(ide_drive_t *, u8); extern u8 eighty_ninty_three (ide_drive_t *); @@ -1021,12 +1082,15 @@ extern int __ide_pci_register_driver(struct pci_driver *driver, struct module *o #define ide_pci_register_driver(d) pci_register_driver(d) #endif -void ide_pci_setup_ports(struct pci_dev *, const struct ide_port_info *, int, u8 *); +void ide_pci_setup_ports(struct pci_dev *, const struct ide_port_info *, int, + hw_regs_t *, hw_regs_t **); void ide_setup_pci_noise(struct pci_dev *, const struct ide_port_info *); #ifdef CONFIG_BLK_DEV_IDEDMA_PCI int ide_pci_set_master(struct pci_dev *, const char *); unsigned long ide_pci_dma_base(ide_hwif_t *, const struct ide_port_info *); +extern const struct ide_dma_ops sff_dma_ops; +int ide_pci_check_simplex(ide_hwif_t *, const struct ide_port_info *); int ide_hwif_setup_dma(ide_hwif_t *, const struct ide_port_info *); #else static inline int ide_hwif_setup_dma(ide_hwif_t *hwif, @@ -1036,10 +1100,6 @@ static inline int ide_hwif_setup_dma(ide_hwif_t *hwif, } #endif -extern void default_hwif_iops(ide_hwif_t *); -extern void default_hwif_mmiops(ide_hwif_t *); -extern void default_hwif_transport(ide_hwif_t *); - typedef struct ide_pci_enablebit_s { u8 reg; /* byte pci reg holding the enable-bit */ u8 mask; /* mask to isolate the enable-bit */ @@ -1102,7 +1162,6 @@ enum { IDE_HFLAG_IO_32BIT = (1 << 24), /* unmask IRQs */ IDE_HFLAG_UNMASK_IRQS = (1 << 25), - IDE_HFLAG_ABUSE_SET_DMA_MODE = (1 << 26), /* serialize ports if DMA is possible (for sl82c105) */ IDE_HFLAG_SERIALIZE_DMA = (1 << 27), /* force host out of "simplex" mode */ @@ -1113,8 +1172,6 @@ enum { IDE_HFLAG_NO_IO_32BIT = (1 << 30), /* never unmask IRQs */ IDE_HFLAG_NO_UNMASK_IRQS = (1 << 31), - /* host uses VDMA (disabled for now) */ - IDE_HFLAG_VDMA = 0, }; #ifdef CONFIG_BLK_DEV_OFFBOARD @@ -1131,6 +1188,7 @@ struct ide_port_info { int (*init_dma)(ide_hwif_t *, const struct ide_port_info *); + const struct ide_tp_ops *tp_ops; const struct ide_port_ops *port_ops; const struct ide_dma_ops *dma_ops; @@ -1184,7 +1242,6 @@ void ide_destroy_dmatable(ide_drive_t *); extern int ide_build_dmatable(ide_drive_t *, struct request *); int ide_allocate_dma_engine(ide_hwif_t *); void ide_release_dma_engine(ide_hwif_t *); -void ide_setup_dma(ide_hwif_t *, unsigned long); void ide_dma_host_set(ide_drive_t *, int); extern int ide_dma_setup(ide_drive_t *); @@ -1238,8 +1295,14 @@ void ide_undecoded_slave(ide_drive_t *); void ide_port_apply_params(ide_hwif_t *); -int ide_device_add_all(u8 *idx, const struct ide_port_info *); -int ide_device_add(u8 idx[4], const struct ide_port_info *); +struct ide_host *ide_host_alloc_all(const struct ide_port_info *, hw_regs_t **); +struct ide_host *ide_host_alloc(const struct ide_port_info *, hw_regs_t **); +void ide_host_free(struct ide_host *); +int ide_host_register(struct ide_host *, 
const struct ide_port_info *, + hw_regs_t **); +int ide_host_add(const struct ide_port_info *, hw_regs_t **, + struct ide_host **); +void ide_host_remove(struct ide_host *); int ide_legacy_device_add(const struct ide_port_info *, unsigned long); void ide_port_unregister_devices(ide_hwif_t *); void ide_port_scan(ide_hwif_t *); @@ -1279,16 +1342,43 @@ static inline int ide_dev_is_sata(struct hd_driveid *id) u64 ide_get_lba_addr(struct ide_taskfile *, int); u8 ide_dump_status(ide_drive_t *, const char *, u8); -typedef struct ide_pio_timings_s { - int setup_time; /* Address setup (ns) minimum */ - int active_time; /* Active pulse (ns) minimum */ - int cycle_time; /* Cycle time (ns) minimum = */ - /* active + recovery (+ setup for some chips) */ -} ide_pio_timings_t; +struct ide_timing { + u8 mode; + u8 setup; /* t1 */ + u16 act8b; /* t2 for 8-bit io */ + u16 rec8b; /* t2i for 8-bit io */ + u16 cyc8b; /* t0 for 8-bit io */ + u16 active; /* t2 or tD */ + u16 recover; /* t2i or tK */ + u16 cycle; /* t0 */ + u16 udma; /* t2CYCTYP/2 */ +}; + +enum { + IDE_TIMING_SETUP = (1 << 0), + IDE_TIMING_ACT8B = (1 << 1), + IDE_TIMING_REC8B = (1 << 2), + IDE_TIMING_CYC8B = (1 << 3), + IDE_TIMING_8BIT = IDE_TIMING_ACT8B | IDE_TIMING_REC8B | + IDE_TIMING_CYC8B, + IDE_TIMING_ACTIVE = (1 << 4), + IDE_TIMING_RECOVER = (1 << 5), + IDE_TIMING_CYCLE = (1 << 6), + IDE_TIMING_UDMA = (1 << 7), + IDE_TIMING_ALL = IDE_TIMING_SETUP | IDE_TIMING_8BIT | + IDE_TIMING_ACTIVE | IDE_TIMING_RECOVER | + IDE_TIMING_CYCLE | IDE_TIMING_UDMA, +}; + +struct ide_timing *ide_timing_find_mode(u8); +u16 ide_pio_cycle_time(ide_drive_t *, u8); +void ide_timing_merge(struct ide_timing *, struct ide_timing *, + struct ide_timing *, unsigned int); +int ide_timing_compute(ide_drive_t *, u8, struct ide_timing *, int, int); + +int ide_scan_pio_blacklist(char *); -unsigned int ide_pio_cycle_time(ide_drive_t *, u8); u8 ide_get_best_pio_mode(ide_drive_t *, u8, u8); -extern const ide_pio_timings_t ide_pio_timings[6]; int ide_set_pio_mode(ide_drive_t *, u8); int ide_set_dma_mode(ide_drive_t *, u8); @@ -1344,32 +1434,4 @@ static inline ide_drive_t *ide_get_paired_drive(ide_drive_t *drive) return &hwif->drives[(drive->dn ^ 1) & 1]; } - -static inline void ide_set_irq(ide_drive_t *drive, int on) -{ - ide_hwif_t *hwif = drive->hwif; - - hwif->OUTB(drive->ctl | (on ? 
0 : 2), hwif->io_ports.ctl_addr); -} - -static inline u8 ide_read_status(ide_drive_t *drive) -{ - ide_hwif_t *hwif = drive->hwif; - - return hwif->INB(hwif->io_ports.status_addr); -} - -static inline u8 ide_read_altstatus(ide_drive_t *drive) -{ - ide_hwif_t *hwif = drive->hwif; - - return hwif->INB(hwif->io_ports.ctl_addr); -} - -static inline u8 ide_read_error(ide_drive_t *drive) -{ - ide_hwif_t *hwif = drive->hwif; - - return hwif->INB(hwif->io_ports.error_addr); -} #endif /* _IDE_H */ diff --git a/include/linux/ieee80211.h b/include/linux/ieee80211.h index 0b5e03eae6d2..a1630ba0b87c 100644 --- a/include/linux/ieee80211.h +++ b/include/linux/ieee80211.h @@ -98,6 +98,9 @@ #define IEEE80211_MAX_SSID_LEN 32 #define IEEE80211_MAX_MESH_ID_LEN 32 +#define IEEE80211_QOS_CTL_LEN 2 +#define IEEE80211_QOS_CTL_TID_MASK 0x000F +#define IEEE80211_QOS_CTL_TAG1D_MASK 0x0007 struct ieee80211_hdr { __le16 frame_control; @@ -109,6 +112,355 @@ struct ieee80211_hdr { u8 addr4[6]; } __attribute__ ((packed)); +/** + * ieee80211_has_tods - check if IEEE80211_FCTL_TODS is set + * @fc: frame control bytes in little-endian byteorder + */ +static inline int ieee80211_has_tods(__le16 fc) +{ + return (fc & cpu_to_le16(IEEE80211_FCTL_TODS)) != 0; +} + +/** + * ieee80211_has_fromds - check if IEEE80211_FCTL_FROMDS is set + * @fc: frame control bytes in little-endian byteorder + */ +static inline int ieee80211_has_fromds(__le16 fc) +{ + return (fc & cpu_to_le16(IEEE80211_FCTL_FROMDS)) != 0; +} + +/** + * ieee80211_has_a4 - check if IEEE80211_FCTL_TODS and IEEE80211_FCTL_FROMDS are set + * @fc: frame control bytes in little-endian byteorder + */ +static inline int ieee80211_has_a4(__le16 fc) +{ + __le16 tmp = cpu_to_le16(IEEE80211_FCTL_TODS | IEEE80211_FCTL_FROMDS); + return (fc & tmp) == tmp; +} + +/** + * ieee80211_has_morefrags - check if IEEE80211_FCTL_MOREFRAGS is set + * @fc: frame control bytes in little-endian byteorder + */ +static inline int ieee80211_has_morefrags(__le16 fc) +{ + return (fc & cpu_to_le16(IEEE80211_FCTL_MOREFRAGS)) != 0; +} + +/** + * ieee80211_has_retry - check if IEEE80211_FCTL_RETRY is set + * @fc: frame control bytes in little-endian byteorder + */ +static inline int ieee80211_has_retry(__le16 fc) +{ + return (fc & cpu_to_le16(IEEE80211_FCTL_RETRY)) != 0; +} + +/** + * ieee80211_has_pm - check if IEEE80211_FCTL_PM is set + * @fc: frame control bytes in little-endian byteorder + */ +static inline int ieee80211_has_pm(__le16 fc) +{ + return (fc & cpu_to_le16(IEEE80211_FCTL_PM)) != 0; +} + +/** + * ieee80211_has_moredata - check if IEEE80211_FCTL_MOREDATA is set + * @fc: frame control bytes in little-endian byteorder + */ +static inline int ieee80211_has_moredata(__le16 fc) +{ + return (fc & cpu_to_le16(IEEE80211_FCTL_MOREDATA)) != 0; +} + +/** + * ieee80211_has_protected - check if IEEE80211_FCTL_PROTECTED is set + * @fc: frame control bytes in little-endian byteorder + */ +static inline int ieee80211_has_protected(__le16 fc) +{ + return (fc & cpu_to_le16(IEEE80211_FCTL_PROTECTED)) != 0; +} + +/** + * ieee80211_has_order - check if IEEE80211_FCTL_ORDER is set + * @fc: frame control bytes in little-endian byteorder + */ +static inline int ieee80211_has_order(__le16 fc) +{ + return (fc & cpu_to_le16(IEEE80211_FCTL_ORDER)) != 0; +} + +/** + * ieee80211_is_mgmt - check if type is IEEE80211_FTYPE_MGMT + * @fc: frame control bytes in little-endian byteorder + */ +static inline int ieee80211_is_mgmt(__le16 fc) +{ + return (fc & cpu_to_le16(IEEE80211_FCTL_FTYPE)) == + 
cpu_to_le16(IEEE80211_FTYPE_MGMT); +} + +/** + * ieee80211_is_ctl - check if type is IEEE80211_FTYPE_CTL + * @fc: frame control bytes in little-endian byteorder + */ +static inline int ieee80211_is_ctl(__le16 fc) +{ + return (fc & cpu_to_le16(IEEE80211_FCTL_FTYPE)) == + cpu_to_le16(IEEE80211_FTYPE_CTL); +} + +/** + * ieee80211_is_data - check if type is IEEE80211_FTYPE_DATA + * @fc: frame control bytes in little-endian byteorder + */ +static inline int ieee80211_is_data(__le16 fc) +{ + return (fc & cpu_to_le16(IEEE80211_FCTL_FTYPE)) == + cpu_to_le16(IEEE80211_FTYPE_DATA); +} + +/** + * ieee80211_is_data_qos - check if type is IEEE80211_FTYPE_DATA and IEEE80211_STYPE_QOS_DATA is set + * @fc: frame control bytes in little-endian byteorder + */ +static inline int ieee80211_is_data_qos(__le16 fc) +{ + /* + * mask with QOS_DATA rather than IEEE80211_FCTL_STYPE as we just need + * to check the one bit + */ + return (fc & cpu_to_le16(IEEE80211_FCTL_FTYPE | IEEE80211_STYPE_QOS_DATA)) == + cpu_to_le16(IEEE80211_FTYPE_DATA | IEEE80211_STYPE_QOS_DATA); +} + +/** + * ieee80211_is_data_present - check if type is IEEE80211_FTYPE_DATA and has data + * @fc: frame control bytes in little-endian byteorder + */ +static inline int ieee80211_is_data_present(__le16 fc) +{ + /* + * mask with 0x40 and test that that bit is clear to only return true + * for the data-containing substypes. + */ + return (fc & cpu_to_le16(IEEE80211_FCTL_FTYPE | 0x40)) == + cpu_to_le16(IEEE80211_FTYPE_DATA); +} + +/** + * ieee80211_is_assoc_req - check if IEEE80211_FTYPE_MGMT && IEEE80211_STYPE_ASSOC_REQ + * @fc: frame control bytes in little-endian byteorder + */ +static inline int ieee80211_is_assoc_req(__le16 fc) +{ + return (fc & cpu_to_le16(IEEE80211_FCTL_FTYPE | IEEE80211_FCTL_STYPE)) == + cpu_to_le16(IEEE80211_FTYPE_MGMT | IEEE80211_STYPE_ASSOC_REQ); +} + +/** + * ieee80211_is_assoc_resp - check if IEEE80211_FTYPE_MGMT && IEEE80211_STYPE_ASSOC_RESP + * @fc: frame control bytes in little-endian byteorder + */ +static inline int ieee80211_is_assoc_resp(__le16 fc) +{ + return (fc & cpu_to_le16(IEEE80211_FCTL_FTYPE | IEEE80211_FCTL_STYPE)) == + cpu_to_le16(IEEE80211_FTYPE_MGMT | IEEE80211_STYPE_ASSOC_RESP); +} + +/** + * ieee80211_is_reassoc_req - check if IEEE80211_FTYPE_MGMT && IEEE80211_STYPE_REASSOC_REQ + * @fc: frame control bytes in little-endian byteorder + */ +static inline int ieee80211_is_reassoc_req(__le16 fc) +{ + return (fc & cpu_to_le16(IEEE80211_FCTL_FTYPE | IEEE80211_FCTL_STYPE)) == + cpu_to_le16(IEEE80211_FTYPE_MGMT | IEEE80211_STYPE_REASSOC_REQ); +} + +/** + * ieee80211_is_reassoc_resp - check if IEEE80211_FTYPE_MGMT && IEEE80211_STYPE_REASSOC_RESP + * @fc: frame control bytes in little-endian byteorder + */ +static inline int ieee80211_is_reassoc_resp(__le16 fc) +{ + return (fc & cpu_to_le16(IEEE80211_FCTL_FTYPE | IEEE80211_FCTL_STYPE)) == + cpu_to_le16(IEEE80211_FTYPE_MGMT | IEEE80211_STYPE_REASSOC_RESP); +} + +/** + * ieee80211_is_probe_req - check if IEEE80211_FTYPE_MGMT && IEEE80211_STYPE_PROBE_REQ + * @fc: frame control bytes in little-endian byteorder + */ +static inline int ieee80211_is_probe_req(__le16 fc) +{ + return (fc & cpu_to_le16(IEEE80211_FCTL_FTYPE | IEEE80211_FCTL_STYPE)) == + cpu_to_le16(IEEE80211_FTYPE_MGMT | IEEE80211_STYPE_PROBE_REQ); +} + +/** + * ieee80211_is_probe_resp - check if IEEE80211_FTYPE_MGMT && IEEE80211_STYPE_PROBE_RESP + * @fc: frame control bytes in little-endian byteorder + */ +static inline int ieee80211_is_probe_resp(__le16 fc) +{ + return (fc & 
cpu_to_le16(IEEE80211_FCTL_FTYPE | IEEE80211_FCTL_STYPE)) == + cpu_to_le16(IEEE80211_FTYPE_MGMT | IEEE80211_STYPE_PROBE_RESP); +} + +/** + * ieee80211_is_beacon - check if IEEE80211_FTYPE_MGMT && IEEE80211_STYPE_BEACON + * @fc: frame control bytes in little-endian byteorder + */ +static inline int ieee80211_is_beacon(__le16 fc) +{ + return (fc & cpu_to_le16(IEEE80211_FCTL_FTYPE | IEEE80211_FCTL_STYPE)) == + cpu_to_le16(IEEE80211_FTYPE_MGMT | IEEE80211_STYPE_BEACON); +} + +/** + * ieee80211_is_atim - check if IEEE80211_FTYPE_MGMT && IEEE80211_STYPE_ATIM + * @fc: frame control bytes in little-endian byteorder + */ +static inline int ieee80211_is_atim(__le16 fc) +{ + return (fc & cpu_to_le16(IEEE80211_FCTL_FTYPE | IEEE80211_FCTL_STYPE)) == + cpu_to_le16(IEEE80211_FTYPE_MGMT | IEEE80211_STYPE_ATIM); +} + +/** + * ieee80211_is_disassoc - check if IEEE80211_FTYPE_MGMT && IEEE80211_STYPE_DISASSOC + * @fc: frame control bytes in little-endian byteorder + */ +static inline int ieee80211_is_disassoc(__le16 fc) +{ + return (fc & cpu_to_le16(IEEE80211_FCTL_FTYPE | IEEE80211_FCTL_STYPE)) == + cpu_to_le16(IEEE80211_FTYPE_MGMT | IEEE80211_STYPE_DISASSOC); +} + +/** + * ieee80211_is_auth - check if IEEE80211_FTYPE_MGMT && IEEE80211_STYPE_AUTH + * @fc: frame control bytes in little-endian byteorder + */ +static inline int ieee80211_is_auth(__le16 fc) +{ + return (fc & cpu_to_le16(IEEE80211_FCTL_FTYPE | IEEE80211_FCTL_STYPE)) == + cpu_to_le16(IEEE80211_FTYPE_MGMT | IEEE80211_STYPE_AUTH); +} + +/** + * ieee80211_is_deauth - check if IEEE80211_FTYPE_MGMT && IEEE80211_STYPE_DEAUTH + * @fc: frame control bytes in little-endian byteorder + */ +static inline int ieee80211_is_deauth(__le16 fc) +{ + return (fc & cpu_to_le16(IEEE80211_FCTL_FTYPE | IEEE80211_FCTL_STYPE)) == + cpu_to_le16(IEEE80211_FTYPE_MGMT | IEEE80211_STYPE_DEAUTH); +} + +/** + * ieee80211_is_action - check if IEEE80211_FTYPE_MGMT && IEEE80211_STYPE_ACTION + * @fc: frame control bytes in little-endian byteorder + */ +static inline int ieee80211_is_action(__le16 fc) +{ + return (fc & cpu_to_le16(IEEE80211_FCTL_FTYPE | IEEE80211_FCTL_STYPE)) == + cpu_to_le16(IEEE80211_FTYPE_MGMT | IEEE80211_STYPE_ACTION); +} + +/** + * ieee80211_is_back_req - check if IEEE80211_FTYPE_CTL && IEEE80211_STYPE_BACK_REQ + * @fc: frame control bytes in little-endian byteorder + */ +static inline int ieee80211_is_back_req(__le16 fc) +{ + return (fc & cpu_to_le16(IEEE80211_FCTL_FTYPE | IEEE80211_FCTL_STYPE)) == + cpu_to_le16(IEEE80211_FTYPE_CTL | IEEE80211_STYPE_BACK_REQ); +} + +/** + * ieee80211_is_back - check if IEEE80211_FTYPE_CTL && IEEE80211_STYPE_BACK + * @fc: frame control bytes in little-endian byteorder + */ +static inline int ieee80211_is_back(__le16 fc) +{ + return (fc & cpu_to_le16(IEEE80211_FCTL_FTYPE | IEEE80211_FCTL_STYPE)) == + cpu_to_le16(IEEE80211_FTYPE_CTL | IEEE80211_STYPE_BACK); +} + +/** + * ieee80211_is_pspoll - check if IEEE80211_FTYPE_CTL && IEEE80211_STYPE_PSPOLL + * @fc: frame control bytes in little-endian byteorder + */ +static inline int ieee80211_is_pspoll(__le16 fc) +{ + return (fc & cpu_to_le16(IEEE80211_FCTL_FTYPE | IEEE80211_FCTL_STYPE)) == + cpu_to_le16(IEEE80211_FTYPE_CTL | IEEE80211_STYPE_PSPOLL); +} + +/** + * ieee80211_is_rts - check if IEEE80211_FTYPE_CTL && IEEE80211_STYPE_RTS + * @fc: frame control bytes in little-endian byteorder + */ +static inline int ieee80211_is_rts(__le16 fc) +{ + return (fc & cpu_to_le16(IEEE80211_FCTL_FTYPE | IEEE80211_FCTL_STYPE)) == + cpu_to_le16(IEEE80211_FTYPE_CTL | IEEE80211_STYPE_RTS); +} + +/** + * 
ieee80211_is_cts - check if IEEE80211_FTYPE_CTL && IEEE80211_STYPE_CTS + * @fc: frame control bytes in little-endian byteorder + */ +static inline int ieee80211_is_cts(__le16 fc) +{ + return (fc & cpu_to_le16(IEEE80211_FCTL_FTYPE | IEEE80211_FCTL_STYPE)) == + cpu_to_le16(IEEE80211_FTYPE_CTL | IEEE80211_STYPE_CTS); +} + +/** + * ieee80211_is_ack - check if IEEE80211_FTYPE_CTL && IEEE80211_STYPE_ACK + * @fc: frame control bytes in little-endian byteorder + */ +static inline int ieee80211_is_ack(__le16 fc) +{ + return (fc & cpu_to_le16(IEEE80211_FCTL_FTYPE | IEEE80211_FCTL_STYPE)) == + cpu_to_le16(IEEE80211_FTYPE_CTL | IEEE80211_STYPE_ACK); +} + +/** + * ieee80211_is_cfend - check if IEEE80211_FTYPE_CTL && IEEE80211_STYPE_CFEND + * @fc: frame control bytes in little-endian byteorder + */ +static inline int ieee80211_is_cfend(__le16 fc) +{ + return (fc & cpu_to_le16(IEEE80211_FCTL_FTYPE | IEEE80211_FCTL_STYPE)) == + cpu_to_le16(IEEE80211_FTYPE_CTL | IEEE80211_STYPE_CFEND); +} + +/** + * ieee80211_is_cfendack - check if IEEE80211_FTYPE_CTL && IEEE80211_STYPE_CFENDACK + * @fc: frame control bytes in little-endian byteorder + */ +static inline int ieee80211_is_cfendack(__le16 fc) +{ + return (fc & cpu_to_le16(IEEE80211_FCTL_FTYPE | IEEE80211_FCTL_STYPE)) == + cpu_to_le16(IEEE80211_FTYPE_CTL | IEEE80211_STYPE_CFENDACK); +} + +/** + * ieee80211_is_nullfunc - check if FTYPE=IEEE80211_FTYPE_DATA and STYPE=IEEE80211_STYPE_NULLFUNC + * @fc: frame control bytes in little-endian byteorder + */ +static inline int ieee80211_is_nullfunc(__le16 fc) +{ + return (fc & cpu_to_le16(IEEE80211_FCTL_FTYPE | IEEE80211_FCTL_STYPE)) == + cpu_to_le16(IEEE80211_FTYPE_DATA | IEEE80211_STYPE_NULLFUNC); +} struct ieee80211s_hdr { u8 flags; @@ -119,6 +471,40 @@ struct ieee80211s_hdr { u8 eaddr3[6]; } __attribute__ ((packed)); +/** + * struct ieee80211_quiet_ie + * + * This structure refers to "Quiet information element" + */ +struct ieee80211_quiet_ie { + u8 count; + u8 period; + __le16 duration; + __le16 offset; +} __attribute__ ((packed)); + +/** + * struct ieee80211_msrment_ie + * + * This structure refers to "Measurement Request/Report information element" + */ +struct ieee80211_msrment_ie { + u8 token; + u8 mode; + u8 type; + u8 request[0]; +} __attribute__ ((packed)); + +/** + * struct ieee80211_channel_sw_ie + * + * This structure refers to "Channel Switch Announcement information element" + */ +struct ieee80211_channel_sw_ie { + u8 mode; + u8 new_ch_num; + u8 count; +} __attribute__ ((packed)); struct ieee80211_mgmt { __le16 frame_control; @@ -194,13 +580,18 @@ struct ieee80211_mgmt { u8 action_code; u8 element_id; u8 length; - u8 switch_mode; - u8 new_chan; - u8 switch_count; + struct ieee80211_channel_sw_ie sw_elem; } __attribute__((packed)) chan_switch; struct{ u8 action_code; u8 dialog_token; + u8 element_id; + u8 length; + struct ieee80211_msrment_ie msr_elem; + } __attribute__((packed)) measurement; + struct{ + u8 action_code; + u8 dialog_token; __le16 capab; __le16 timeout; __le16 start_seq_num; @@ -269,6 +660,10 @@ struct ieee80211_bar { __le16 start_seq_num; } __attribute__((packed)); +/* 802.11 BAR control masks */ +#define IEEE80211_BAR_CTRL_ACK_POLICY_NORMAL 0x0000 +#define IEEE80211_BAR_CTRL_CBMTID_COMPRESSED_BA 0x0004 + /** * struct ieee80211_ht_cap - HT capabilities * @@ -306,20 +701,33 @@ struct ieee80211_ht_addt_info { #define IEEE80211_HT_CAP_SGI_40 0x0040 #define IEEE80211_HT_CAP_DELAY_BA 0x0400 #define IEEE80211_HT_CAP_MAX_AMSDU 0x0800 +/* 802.11n HT capability AMPDU settings */ #define 
IEEE80211_HT_CAP_AMPDU_FACTOR 0x03 #define IEEE80211_HT_CAP_AMPDU_DENSITY 0x1C +/* 802.11n HT capability MSC set */ +#define IEEE80211_SUPP_MCS_SET_UEQM 4 +#define IEEE80211_HT_CAP_MAX_STREAMS 4 +#define IEEE80211_SUPP_MCS_SET_LEN 10 +/* maximum streams the spec allows */ +#define IEEE80211_HT_CAP_MCS_TX_DEFINED 0x01 +#define IEEE80211_HT_CAP_MCS_TX_RX_DIFF 0x02 +#define IEEE80211_HT_CAP_MCS_TX_STREAMS 0x0C +#define IEEE80211_HT_CAP_MCS_TX_UEQM 0x10 /* 802.11n HT IE masks */ #define IEEE80211_HT_IE_CHA_SEC_OFFSET 0x03 +#define IEEE80211_HT_IE_CHA_SEC_NONE 0x00 +#define IEEE80211_HT_IE_CHA_SEC_ABOVE 0x01 +#define IEEE80211_HT_IE_CHA_SEC_BELOW 0x03 #define IEEE80211_HT_IE_CHA_WIDTH 0x04 #define IEEE80211_HT_IE_HT_PROTECTION 0x0003 #define IEEE80211_HT_IE_NON_GF_STA_PRSNT 0x0004 #define IEEE80211_HT_IE_NON_HT_STA_PRSNT 0x0010 /* MIMO Power Save Modes */ -#define WLAN_HT_CAP_MIMO_PS_STATIC 0 -#define WLAN_HT_CAP_MIMO_PS_DYNAMIC 1 -#define WLAN_HT_CAP_MIMO_PS_INVALID 2 -#define WLAN_HT_CAP_MIMO_PS_DISABLED 3 +#define WLAN_HT_CAP_MIMO_PS_STATIC 0 +#define WLAN_HT_CAP_MIMO_PS_DYNAMIC 1 +#define WLAN_HT_CAP_MIMO_PS_INVALID 2 +#define WLAN_HT_CAP_MIMO_PS_DISABLED 3 /* Authentication algorithms */ #define WLAN_AUTH_OPEN 0 @@ -337,11 +745,21 @@ struct ieee80211_ht_addt_info { #define WLAN_CAPABILITY_SHORT_PREAMBLE (1<<5) #define WLAN_CAPABILITY_PBCC (1<<6) #define WLAN_CAPABILITY_CHANNEL_AGILITY (1<<7) + /* 802.11h */ #define WLAN_CAPABILITY_SPECTRUM_MGMT (1<<8) #define WLAN_CAPABILITY_QOS (1<<9) #define WLAN_CAPABILITY_SHORT_SLOT_TIME (1<<10) #define WLAN_CAPABILITY_DSSS_OFDM (1<<13) +/* measurement */ +#define IEEE80211_SPCT_MSR_RPRT_MODE_LATE (1<<0) +#define IEEE80211_SPCT_MSR_RPRT_MODE_INCAPABLE (1<<1) +#define IEEE80211_SPCT_MSR_RPRT_MODE_REFUSED (1<<2) + +#define IEEE80211_SPCT_MSR_RPRT_TYPE_BASIC 0 +#define IEEE80211_SPCT_MSR_RPRT_TYPE_CCA 1 +#define IEEE80211_SPCT_MSR_RPRT_TYPE_RPI 2 + /* 802.11g ERP information element */ #define WLAN_ERP_NON_ERP_PRESENT (1<<0) @@ -512,6 +930,15 @@ enum ieee80211_category { WLAN_CATEGORY_WMM = 17, }; +/* SPECTRUM_MGMT action code */ +enum ieee80211_spectrum_mgmt_actioncode { + WLAN_ACTION_SPCT_MSR_REQ = 0, + WLAN_ACTION_SPCT_MSR_RPRT = 1, + WLAN_ACTION_SPCT_TPC_REQ = 2, + WLAN_ACTION_SPCT_TPC_RPRT = 3, + WLAN_ACTION_SPCT_CHL_SWITCH = 4, +}; + /* BACK action code */ enum ieee80211_back_actioncode { WLAN_ACTION_ADDBA_REQ = 0, @@ -540,63 +967,57 @@ enum ieee80211_back_parties { #define WLAN_MAX_KEY_LEN 32 /** + * ieee80211_get_qos_ctl - get pointer to qos control bytes + * @hdr: the frame + * + * The qos ctrl bytes come after the frame_control, duration, seq_num + * and 3 or 4 addresses of length ETH_ALEN. + * 3 addr: 2 + 2 + 2 + 3*6 = 24 + * 4 addr: 2 + 2 + 2 + 4*6 = 30 + */ +static inline u8 *ieee80211_get_qos_ctl(struct ieee80211_hdr *hdr) +{ + if (ieee80211_has_a4(hdr->frame_control)) + return (u8 *)hdr + 30; + else + return (u8 *)hdr + 24; +} + +/** * ieee80211_get_SA - get pointer to SA + * @hdr: the frame * * Given an 802.11 frame, this function returns the offset * to the source address (SA). It does not verify that the * header is long enough to contain the address, and the * header must be long enough to contain the frame control * field. 
- * - * @hdr: the frame */ static inline u8 *ieee80211_get_SA(struct ieee80211_hdr *hdr) { - u8 *raw = (u8 *) hdr; - u8 tofrom = (*(raw+1)) & 3; /* get the TODS and FROMDS bits */ - - switch (tofrom) { - case 2: - return hdr->addr3; - case 3: - return hdr->addr4; - } + if (ieee80211_has_a4(hdr->frame_control)) + return hdr->addr4; + if (ieee80211_has_fromds(hdr->frame_control)) + return hdr->addr3; return hdr->addr2; } /** * ieee80211_get_DA - get pointer to DA + * @hdr: the frame * * Given an 802.11 frame, this function returns the offset * to the destination address (DA). It does not verify that * the header is long enough to contain the address, and the * header must be long enough to contain the frame control * field. - * - * @hdr: the frame */ static inline u8 *ieee80211_get_DA(struct ieee80211_hdr *hdr) { - u8 *raw = (u8 *) hdr; - u8 to_ds = (*(raw+1)) & 1; /* get the TODS bit */ - - if (to_ds) + if (ieee80211_has_tods(hdr->frame_control)) return hdr->addr3; - return hdr->addr1; -} - -/** - * ieee80211_get_morefrag - determine whether the MOREFRAGS bit is set - * - * This function determines whether the "more fragments" bit is set - * in the frame. - * - * @hdr: the frame - */ -static inline int ieee80211_get_morefrag(struct ieee80211_hdr *hdr) -{ - return (le16_to_cpu(hdr->frame_control) & - IEEE80211_FCTL_MOREFRAGS) != 0; + else + return hdr->addr1; } #endif /* IEEE80211_H */ diff --git a/include/linux/if_bridge.h b/include/linux/if_bridge.h index 950e13d09e06..6badb3e2c4e4 100644 --- a/include/linux/if_bridge.h +++ b/include/linux/if_bridge.h @@ -4,8 +4,6 @@ * Authors: * Lennert Buytenhek <buytenh@gnu.org> * - * $Id: if_bridge.h,v 1.1 2000/02/18 16:47:01 davem Exp $ - * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version diff --git a/include/linux/if_packet.h b/include/linux/if_packet.h index ad09609227ff..18db0668065a 100644 --- a/include/linux/if_packet.h +++ b/include/linux/if_packet.h @@ -43,6 +43,9 @@ struct sockaddr_ll #define PACKET_COPY_THRESH 7 #define PACKET_AUXDATA 8 #define PACKET_ORIGDEV 9 +#define PACKET_VERSION 10 +#define PACKET_HDRLEN 11 +#define PACKET_RESERVE 12 struct tpacket_stats { @@ -57,6 +60,7 @@ struct tpacket_auxdata __u32 tp_snaplen; __u16 tp_mac; __u16 tp_net; + __u16 tp_vlan_tci; }; struct tpacket_hdr @@ -79,6 +83,26 @@ struct tpacket_hdr #define TPACKET_ALIGN(x) (((x)+TPACKET_ALIGNMENT-1)&~(TPACKET_ALIGNMENT-1)) #define TPACKET_HDRLEN (TPACKET_ALIGN(sizeof(struct tpacket_hdr)) + sizeof(struct sockaddr_ll)) +struct tpacket2_hdr +{ + __u32 tp_status; + __u32 tp_len; + __u32 tp_snaplen; + __u16 tp_mac; + __u16 tp_net; + __u32 tp_sec; + __u32 tp_nsec; + __u16 tp_vlan_tci; +}; + +#define TPACKET2_HDRLEN (TPACKET_ALIGN(sizeof(struct tpacket2_hdr)) + sizeof(struct sockaddr_ll)) + +enum tpacket_versions +{ + TPACKET_V1, + TPACKET_V2, +}; + /* Frame structure: diff --git a/include/linux/if_ppp.h b/include/linux/if_ppp.h index 0f2f70d4e48c..c3b1f8562709 100644 --- a/include/linux/if_ppp.h +++ b/include/linux/if_ppp.h @@ -1,5 +1,3 @@ -/* $Id: if_ppp.h,v 1.21 2000/03/27 06:03:36 paulus Exp $ */ - /* * if_ppp.h - Point-to-Point Protocol definitions. 
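
As an illustration of the new frame-control helpers and accessors added to ieee80211.h above, a receive path can test type/subtype and pick out addresses without byte-swapping frame_control by hand. The function below is a sketch and not part of this patch; only the ieee80211_* names come from the header.

/* Sketch only: classify a frame using the new little-endian helpers. */
static const u8 *example_frame_source(struct ieee80211_hdr *hdr)
{
	__le16 fc = hdr->frame_control;

	/* Beacons and probe responses feed scan results; both carry an SA. */
	if (ieee80211_is_beacon(fc) || ieee80211_is_probe_resp(fc))
		return ieee80211_get_SA(hdr);

	/* Null-function data frames are used for power-save signalling. */
	if (ieee80211_is_nullfunc(fc))
		return ieee80211_get_SA(hdr);

	return NULL;
}
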
* diff --git a/include/linux/if_tun.h b/include/linux/if_tun.h index 8c71fe2fb1f5..4c6307ad9fdb 100644 --- a/include/linux/if_tun.h +++ b/include/linux/if_tun.h @@ -11,14 +11,13 @@ * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. - * - * $Id: if_tun.h,v 1.2 2001/06/01 18:39:47 davem Exp $ */ #ifndef __IF_TUN_H #define __IF_TUN_H #include <linux/types.h> +#include <linux/if_ether.h> /* Read queue size */ #define TUN_READQ_SIZE 500 @@ -33,6 +32,7 @@ #define TUN_NO_PI 0x0040 #define TUN_ONE_QUEUE 0x0080 #define TUN_PERSIST 0x0100 +#define TUN_VNET_HDR 0x0200 /* Ioctl defines */ #define TUNSETNOCSUM _IOW('T', 200, int) @@ -42,17 +42,43 @@ #define TUNSETOWNER _IOW('T', 204, int) #define TUNSETLINK _IOW('T', 205, int) #define TUNSETGROUP _IOW('T', 206, int) +#define TUNGETFEATURES _IOR('T', 207, unsigned int) +#define TUNSETOFFLOAD _IOW('T', 208, unsigned int) +#define TUNSETTXFILTER _IOW('T', 209, unsigned int) /* TUNSETIFF ifr flags */ #define IFF_TUN 0x0001 #define IFF_TAP 0x0002 #define IFF_NO_PI 0x1000 #define IFF_ONE_QUEUE 0x2000 +#define IFF_VNET_HDR 0x4000 + +/* Features for GSO (TUNSETOFFLOAD). */ +#define TUN_F_CSUM 0x01 /* You can hand me unchecksummed packets. */ +#define TUN_F_TSO4 0x02 /* I can handle TSO for IPv4 packets */ +#define TUN_F_TSO6 0x04 /* I can handle TSO for IPv6 packets */ +#define TUN_F_TSO_ECN 0x08 /* I can handle TSO with ECN bits. */ +/* Protocol info prepended to the packets (when IFF_NO_PI is not set) */ +#define TUN_PKT_STRIP 0x0001 struct tun_pi { - unsigned short flags; + __u16 flags; __be16 proto; }; -#define TUN_PKT_STRIP 0x0001 + +/* + * Filter spec (used for SETXXFILTER ioctls) + * This stuff is applicable only to the TAP (Ethernet) devices. + * If the count is zero the filter is disabled and the driver accepts + * all packets (promisc mode). + * If the filter is enabled in order to accept broadcast packets + * broadcast addr must be explicitly included in the addr list. + */ +#define TUN_FLT_ALLMULTI 0x0001 /* Accept all multicast packets */ +struct tun_filter { + __u16 flags; /* TUN_FLT_ flags see above */ + __u16 count; /* Number of addresses */ + __u8 addr[0][ETH_ALEN]; +}; #endif /* __IF_TUN_H */ diff --git a/include/linux/if_vlan.h b/include/linux/if_vlan.h index 15ace02b7b24..9e7b49b8062d 100644 --- a/include/linux/if_vlan.h +++ b/include/linux/if_vlan.h @@ -14,10 +14,6 @@ #define _LINUX_IF_VLAN_H_ #ifdef __KERNEL__ - -/* externally defined structs */ -struct hlist_node; - #include <linux/netdevice.h> #include <linux/etherdevice.h> @@ -91,7 +87,7 @@ struct vlan_group { }; static inline struct net_device *vlan_group_get_device(struct vlan_group *vg, - unsigned int vlan_id) + u16 vlan_id) { struct net_device **array; array = vg->vlan_devices_arrays[vlan_id / VLAN_GROUP_ARRAY_PART_LEN]; @@ -99,7 +95,7 @@ static inline struct net_device *vlan_group_get_device(struct vlan_group *vg, } static inline void vlan_group_set_device(struct vlan_group *vg, - unsigned int vlan_id, + u16 vlan_id, struct net_device *dev) { struct net_device **array; @@ -109,164 +105,81 @@ static inline void vlan_group_set_device(struct vlan_group *vg, array[vlan_id % VLAN_GROUP_ARRAY_PART_LEN] = dev; } -struct vlan_priority_tci_mapping { - u32 priority; - unsigned short vlan_qos; /* This should be shifted when first set, so we only do it - * at provisioning time. 
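
The new if_tun.h ioctls above are driven from userspace. The routine below is a sketch of probing TUNGETFEATURES and then enabling checksum/TSO offload on a tap device; the interface name handling and error paths are illustrative, and offloads are only requested when the kernel accepts IFF_VNET_HDR.

/* Sketch only: userspace use of TUNGETFEATURES / TUNSETOFFLOAD. */
#include <fcntl.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/if.h>
#include <linux/if_tun.h>

static int example_open_tap(const char *name)
{
	struct ifreq ifr;
	unsigned int features = 0;
	int fd = open("/dev/net/tun", O_RDWR);

	if (fd < 0)
		return -1;

	/* TUNGETFEATURES reports which IFF_* flags this kernel accepts. */
	if (ioctl(fd, TUNGETFEATURES, &features) < 0)
		features = 0;

	memset(&ifr, 0, sizeof(ifr));
	ifr.ifr_flags = IFF_TAP | IFF_NO_PI;
	if (features & IFF_VNET_HDR)
		ifr.ifr_flags |= IFF_VNET_HDR;
	strncpy(ifr.ifr_name, name, IFNAMSIZ - 1);
	if (ioctl(fd, TUNSETIFF, &ifr) < 0) {
		close(fd);
		return -1;
	}

	/* With the virtio-style header enabled, GSO packets may be handed
	 * over, so advertise what this reader can parse. */
	if (ifr.ifr_flags & IFF_VNET_HDR)
		ioctl(fd, TUNSETOFFLOAD, TUN_F_CSUM | TUN_F_TSO4 | TUN_F_TSO6);

	return fd;
}
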
- * ((skb->priority << 13) & 0xE000) - */ - struct vlan_priority_tci_mapping *next; -}; +#define vlan_tx_tag_present(__skb) ((__skb)->vlan_tci) +#define vlan_tx_tag_get(__skb) ((__skb)->vlan_tci) -/* Holds information that makes sense if this device is a VLAN device. */ -struct vlan_dev_info { - /** This will be the mapping that correlates skb->priority to - * 3 bits of VLAN QOS tags... - */ - unsigned int nr_ingress_mappings; - u32 ingress_priority_map[8]; - - unsigned int nr_egress_mappings; - struct vlan_priority_tci_mapping *egress_priority_map[16]; /* hash table */ - - unsigned short vlan_id; /* The VLAN Identifier for this interface. */ - unsigned short flags; /* (1 << 0) re_order_header This option will cause the - * VLAN code to move around the ethernet header on - * ingress to make the skb look **exactly** like it - * came in from an ethernet port. This destroys some of - * the VLAN information in the skb, but it fixes programs - * like DHCP that use packet-filtering and don't understand - * 802.1Q - */ - struct net_device *real_dev; /* the underlying device/interface */ - unsigned char real_dev_addr[ETH_ALEN]; - struct proc_dir_entry *dent; /* Holds the proc data */ - unsigned long cnt_inc_headroom_on_tx; /* How many times did we have to grow the skb on TX. */ - unsigned long cnt_encap_on_xmit; /* How many times did we have to encapsulate the skb on TX. */ -}; +#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE) +extern struct net_device *vlan_dev_real_dev(const struct net_device *dev); +extern u16 vlan_dev_vlan_id(const struct net_device *dev); -static inline struct vlan_dev_info *vlan_dev_info(const struct net_device *dev) +extern int __vlan_hwaccel_rx(struct sk_buff *skb, struct vlan_group *grp, + u16 vlan_tci, int polling); +#else +static inline struct net_device *vlan_dev_real_dev(const struct net_device *dev) { - return netdev_priv(dev); + BUG(); + return NULL; } -/* inline functions */ -static inline __u32 vlan_get_ingress_priority(struct net_device *dev, - unsigned short vlan_tag) +static inline u16 vlan_dev_vlan_id(const struct net_device *dev) { - struct vlan_dev_info *vip = vlan_dev_info(dev); - - return vip->ingress_priority_map[(vlan_tag >> 13) & 0x7]; + BUG(); + return 0; } -/* VLAN tx hw acceleration helpers. */ -struct vlan_skb_tx_cookie { - u32 magic; - u32 vlan_tag; -}; - -#define VLAN_TX_COOKIE_MAGIC 0x564c414e /* "VLAN" in ascii. */ -#define VLAN_TX_SKB_CB(__skb) ((struct vlan_skb_tx_cookie *)&((__skb)->cb[0])) -#define vlan_tx_tag_present(__skb) \ - (VLAN_TX_SKB_CB(__skb)->magic == VLAN_TX_COOKIE_MAGIC) -#define vlan_tx_tag_get(__skb) (VLAN_TX_SKB_CB(__skb)->vlan_tag) - -/* VLAN rx hw acceleration helper. This acts like netif_{rx,receive_skb}(). */ -static inline int __vlan_hwaccel_rx(struct sk_buff *skb, - struct vlan_group *grp, - unsigned short vlan_tag, int polling) +static inline int __vlan_hwaccel_rx(struct sk_buff *skb, struct vlan_group *grp, + u16 vlan_tci, int polling) { - struct net_device_stats *stats; - - if (skb_bond_should_drop(skb)) { - dev_kfree_skb_any(skb); - return NET_RX_DROP; - } - - skb->dev = vlan_group_get_device(grp, vlan_tag & VLAN_VID_MASK); - if (skb->dev == NULL) { - dev_kfree_skb_any(skb); - - /* Not NET_RX_DROP, this is not being dropped - * due to congestion. 
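
The two macros above replace the old VLAN_TX_SKB_CB cookie: the tag now travels in skb->vlan_tci, so a transmit path advertising NETIF_F_HW_VLAN_TX can read it directly. A sketch of that pattern follows; the descriptor layout and the example_* names are invented for illustration.

/* Sketch only: TX path of a driver with NETIF_F_HW_VLAN_TX set. */
static int example_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct example_tx_desc {	/* hypothetical hardware descriptor */
		u16 vlan_tag;
		u16 flags;
	} desc = { 0, 0 };

	if (vlan_tx_tag_present(skb)) {
		/* skb->vlan_tci was filled in by __vlan_hwaccel_put_tag() */
		desc.vlan_tag = vlan_tx_tag_get(skb);
		desc.flags |= 0x1;	/* hypothetical "insert VLAN tag" bit */
	}

	/* ... hand skb and desc to the hardware ... */
	return NETDEV_TX_OK;
}
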
- */ - return 0; - } - - skb->dev->last_rx = jiffies; - - stats = &skb->dev->stats; - stats->rx_packets++; - stats->rx_bytes += skb->len; - - skb->priority = vlan_get_ingress_priority(skb->dev, vlan_tag); - switch (skb->pkt_type) { - case PACKET_BROADCAST: - break; - - case PACKET_MULTICAST: - stats->multicast++; - break; - - case PACKET_OTHERHOST: - /* Our lower layer thinks this is not local, let's make sure. - * This allows the VLAN to have a different MAC than the underlying - * device, and still route correctly. - */ - if (!compare_ether_addr(eth_hdr(skb)->h_dest, - skb->dev->dev_addr)) - skb->pkt_type = PACKET_HOST; - break; - }; - - return (polling ? netif_receive_skb(skb) : netif_rx(skb)); + BUG(); + return NET_XMIT_SUCCESS; } +#endif +/** + * vlan_hwaccel_rx - netif_rx wrapper for VLAN RX acceleration + * @skb: buffer + * @grp: vlan group + * @vlan_tci: VLAN TCI as received from the card + */ static inline int vlan_hwaccel_rx(struct sk_buff *skb, struct vlan_group *grp, - unsigned short vlan_tag) + u16 vlan_tci) { - return __vlan_hwaccel_rx(skb, grp, vlan_tag, 0); + return __vlan_hwaccel_rx(skb, grp, vlan_tci, 0); } +/** + * vlan_hwaccel_receive_skb - netif_receive_skb wrapper for VLAN RX acceleration + * @skb: buffer + * @grp: vlan group + * @vlan_tci: VLAN TCI as received from the card + */ static inline int vlan_hwaccel_receive_skb(struct sk_buff *skb, struct vlan_group *grp, - unsigned short vlan_tag) + u16 vlan_tci) { - return __vlan_hwaccel_rx(skb, grp, vlan_tag, 1); + return __vlan_hwaccel_rx(skb, grp, vlan_tci, 1); } /** * __vlan_put_tag - regular VLAN tag inserting * @skb: skbuff to tag - * @tag: VLAN tag to insert + * @vlan_tci: VLAN TCI to insert * * Inserts the VLAN tag into @skb as part of the payload * Returns a VLAN tagged skb. If a new skb is created, @skb is freed. - * + * * Following the skb_unshare() example, in case of error, the calling function * doesn't have to worry about freeing the original skb. */ -static inline struct sk_buff *__vlan_put_tag(struct sk_buff *skb, unsigned short tag) +static inline struct sk_buff *__vlan_put_tag(struct sk_buff *skb, u16 vlan_tci) { struct vlan_ethhdr *veth; - if (skb_headroom(skb) < VLAN_HLEN) { - struct sk_buff *sk_tmp = skb; - skb = skb_realloc_headroom(sk_tmp, VLAN_HLEN); - kfree_skb(sk_tmp); - if (!skb) { - printk(KERN_ERR "vlan: failed to realloc headroom\n"); - return NULL; - } - } else { - skb = skb_unshare(skb, GFP_ATOMIC); - if (!skb) { - printk(KERN_ERR "vlan: failed to unshare skbuff\n"); - return NULL; - } + if (skb_cow_head(skb, VLAN_HLEN) < 0) { + kfree_skb(skb); + return NULL; } - veth = (struct vlan_ethhdr *)skb_push(skb, VLAN_HLEN); /* Move the mac addresses to the beginning of the new header. 
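
On receive the wrappers above keep their old shape; only the tag argument becomes a u16 TCI. A NAPI completion routine might hand off a hardware-stripped tag as sketched below; the adapter structure is hypothetical.

/* Sketch only: RX hand-off for hardware-stripped VLAN tags. */
struct example_adapter {		/* hypothetical driver private data */
	struct net_device *netdev;
	struct vlan_group *vlgrp;
};

static void example_rx_complete(struct example_adapter *adapter,
				struct sk_buff *skb, u16 tci, bool tagged)
{
	skb->protocol = eth_type_trans(skb, adapter->netdev);

	if (tagged && adapter->vlgrp)
		vlan_hwaccel_receive_skb(skb, adapter->vlgrp, tci);
	else
		netif_receive_skb(skb);
}
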
*/ @@ -275,12 +188,10 @@ static inline struct sk_buff *__vlan_put_tag(struct sk_buff *skb, unsigned short /* first, the ethernet type */ veth->h_vlan_proto = htons(ETH_P_8021Q); - /* now, the tag */ - veth->h_vlan_TCI = htons(tag); + /* now, the TCI */ + veth->h_vlan_TCI = htons(vlan_tci); skb->protocol = htons(ETH_P_8021Q); - skb->mac_header -= VLAN_HLEN; - skb->network_header -= VLAN_HLEN; return skb; } @@ -288,18 +199,14 @@ static inline struct sk_buff *__vlan_put_tag(struct sk_buff *skb, unsigned short /** * __vlan_hwaccel_put_tag - hardware accelerated VLAN inserting * @skb: skbuff to tag - * @tag: VLAN tag to insert + * @vlan_tci: VLAN TCI to insert * - * Puts the VLAN tag in @skb->cb[] and lets the device do the rest + * Puts the VLAN TCI in @skb->vlan_tci and lets the device do the rest */ -static inline struct sk_buff *__vlan_hwaccel_put_tag(struct sk_buff *skb, unsigned short tag) +static inline struct sk_buff *__vlan_hwaccel_put_tag(struct sk_buff *skb, + u16 vlan_tci) { - struct vlan_skb_tx_cookie *cookie; - - cookie = VLAN_TX_SKB_CB(skb); - cookie->magic = VLAN_TX_COOKIE_MAGIC; - cookie->vlan_tag = tag; - + skb->vlan_tci = vlan_tci; return skb; } @@ -308,28 +215,28 @@ static inline struct sk_buff *__vlan_hwaccel_put_tag(struct sk_buff *skb, unsign /** * vlan_put_tag - inserts VLAN tag according to device features * @skb: skbuff to tag - * @tag: VLAN tag to insert + * @vlan_tci: VLAN TCI to insert * * Assumes skb->dev is the target that will xmit this frame. * Returns a VLAN tagged skb. */ -static inline struct sk_buff *vlan_put_tag(struct sk_buff *skb, unsigned short tag) +static inline struct sk_buff *vlan_put_tag(struct sk_buff *skb, u16 vlan_tci) { if (skb->dev->features & NETIF_F_HW_VLAN_TX) { - return __vlan_hwaccel_put_tag(skb, tag); + return __vlan_hwaccel_put_tag(skb, vlan_tci); } else { - return __vlan_put_tag(skb, tag); + return __vlan_put_tag(skb, vlan_tci); } } /** * __vlan_get_tag - get the VLAN ID that is part of the payload * @skb: skbuff to query - * @tag: buffer to store vlaue - * + * @vlan_tci: buffer to store vlaue + * * Returns error if the skb is not of VLAN type */ -static inline int __vlan_get_tag(const struct sk_buff *skb, unsigned short *tag) +static inline int __vlan_get_tag(const struct sk_buff *skb, u16 *vlan_tci) { struct vlan_ethhdr *veth = (struct vlan_ethhdr *)skb->data; @@ -337,29 +244,25 @@ static inline int __vlan_get_tag(const struct sk_buff *skb, unsigned short *tag) return -EINVAL; } - *tag = ntohs(veth->h_vlan_TCI); - + *vlan_tci = ntohs(veth->h_vlan_TCI); return 0; } /** * __vlan_hwaccel_get_tag - get the VLAN ID that is in @skb->cb[] * @skb: skbuff to query - * @tag: buffer to store vlaue - * - * Returns error if @skb->cb[] is not set correctly + * @vlan_tci: buffer to store vlaue + * + * Returns error if @skb->vlan_tci is not set correctly */ static inline int __vlan_hwaccel_get_tag(const struct sk_buff *skb, - unsigned short *tag) + u16 *vlan_tci) { - struct vlan_skb_tx_cookie *cookie; - - cookie = VLAN_TX_SKB_CB(skb); - if (cookie->magic == VLAN_TX_COOKIE_MAGIC) { - *tag = cookie->vlan_tag; + if (vlan_tx_tag_present(skb)) { + *vlan_tci = skb->vlan_tci; return 0; } else { - *tag = 0; + *vlan_tci = 0; return -EINVAL; } } @@ -369,16 +272,16 @@ static inline int __vlan_hwaccel_get_tag(const struct sk_buff *skb, /** * vlan_get_tag - get the VLAN ID from the skb * @skb: skbuff to query - * @tag: buffer to store vlaue - * + * @vlan_tci: buffer to store vlaue + * * Returns error if the skb is not VLAN tagged */ -static inline int 
vlan_get_tag(const struct sk_buff *skb, unsigned short *tag) +static inline int vlan_get_tag(const struct sk_buff *skb, u16 *vlan_tci) { if (skb->dev->features & NETIF_F_HW_VLAN_TX) { - return __vlan_hwaccel_get_tag(skb, tag); + return __vlan_hwaccel_get_tag(skb, vlan_tci); } else { - return __vlan_get_tag(skb, tag); + return __vlan_get_tag(skb, vlan_tci); } } @@ -402,6 +305,7 @@ enum vlan_ioctl_cmds { enum vlan_flags { VLAN_FLAG_REORDER_HDR = 0x1, + VLAN_FLAG_GVRP = 0x2, }; enum vlan_name_types { diff --git a/include/linux/igmp.h b/include/linux/igmp.h index f5a1a0db2e8e..7bb3c095c15b 100644 --- a/include/linux/igmp.h +++ b/include/linux/igmp.h @@ -228,7 +228,6 @@ extern int ip_mc_msfget(struct sock *sk, struct ip_msfilter *msf, extern int ip_mc_gsfget(struct sock *sk, struct group_filter *gsf, struct group_filter __user *optval, int __user *optlen); extern int ip_mc_sf_allow(struct sock *sk, __be32 local, __be32 rmt, int dif); -extern void ip_mr_init(void); extern void ip_mc_init_dev(struct in_device *); extern void ip_mc_destroy_dev(struct in_device *); extern void ip_mc_up(struct in_device *); diff --git a/include/linux/ihex.h b/include/linux/ihex.h new file mode 100644 index 000000000000..2baace2788a7 --- /dev/null +++ b/include/linux/ihex.h @@ -0,0 +1,74 @@ +/* + * Compact binary representation of ihex records. Some devices need their + * firmware loaded in strange orders rather than a single big blob, but + * actually parsing ihex-as-text within the kernel seems silly. Thus,... + */ + +#ifndef __LINUX_IHEX_H__ +#define __LINUX_IHEX_H__ + +#include <linux/types.h> +#include <linux/firmware.h> +#include <linux/device.h> + +/* Intel HEX files actually limit the length to 256 bytes, but we have + drivers which would benefit from using separate records which are + longer than that, so we extend to 16 bits of length */ +struct ihex_binrec { + __be32 addr; + __be16 len; + uint8_t data[0]; +} __attribute__((aligned(4))); + +/* Find the next record, taking into account the 4-byte alignment */ +static inline const struct ihex_binrec * +ihex_next_binrec(const struct ihex_binrec *rec) +{ + int next = ((be16_to_cpu(rec->len) + 5) & ~3) - 2; + rec = (void *)&rec->data[next]; + + return be16_to_cpu(rec->len) ? rec : NULL; +} + +/* Check that ihex_next_binrec() won't take us off the end of the image... */ +static inline int ihex_validate_fw(const struct firmware *fw) +{ + const struct ihex_binrec *rec; + size_t ofs = 0; + + while (ofs <= fw->size - sizeof(*rec)) { + rec = (void *)&fw->data[ofs]; + + /* Zero length marks end of records */ + if (!be16_to_cpu(rec->len)) + return 0; + + /* Point to next record... */ + ofs += (sizeof(*rec) + be16_to_cpu(rec->len) + 3) & ~3; + } + return -EINVAL; +} + +/* Request firmware and validate it so that we can trust we won't + * run off the end while reading records... 
*/ +static inline int request_ihex_firmware(const struct firmware **fw, + const char *fw_name, + struct device *dev) +{ + const struct firmware *lfw; + int ret; + + ret = request_firmware(&lfw, fw_name, dev); + if (ret) + return ret; + ret = ihex_validate_fw(lfw); + if (ret) { + dev_err(dev, "Firmware \"%s\" not valid IHEX records\n", + fw_name); + release_firmware(lfw); + return ret; + } + *fw = lfw; + return 0; +} +#endif /* __LINUX_IHEX_H__ */ diff --git a/include/linux/inet.h b/include/linux/inet.h index 1354080cf8cf..4cca05c9678e 100644 --- a/include/linux/inet.h +++ b/include/linux/inet.h @@ -44,6 +44,13 @@ #include <linux/types.h> +/* + * These mimic similar macros defined in user-space for inet_ntop(3). + * See /usr/include/netinet/in.h . + */ +#define INET_ADDRSTRLEN (16) +#define INET6_ADDRSTRLEN (48) + extern __be32 in_aton(const char *str); extern int in4_pton(const char *src, int srclen, u8 *dst, int delim, const char **end); extern int in6_pton(const char *src, int srclen, u8 *dst, int delim, const char **end); diff --git a/include/linux/init_task.h b/include/linux/init_task.h index 9927a88674a3..93c45acf249a 100644 --- a/include/linux/init_task.h +++ b/include/linux/init_task.h @@ -140,8 +140,8 @@ extern struct group_info init_groups; .nr_cpus_allowed = NR_CPUS, \ }, \ .tasks = LIST_HEAD_INIT(tsk.tasks), \ - .ptrace_children= LIST_HEAD_INIT(tsk.ptrace_children), \ - .ptrace_list = LIST_HEAD_INIT(tsk.ptrace_list), \ + .ptraced = LIST_HEAD_INIT(tsk.ptraced), \ + .ptrace_entry = LIST_HEAD_INIT(tsk.ptrace_entry), \ .real_parent = &tsk, \ .parent = &tsk, \ .children = LIST_HEAD_INIT(tsk.children), \ diff --git a/include/linux/input.h b/include/linux/input.h index d150c57e5f0a..a5802c9c81a4 100644 --- a/include/linux/input.h +++ b/include/linux/input.h @@ -373,6 +373,8 @@ struct input_absinfo { #define KEY_WIMAX 246 +/* Range 248 - 255 is reserved for special needs of AT keyboard driver */ + #define BTN_MISC 0x100 #define BTN_0 0x100 #define BTN_1 0x101 @@ -640,6 +642,8 @@ struct input_absinfo { #define SW_RFKILL_ALL 0x03 /* rfkill master switch, type "any" set = radio enabled */ #define SW_RADIO SW_RFKILL_ALL /* deprecated */ +#define SW_MICROPHONE_INSERT 0x04 /* set = inserted */ +#define SW_DOCK 0x05 /* set = plugged into dock */ #define SW_MAX 0x0f #define SW_CNT (SW_MAX+1) @@ -1215,11 +1219,6 @@ struct input_handle { struct list_head h_node; }; -#define to_dev(n) container_of(n, struct input_dev, node) -#define to_handler(n) container_of(n, struct input_handler, node) -#define to_handle(n) container_of(n, struct input_handle, d_node) -#define to_handle_h(n) container_of(n, struct input_handle, h_node) - struct input_dev *input_allocate_device(void); void input_free_device(struct input_dev *dev); diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h index f1fc7470d26c..62aa4f895abe 100644 --- a/include/linux/interrupt.h +++ b/include/linux/interrupt.h @@ -104,8 +104,11 @@ extern void enable_irq(unsigned int irq); #if defined(CONFIG_SMP) && defined(CONFIG_GENERIC_HARDIRQS) +extern cpumask_t irq_default_affinity; + extern int irq_set_affinity(unsigned int irq, cpumask_t cpumask); extern int irq_can_set_affinity(unsigned int irq); +extern int irq_select_affinity(unsigned int irq); #else /* CONFIG_SMP */ @@ -119,6 +122,8 @@ static inline int irq_can_set_affinity(unsigned int irq) return 0; } +static inline int irq_select_affinity(unsigned int irq) { return 0; } + #endif /* CONFIG_SMP && CONFIG_GENERIC_HARDIRQS */ #ifdef CONFIG_GENERIC_HARDIRQS @@ -285,12 +290,11 @@ 
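
Since request_ihex_firmware() above rejects any image whose record chain is not properly terminated, a driver can walk the records with ihex_next_binrec() without further bounds checks. The loader below is a sketch; the firmware name and the memcpy_toio() destination are placeholders.

/* Sketch only: load records validated by request_ihex_firmware(). */
static int example_load_ihex(struct device *dev, void __iomem *base)
{
	const struct firmware *fw;
	const struct ihex_binrec *rec;
	int ret;

	ret = request_ihex_firmware(&fw, "example-fw.ihex", dev);	/* name is illustrative */
	if (ret)
		return ret;

	/* Each record carries its own load address and length. */
	for (rec = (const void *)fw->data; rec; rec = ihex_next_binrec(rec))
		memcpy_toio(base + be32_to_cpu(rec->addr), rec->data,
			    be16_to_cpu(rec->len));

	release_firmware(fw);
	return 0;
}
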
enum struct softirq_action { void (*action)(struct softirq_action *); - void *data; }; asmlinkage void do_softirq(void); asmlinkage void __do_softirq(void); -extern void open_softirq(int nr, void (*action)(struct softirq_action*), void *data); +extern void open_softirq(int nr, void (*action)(struct softirq_action *)); extern void softirq_init(void); #define __raise_softirq_irqoff(nr) do { or_softirq_pending(1UL << (nr)); } while (0) extern void raise_softirq_irqoff(unsigned int nr); diff --git a/include/linux/iocontext.h b/include/linux/iocontext.h index 2b7a1187cb29..08b987bccf89 100644 --- a/include/linux/iocontext.h +++ b/include/linux/iocontext.h @@ -99,4 +99,22 @@ static inline struct io_context *ioc_task_link(struct io_context *ioc) return NULL; } +#ifdef CONFIG_BLOCK +int put_io_context(struct io_context *ioc); +void exit_io_context(void); +struct io_context *get_io_context(gfp_t gfp_flags, int node); +struct io_context *alloc_io_context(gfp_t gfp_flags, int node); +void copy_io_context(struct io_context **pdst, struct io_context **psrc); +#else +static inline void exit_io_context(void) +{ +} + +struct io_context; +static inline int put_io_context(struct io_context *ioc) +{ + return 1; +} +#endif + #endif diff --git a/include/linux/ioport.h b/include/linux/ioport.h index c6801bffe76d..2cd07cc29687 100644 --- a/include/linux/ioport.h +++ b/include/linux/ioport.h @@ -59,6 +59,7 @@ struct resource_list { #define IORESOURCE_IRQ_HIGHLEVEL (1<<2) #define IORESOURCE_IRQ_LOWLEVEL (1<<3) #define IORESOURCE_IRQ_SHAREABLE (1<<4) +#define IORESOURCE_IRQ_OPTIONAL (1<<5) /* PnP DMA specific bits (IORESOURCE_BITS) */ #define IORESOURCE_DMA_TYPE_MASK (3<<0) @@ -88,6 +89,10 @@ struct resource_list { #define IORESOURCE_MEM_SHADOWABLE (1<<5) /* dup: IORESOURCE_SHADOWABLE */ #define IORESOURCE_MEM_EXPANSIONROM (1<<6) +/* PnP I/O specific bits (IORESOURCE_BITS) */ +#define IORESOURCE_IO_16BIT_ADDR (1<<0) +#define IORESOURCE_IO_FIXED (1<<1) + /* PCI ROM control bits (IORESOURCE_BITS) */ #define IORESOURCE_ROM_ENABLE (1<<0) /* ROM is enabled, same as PCI_ROM_ADDRESS_ENABLE */ #define IORESOURCE_ROM_SHADOW (1<<1) /* ROM is copy at C000:0 */ diff --git a/include/linux/ip6_tunnel.h b/include/linux/ip6_tunnel.h index af3f4a70f3df..1e7cc4af40de 100644 --- a/include/linux/ip6_tunnel.h +++ b/include/linux/ip6_tunnel.h @@ -1,7 +1,3 @@ -/* - * $Id$ - */ - #ifndef _IP6_TUNNEL_H #define _IP6_TUNNEL_H diff --git a/include/linux/ipv6.h b/include/linux/ipv6.h index cde056e08181..641e026eee8f 100644 --- a/include/linux/ipv6.h +++ b/include/linux/ipv6.h @@ -123,6 +123,7 @@ struct ipv6hdr { struct in6_addr daddr; }; +#ifdef __KERNEL__ /* * This structure contains configuration options per IPv6 link. 
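
With the data cookie gone from struct softirq_action in the interrupt.h hunk above, handlers receive only the action pointer and open_softirq() drops its third argument. A before/after sketch follows; EXAMPLE_SOFTIRQ is a stand-in, since softirq numbers come from a fixed kernel enum.

/* Sketch only: the new two-argument open_softirq(). */
#define EXAMPLE_SOFTIRQ	TASKLET_SOFTIRQ		/* placeholder softirq number */

static void example_softirq_handler(struct softirq_action *h)
{
	/* Per-invocation state must come from per-cpu or global data;
	 * the h->data pointer no longer exists. */
}

static void __init example_softirq_setup(void)
{
	/* was: open_softirq(EXAMPLE_SOFTIRQ, example_softirq_handler, NULL); */
	open_softirq(EXAMPLE_SOFTIRQ, example_softirq_handler);
}
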
*/ @@ -163,8 +164,11 @@ struct ipv6_devconf { #ifdef CONFIG_IPV6_MROUTE __s32 mc_forwarding; #endif + __s32 disable_ipv6; + __s32 accept_dad; void *sysctl; }; +#endif /* index values for the variables in ipv6_devconf */ enum { @@ -194,6 +198,8 @@ enum { DEVCONF_OPTIMISTIC_DAD, DEVCONF_ACCEPT_SOURCE_ROUTE, DEVCONF_MC_FORWARDING, + DEVCONF_DISABLE_IPV6, + DEVCONF_ACCEPT_DAD, DEVCONF_MAX }; diff --git a/include/linux/irq.h b/include/linux/irq.h index 552e0ec269c9..8ccb462ea42c 100644 --- a/include/linux/irq.h +++ b/include/linux/irq.h @@ -244,15 +244,6 @@ static inline void set_balance_irq_affinity(unsigned int irq, cpumask_t mask) } #endif -#ifdef CONFIG_AUTO_IRQ_AFFINITY -extern int select_smp_affinity(unsigned int irq); -#else -static inline int select_smp_affinity(unsigned int irq) -{ - return 1; -} -#endif - extern int no_irq_affinity; static inline int irq_balancing_disabled(unsigned int irq) diff --git a/include/linux/irqflags.h b/include/linux/irqflags.h index e600c4e9b8c5..2b1c2e58566e 100644 --- a/include/linux/irqflags.h +++ b/include/linux/irqflags.h @@ -12,10 +12,10 @@ #define _LINUX_TRACE_IRQFLAGS_H #ifdef CONFIG_TRACE_IRQFLAGS - extern void trace_hardirqs_on(void); - extern void trace_hardirqs_off(void); extern void trace_softirqs_on(unsigned long ip); extern void trace_softirqs_off(unsigned long ip); + extern void trace_hardirqs_on(void); + extern void trace_hardirqs_off(void); # define trace_hardirq_context(p) ((p)->hardirq_context) # define trace_softirq_context(p) ((p)->softirq_context) # define trace_hardirqs_enabled(p) ((p)->hardirqs_enabled) @@ -41,6 +41,15 @@ # define INIT_TRACE_IRQFLAGS #endif +#if defined(CONFIG_IRQSOFF_TRACER) || \ + defined(CONFIG_PREEMPT_TRACER) + extern void stop_critical_timings(void); + extern void start_critical_timings(void); +#else +# define stop_critical_timings() do { } while (0) +# define start_critical_timings() do { } while (0) +#endif + #ifdef CONFIG_TRACE_IRQFLAGS_SUPPORT #include <asm/irqflags.h> diff --git a/include/linux/istallion.h b/include/linux/istallion.h index 5a84fe944b74..0d1840723249 100644 --- a/include/linux/istallion.h +++ b/include/linux/istallion.h @@ -51,25 +51,21 @@ */ struct stliport { unsigned long magic; + struct tty_port port; unsigned int portnr; unsigned int panelnr; unsigned int brdnr; unsigned long state; unsigned int devnr; - int flags; int baud_base; int custom_divisor; int close_delay; int closing_wait; - int refcount; int openwaitcnt; int rc; int argsize; void *argp; unsigned int rxmarkmsk; - struct tty_struct *tty; - wait_queue_head_t open_wait; - wait_queue_head_t close_wait; wait_queue_head_t raw_wait; struct asysigs asig; unsigned long addr; diff --git a/include/linux/jbd2.h b/include/linux/jbd2.h index d147f0f90360..3dd209007098 100644 --- a/include/linux/jbd2.h +++ b/include/linux/jbd2.h @@ -168,6 +168,8 @@ struct commit_header { unsigned char h_chksum_size; unsigned char h_padding[2]; __be32 h_chksum[JBD2_CHECKSUM_BYTES]; + __be64 h_commit_sec; + __be32 h_commit_nsec; }; /* @@ -379,6 +381,38 @@ static inline void jbd_unlock_bh_journal_head(struct buffer_head *bh) bit_spin_unlock(BH_JournalHead, &bh->b_state); } +/* Flags in jbd_inode->i_flags */ +#define __JI_COMMIT_RUNNING 0 +/* Commit of the inode data in progress. We use this flag to protect us from + * concurrent deletion of inode. 
We cannot use reference to inode for this + * since we cannot afford doing last iput() on behalf of kjournald + */ +#define JI_COMMIT_RUNNING (1 << __JI_COMMIT_RUNNING) + +/** + * struct jbd_inode is the structure linking inodes in ordered mode + * present in a transaction so that we can sync them during commit. + */ +struct jbd2_inode { + /* Which transaction does this inode belong to? Either the running + * transaction or the committing one. [j_list_lock] */ + transaction_t *i_transaction; + + /* Pointer to the running transaction modifying inode's data in case + * there is already a committing transaction touching it. [j_list_lock] */ + transaction_t *i_next_transaction; + + /* List of inodes in the i_transaction [j_list_lock] */ + struct list_head i_list; + + /* VFS inode this inode belongs to [constant during the lifetime + * of the structure] */ + struct inode *i_vfs_inode; + + /* Flags of inode [j_list_lock] */ + unsigned int i_flags; +}; + struct jbd2_revoke_table_s; /** @@ -509,24 +543,12 @@ struct transaction_s struct journal_head *t_reserved_list; /* - * Doubly-linked circular list of all buffers under writeout during - * commit [j_list_lock] - */ - struct journal_head *t_locked_list; - - /* * Doubly-linked circular list of all metadata buffers owned by this * transaction [j_list_lock] */ struct journal_head *t_buffers; /* - * Doubly-linked circular list of all data buffers still to be - * flushed before this transaction can be committed [j_list_lock] - */ - struct journal_head *t_sync_datalist; - - /* * Doubly-linked circular list of all forget buffers (superseded * buffers which we can un-checkpoint once this transaction commits) * [j_list_lock] @@ -565,6 +587,12 @@ struct transaction_s struct journal_head *t_log_list; /* + * List of inodes whose data we've modified in data=ordered mode. 
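
struct jbd2_inode above, together with t_inode_list and the jbd2_journal_init_jbd_inode()/jbd2_journal_file_inode() helpers declared further down in this jbd2.h hunk, replaces per-buffer BJ_SyncData tracking for data=ordered mode: the filesystem files whole inodes on the running transaction instead of individual data buffers. The sketch below shows the intended shape of that usage; the example_* names are hypothetical.

/* Sketch only: how a data=ordered filesystem might use jbd2_inode. */
struct example_inode_info {		/* hypothetical per-inode private data */
	struct jbd2_inode jinode;
	struct inode vfs_inode;
};

static void example_inode_init(struct example_inode_info *ei)
{
	/* Bind the jbd2 tracking structure to the VFS inode once. */
	jbd2_journal_init_jbd_inode(&ei->jinode, &ei->vfs_inode);
}

static int example_write_begin(handle_t *handle, struct example_inode_info *ei)
{
	/* File the inode on the transaction's t_inode_list so its dirty
	 * data is written back before the transaction commits. */
	return jbd2_journal_file_inode(handle, &ei->jinode);
}
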
+ * [j_list_lock] + */ + struct list_head t_inode_list; + + /* * Protects info related to handles */ spinlock_t t_handle_lock; @@ -1004,7 +1032,6 @@ extern int jbd2_journal_extend (handle_t *, int nblocks); extern int jbd2_journal_get_write_access(handle_t *, struct buffer_head *); extern int jbd2_journal_get_create_access (handle_t *, struct buffer_head *); extern int jbd2_journal_get_undo_access(handle_t *, struct buffer_head *); -extern int jbd2_journal_dirty_data (handle_t *, struct buffer_head *); extern int jbd2_journal_dirty_metadata (handle_t *, struct buffer_head *); extern void jbd2_journal_release_buffer (handle_t *, struct buffer_head *); extern int jbd2_journal_forget (handle_t *, struct buffer_head *); @@ -1044,6 +1071,10 @@ extern void jbd2_journal_ack_err (journal_t *); extern int jbd2_journal_clear_err (journal_t *); extern int jbd2_journal_bmap(journal_t *, unsigned long, unsigned long long *); extern int jbd2_journal_force_commit(journal_t *); +extern int jbd2_journal_file_inode(handle_t *handle, struct jbd2_inode *inode); +extern int jbd2_journal_begin_ordered_truncate(struct jbd2_inode *inode, loff_t new_size); +extern void jbd2_journal_init_jbd_inode(struct jbd2_inode *jinode, struct inode *inode); +extern void jbd2_journal_release_jbd_inode(journal_t *journal, struct jbd2_inode *jinode); /* * journal_head management @@ -1179,15 +1210,13 @@ static inline int jbd_space_needed(journal_t *journal) /* journaling buffer types */ #define BJ_None 0 /* Not journaled */ -#define BJ_SyncData 1 /* Normal data: flush before commit */ -#define BJ_Metadata 2 /* Normal journaled metadata */ -#define BJ_Forget 3 /* Buffer superseded by this transaction */ -#define BJ_IO 4 /* Buffer is for temporary IO use */ -#define BJ_Shadow 5 /* Buffer contents being shadowed to the log */ -#define BJ_LogCtl 6 /* Buffer contains log descriptors */ -#define BJ_Reserved 7 /* Buffer is reserved for access by journal */ -#define BJ_Locked 8 /* Locked for I/O during commit */ -#define BJ_Types 9 +#define BJ_Metadata 1 /* Normal journaled metadata */ +#define BJ_Forget 2 /* Buffer superseded by this transaction */ +#define BJ_IO 3 /* Buffer is for temporary IO use */ +#define BJ_Shadow 4 /* Buffer contents being shadowed to the log */ +#define BJ_LogCtl 5 /* Buffer contains log descriptors */ +#define BJ_Reserved 6 /* Buffer is reserved for access by journal */ +#define BJ_Types 7 extern int jbd_blocks_per_page(struct inode *inode); diff --git a/include/linux/joystick.h b/include/linux/joystick.h index e2d3a18af456..b5e051295a67 100644 --- a/include/linux/joystick.h +++ b/include/linux/joystick.h @@ -2,8 +2,6 @@ #define _LINUX_JOYSTICK_H /* - * $Id: joystick.h,v 1.3 2000/11/30 11:07:05 vojtech Exp $ - * * Copyright (C) 1996-2000 Vojtech Pavlik * * Sponsored by SuSE diff --git a/include/linux/kernel.h b/include/linux/kernel.h index 2e70006c7fa8..f9cd7a513f9c 100644 --- a/include/linux/kernel.h +++ b/include/linux/kernel.h @@ -187,9 +187,6 @@ asmlinkage int vprintk(const char *fmt, va_list args) __attribute__ ((format (printf, 1, 0))); asmlinkage int printk(const char * fmt, ...) __attribute__ ((format (printf, 1, 2))) __cold; -extern int log_buf_get_len(void); -extern int log_buf_read(int idx); -extern int log_buf_copy(char *dest, int idx, int len); extern int printk_ratelimit_jiffies; extern int printk_ratelimit_burst; @@ -205,9 +202,6 @@ static inline int vprintk(const char *s, va_list args) { return 0; } static inline int printk(const char *s, ...) 
__attribute__ ((format (printf, 1, 2))); static inline int __cold printk(const char *s, ...) { return 0; } -static inline int log_buf_get_len(void) { return 0; } -static inline int log_buf_read(int idx) { return 0; } -static inline int log_buf_copy(char *dest, int idx, int len) { return 0; } static inline int printk_ratelimit(void) { return 0; } static inline int __printk_ratelimit(int ratelimit_jiffies, \ int ratelimit_burst) { return 0; } @@ -216,7 +210,7 @@ static inline bool printk_timed_ratelimit(unsigned long *caller_jiffies, \ { return false; } #endif -extern void __attribute__((format(printf, 1, 2))) +extern void asmlinkage __attribute__((format(printf, 1, 2))) early_printk(const char *fmt, ...); unsigned long int_sqrt(unsigned long); diff --git a/include/linux/kernel_stat.h b/include/linux/kernel_stat.h index e8ffce898bf9..cf9f40a91c9c 100644 --- a/include/linux/kernel_stat.h +++ b/include/linux/kernel_stat.h @@ -1,11 +1,11 @@ #ifndef _LINUX_KERNEL_STAT_H #define _LINUX_KERNEL_STAT_H -#include <asm/irq.h> #include <linux/smp.h> #include <linux/threads.h> #include <linux/percpu.h> #include <linux/cpumask.h> +#include <asm/irq.h> #include <asm/cputime.h> /* diff --git a/include/linux/kmod.h b/include/linux/kmod.h index 5dc13848891b..0509c4ce4857 100644 --- a/include/linux/kmod.h +++ b/include/linux/kmod.h @@ -25,15 +25,16 @@ #define KMOD_PATH_LEN 256 -#ifdef CONFIG_KMOD +#ifdef CONFIG_MODULES /* modprobe exit status on success, -ve on error. Return value * usually useless though. */ extern int request_module(const char * name, ...) __attribute__ ((format (printf, 1, 2))); +#define try_then_request_module(x, mod...) ((x) ?: (request_module(mod), (x))) #else static inline int request_module(const char * name, ...) { return -ENOSYS; } +#define try_then_request_module(x, mod...) (x) #endif -#define try_then_request_module(x, mod...) 
((x) ?: (request_module(mod), (x))) struct key; struct file; diff --git a/include/linux/kobject.h b/include/linux/kobject.h index 39e709f88aa0..60f0d418ae32 100644 --- a/include/linux/kobject.h +++ b/include/linux/kobject.h @@ -26,7 +26,6 @@ #include <linux/wait.h> #include <asm/atomic.h> -#define KOBJ_NAME_LEN 20 #define UEVENT_HELPER_PATH_LEN 256 #define UEVENT_NUM_ENVP 32 /* number of env pointers */ #define UEVENT_BUFFER_SIZE 2048 /* buffer for the variables */ @@ -59,12 +58,12 @@ enum kobject_action { struct kobject { const char *name; - struct kref kref; struct list_head entry; struct kobject *parent; struct kset *kset; struct kobj_type *ktype; struct sysfs_dirent *sd; + struct kref kref; unsigned int state_initialized:1; unsigned int state_in_sysfs:1; unsigned int state_add_uevent_sent:1; diff --git a/include/linux/kprobes.h b/include/linux/kprobes.h index 1036631ff4fa..04a3556bdea6 100644 --- a/include/linux/kprobes.h +++ b/include/linux/kprobes.h @@ -259,6 +259,10 @@ void recycle_rp_inst(struct kretprobe_instance *ri, struct hlist_head *head); struct jprobe; struct kretprobe; +static inline struct kprobe *get_kprobe(void *addr) +{ + return NULL; +} static inline struct kprobe *kprobe_running(void) { return NULL; diff --git a/include/linux/kvm.h b/include/linux/kvm.h index a281afeddfbb..0ea064cbfbc8 100644 --- a/include/linux/kvm.h +++ b/include/linux/kvm.h @@ -173,6 +173,30 @@ struct kvm_run { }; }; +/* for KVM_REGISTER_COALESCED_MMIO / KVM_UNREGISTER_COALESCED_MMIO */ + +struct kvm_coalesced_mmio_zone { + __u64 addr; + __u32 size; + __u32 pad; +}; + +struct kvm_coalesced_mmio { + __u64 phys_addr; + __u32 len; + __u32 pad; + __u8 data[8]; +}; + +struct kvm_coalesced_mmio_ring { + __u32 first, last; + struct kvm_coalesced_mmio coalesced_mmio[0]; +}; + +#define KVM_COALESCED_MMIO_MAX \ + ((PAGE_SIZE - sizeof(struct kvm_coalesced_mmio_ring)) / \ + sizeof(struct kvm_coalesced_mmio)) + /* for KVM_TRANSLATE */ struct kvm_translation { /* in */ @@ -294,14 +318,14 @@ struct kvm_trace_rec { __u32 vcpu_id; union { struct { - __u32 cycle_lo, cycle_hi; + __u64 cycle_u64; __u32 extra_u32[KVM_TRC_EXTRA_MAX]; } cycle; struct { __u32 extra_u32[KVM_TRC_EXTRA_MAX]; } nocycle; } u; -}; +} __attribute__((packed)); #define KVMIO 0xAE @@ -346,6 +370,7 @@ struct kvm_trace_rec { #define KVM_CAP_NOP_IO_DELAY 12 #define KVM_CAP_PV_MMU 13 #define KVM_CAP_MP_STATE 14 +#define KVM_CAP_COALESCED_MMIO 15 /* * ioctls for VM fds @@ -371,6 +396,10 @@ struct kvm_trace_rec { #define KVM_CREATE_PIT _IO(KVMIO, 0x64) #define KVM_GET_PIT _IOWR(KVMIO, 0x65, struct kvm_pit_state) #define KVM_SET_PIT _IOR(KVMIO, 0x66, struct kvm_pit_state) +#define KVM_REGISTER_COALESCED_MMIO \ + _IOW(KVMIO, 0x67, struct kvm_coalesced_mmio_zone) +#define KVM_UNREGISTER_COALESCED_MMIO \ + _IOW(KVMIO, 0x68, struct kvm_coalesced_mmio_zone) /* * ioctls for vcpu fds diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h index de9d1df4bba2..07d68a8ae8e9 100644 --- a/include/linux/kvm_host.h +++ b/include/linux/kvm_host.h @@ -52,7 +52,8 @@ struct kvm_io_bus { void kvm_io_bus_init(struct kvm_io_bus *bus); void kvm_io_bus_destroy(struct kvm_io_bus *bus); -struct kvm_io_device *kvm_io_bus_find_dev(struct kvm_io_bus *bus, gpa_t addr); +struct kvm_io_device *kvm_io_bus_find_dev(struct kvm_io_bus *bus, + gpa_t addr, int len, int is_write); void kvm_io_bus_register_dev(struct kvm_io_bus *bus, struct kvm_io_device *dev); @@ -116,6 +117,10 @@ struct kvm { struct kvm_vm_stat stat; struct kvm_arch arch; atomic_t users_count; +#ifdef 
KVM_COALESCED_MMIO_PAGE_OFFSET + struct kvm_coalesced_mmio_dev *coalesced_mmio_dev; + struct kvm_coalesced_mmio_ring *coalesced_mmio_ring; +#endif }; /* The guest did something we don't support. */ @@ -135,9 +140,6 @@ void kvm_vcpu_uninit(struct kvm_vcpu *vcpu); void vcpu_load(struct kvm_vcpu *vcpu); void vcpu_put(struct kvm_vcpu *vcpu); -void decache_vcpus_on_cpu(int cpu); - - int kvm_init(void *opaque, unsigned int vcpu_size, struct module *module); void kvm_exit(void); @@ -166,6 +168,7 @@ int kvm_arch_set_memory_region(struct kvm *kvm, struct kvm_userspace_memory_region *mem, struct kvm_memory_slot old, int user_alloc); +void kvm_arch_flush_shadow(struct kvm *kvm); gfn_t unalias_gfn(struct kvm *kvm, gfn_t gfn); struct page *gfn_to_page(struct kvm *kvm, gfn_t gfn); unsigned long gfn_to_hva(struct kvm *kvm, gfn_t gfn); diff --git a/include/linux/libata.h b/include/linux/libata.h index e57e5d08312d..5b247b8a6b3b 100644 --- a/include/linux/libata.h +++ b/include/linux/libata.h @@ -27,6 +27,7 @@ #define __LINUX_LIBATA_H__ #include <linux/delay.h> +#include <linux/jiffies.h> #include <linux/interrupt.h> #include <linux/dma-mapping.h> #include <linux/scatterlist.h> @@ -115,7 +116,7 @@ enum { /* tag ATA_MAX_QUEUE - 1 is reserved for internal commands */ ATA_MAX_QUEUE = 32, ATA_TAG_INTERNAL = ATA_MAX_QUEUE - 1, - ATA_SHORT_PAUSE = (HZ >> 6) + 1, + ATA_SHORT_PAUSE = 16, ATAPI_MAX_DRAIN = 16 << 10, @@ -168,6 +169,7 @@ enum { ATA_LFLAG_ASSUME_CLASS = ATA_LFLAG_ASSUME_ATA | ATA_LFLAG_ASSUME_SEMB, ATA_LFLAG_NO_RETRY = (1 << 5), /* don't retry this link */ ATA_LFLAG_DISABLED = (1 << 6), /* link is disabled */ + ATA_LFLAG_SW_ACTIVITY = (1 << 7), /* keep activity stats */ /* struct ata_port flags */ ATA_FLAG_SLAVE_POSS = (1 << 0), /* host supports slave dev */ @@ -190,6 +192,10 @@ enum { ATA_FLAG_AN = (1 << 18), /* controller supports AN */ ATA_FLAG_PMP = (1 << 19), /* controller supports PMP */ ATA_FLAG_IPM = (1 << 20), /* driver can handle IPM */ + ATA_FLAG_EM = (1 << 21), /* driver supports enclosure + * management */ + ATA_FLAG_SW_ACTIVITY = (1 << 22), /* driver supports sw activity + * led */ /* The following flag belongs to ap->pflags but is kept in * ap->flags because it's referenced in many LLDs and will be @@ -234,17 +240,16 @@ enum { /* bits 24:31 of host->flags are reserved for LLD specific flags */ /* various lengths of time */ - ATA_TMOUT_BOOT = 30 * HZ, /* heuristic */ - ATA_TMOUT_BOOT_QUICK = 7 * HZ, /* heuristic */ - ATA_TMOUT_INTERNAL = 30 * HZ, - ATA_TMOUT_INTERNAL_QUICK = 5 * HZ, + ATA_TMOUT_BOOT = 30000, /* heuristic */ + ATA_TMOUT_BOOT_QUICK = 7000, /* heuristic */ + ATA_TMOUT_INTERNAL_QUICK = 5000, /* FIXME: GoVault needs 2s but we can't afford that without * parallel probing. 800ms is enough for iVDR disk * HHD424020F7SV00. Increase to 2secs when parallel probing * is in place. */ - ATA_TMOUT_FF_WAIT = 4 * HZ / 5, + ATA_TMOUT_FF_WAIT = 800, /* Spec mandates to wait for ">= 2ms" before checking status * after reset. We wait 150ms, because that was the magic @@ -256,14 +261,14 @@ enum { * * Old drivers/ide uses the 2mS rule and then waits for ready. */ - ATA_WAIT_AFTER_RESET_MSECS = 150, + ATA_WAIT_AFTER_RESET = 150, /* If PMP is supported, we have to do follow-up SRST. As some * PMPs don't send D2H Reg FIS after hardreset, LLDs are * advised to wait only for the following duration before * doing SRST. 
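
The ATA_TMOUT_* values in the libata.h hunk above change from jiffies to plain milliseconds; the new ata_deadline() helper near the end of the same hunk converts a millisecond timeout into a jiffies deadline at the point of use. A sketch of that pairing follows; the polling loop and status bit are illustrative.

/* Sketch only: millisecond constants become jiffies at the call site. */
static void example_poll_until_timeout(void __iomem *status_reg)
{
	unsigned long deadline = ata_deadline(jiffies, ATA_TMOUT_PMP_SRST_WAIT);

	while (time_before(jiffies, deadline)) {
		if (!(readl(status_reg) & 0x80))	/* hypothetical busy bit */
			break;
		msleep(10);
	}
}
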
*/ - ATA_TMOUT_PMP_SRST_WAIT = 1 * HZ, + ATA_TMOUT_PMP_SRST_WAIT = 1000, /* ATA bus states */ BUS_UNKNOWN = 0, @@ -340,6 +345,11 @@ enum { SATA_PMP_RW_TIMEOUT = 3000, /* PMP read/write timeout */ + /* This should match the actual table size of + * ata_eh_cmd_timeout_table in libata-eh.c. + */ + ATA_EH_CMD_TIMEOUT_TABLE_SIZE = 5, + /* Horkage types. May be set by libata or controller on drives (some horkage may be drive/controller pair dependant */ @@ -441,6 +451,15 @@ enum link_pm { MEDIUM_POWER, }; extern struct device_attribute dev_attr_link_power_management_policy; +extern struct device_attribute dev_attr_em_message_type; +extern struct device_attribute dev_attr_em_message; +extern struct device_attribute dev_attr_sw_activity; + +enum sw_activity { + OFF, + BLINK_ON, + BLINK_OFF, +}; #ifdef CONFIG_ATA_SFF struct ata_ioports { @@ -597,10 +616,14 @@ struct ata_eh_info { struct ata_eh_context { struct ata_eh_info i; int tries[ATA_MAX_DEVICES]; + int cmd_timeout_idx[ATA_MAX_DEVICES] + [ATA_EH_CMD_TIMEOUT_TABLE_SIZE]; unsigned int classes[ATA_MAX_DEVICES]; unsigned int did_probe_mask; unsigned int saved_ncq_enabled; u8 saved_xfer_mode[ATA_MAX_DEVICES]; + /* timestamp for the last reset attempt or success */ + unsigned long last_reset; }; struct ata_acpi_drive @@ -692,6 +715,7 @@ struct ata_port { struct timer_list fastdrain_timer; unsigned long fastdrain_cnt; + int em_message_type; void *private_data; #ifdef CONFIG_ATA_ACPI @@ -783,6 +807,12 @@ struct ata_port_operations { u8 (*bmdma_status)(struct ata_port *ap); #endif /* CONFIG_ATA_SFF */ + ssize_t (*em_show)(struct ata_port *ap, char *buf); + ssize_t (*em_store)(struct ata_port *ap, const char *message, + size_t size); + ssize_t (*sw_activity_show)(struct ata_device *dev, char *buf); + ssize_t (*sw_activity_store)(struct ata_device *dev, + enum sw_activity val); /* * Obsolete */ @@ -895,8 +925,7 @@ extern void ata_host_resume(struct ata_host *host); #endif extern int ata_ratelimit(void); extern u32 ata_wait_register(void __iomem *reg, u32 mask, u32 val, - unsigned long interval_msec, - unsigned long timeout_msec); + unsigned long interval, unsigned long timeout); extern int atapi_cmd_type(u8 opcode); extern void ata_tf_to_fis(const struct ata_taskfile *tf, u8 pmp, int is_cmd, u8 *fis); @@ -1389,6 +1418,12 @@ static inline int ata_check_ready(u8 status) return 0; } +static inline unsigned long ata_deadline(unsigned long from_jiffies, + unsigned long timeout_msecs) +{ + return from_jiffies + msecs_to_jiffies(timeout_msecs); +} + /************************************************************************** * PMP - drivers/ata/libata-pmp.c diff --git a/include/linux/libps2.h b/include/linux/libps2.h index f6f301e2b0f5..afc413369101 100644 --- a/include/linux/libps2.h +++ b/include/linux/libps2.h @@ -43,7 +43,6 @@ void ps2_init(struct ps2dev *ps2dev, struct serio *serio); int ps2_sendbyte(struct ps2dev *ps2dev, unsigned char byte, int timeout); void ps2_drain(struct ps2dev *ps2dev, int maxbytes, int timeout); int ps2_command(struct ps2dev *ps2dev, unsigned char *param, int command); -int ps2_schedule_command(struct ps2dev *ps2dev, unsigned char *param, int command); int ps2_handle_ack(struct ps2dev *ps2dev, unsigned char data); int ps2_handle_response(struct ps2dev *ps2dev, unsigned char data); void ps2_cmd_aborted(struct ps2dev *ps2dev); diff --git a/include/linux/linkage.h b/include/linux/linkage.h index 2119610b24f8..56ba37394656 100644 --- a/include/linux/linkage.h +++ b/include/linux/linkage.h @@ -1,8 +1,11 @@ #ifndef _LINUX_LINKAGE_H 
#define _LINUX_LINKAGE_H +#include <linux/compiler.h> #include <asm/linkage.h> +#define notrace __attribute__((no_instrument_function)) + #ifdef __cplusplus #define CPP_ASMLINKAGE extern "C" #else @@ -17,6 +20,9 @@ # define asmregparm #endif +#define __page_aligned_data __section(.data.page_aligned) __aligned(PAGE_SIZE) +#define __page_aligned_bss __section(.bss.page_aligned) __aligned(PAGE_SIZE) + /* * This is used by architectures to keep arguments on the stack * untouched by the compiler by keeping them live until the end. diff --git a/include/linux/list.h b/include/linux/list.h index 08cf4f651889..139ec41d9c2e 100644 --- a/include/linux/list.h +++ b/include/linux/list.h @@ -85,65 +85,6 @@ static inline void list_add_tail(struct list_head *new, struct list_head *head) } /* - * Insert a new entry between two known consecutive entries. - * - * This is only for internal list manipulation where we know - * the prev/next entries already! - */ -static inline void __list_add_rcu(struct list_head * new, - struct list_head * prev, struct list_head * next) -{ - new->next = next; - new->prev = prev; - smp_wmb(); - next->prev = new; - prev->next = new; -} - -/** - * list_add_rcu - add a new entry to rcu-protected list - * @new: new entry to be added - * @head: list head to add it after - * - * Insert a new entry after the specified head. - * This is good for implementing stacks. - * - * The caller must take whatever precautions are necessary - * (such as holding appropriate locks) to avoid racing - * with another list-mutation primitive, such as list_add_rcu() - * or list_del_rcu(), running on this same list. - * However, it is perfectly legal to run concurrently with - * the _rcu list-traversal primitives, such as - * list_for_each_entry_rcu(). - */ -static inline void list_add_rcu(struct list_head *new, struct list_head *head) -{ - __list_add_rcu(new, head, head->next); -} - -/** - * list_add_tail_rcu - add a new entry to rcu-protected list - * @new: new entry to be added - * @head: list head to add it before - * - * Insert a new entry before the specified head. - * This is useful for implementing queues. - * - * The caller must take whatever precautions are necessary - * (such as holding appropriate locks) to avoid racing - * with another list-mutation primitive, such as list_add_tail_rcu() - * or list_del_rcu(), running on this same list. - * However, it is perfectly legal to run concurrently with - * the _rcu list-traversal primitives, such as - * list_for_each_entry_rcu(). - */ -static inline void list_add_tail_rcu(struct list_head *new, - struct list_head *head) -{ - __list_add_rcu(new, head->prev, head); -} - -/* * Delete a list entry by making the prev/next entries * point to each other. * @@ -174,36 +115,6 @@ extern void list_del(struct list_head *entry); #endif /** - * list_del_rcu - deletes entry from list without re-initialization - * @entry: the element to delete from the list. - * - * Note: list_empty() on entry does not return true after this, - * the entry is in an undefined state. It is useful for RCU based - * lockfree traversal. - * - * In particular, it means that we can not poison the forward - * pointers that may still be used for walking the list. - * - * The caller must take whatever precautions are necessary - * (such as holding appropriate locks) to avoid racing - * with another list-mutation primitive, such as list_del_rcu() - * or list_add_rcu(), running on this same list. 
- * However, it is perfectly legal to run concurrently with - * the _rcu list-traversal primitives, such as - * list_for_each_entry_rcu(). - * - * Note that the caller is not permitted to immediately free - * the newly deleted entry. Instead, either synchronize_rcu() - * or call_rcu() must be used to defer freeing until an RCU - * grace period has elapsed. - */ -static inline void list_del_rcu(struct list_head *entry) -{ - __list_del(entry->prev, entry->next); - entry->prev = LIST_POISON2; -} - -/** * list_replace - replace old entry by new one * @old : the element to be replaced * @new : the new element to insert @@ -227,25 +138,6 @@ static inline void list_replace_init(struct list_head *old, } /** - * list_replace_rcu - replace old entry by new one - * @old : the element to be replaced - * @new : the new element to insert - * - * The @old entry will be replaced with the @new entry atomically. - * Note: @old should not be empty. - */ -static inline void list_replace_rcu(struct list_head *old, - struct list_head *new) -{ - new->next = old->next; - new->prev = old->prev; - smp_wmb(); - new->next->prev = new; - new->prev->next = new; - old->prev = LIST_POISON2; -} - -/** * list_del_init - deletes entry from list and reinitialize it. * @entry: the element to delete from the list. */ @@ -369,62 +261,6 @@ static inline void list_splice_init(struct list_head *list, } /** - * list_splice_init_rcu - splice an RCU-protected list into an existing list. - * @list: the RCU-protected list to splice - * @head: the place in the list to splice the first list into - * @sync: function to sync: synchronize_rcu(), synchronize_sched(), ... - * - * @head can be RCU-read traversed concurrently with this function. - * - * Note that this function blocks. - * - * Important note: the caller must take whatever action is necessary to - * prevent any other updates to @head. In principle, it is possible - * to modify the list as soon as sync() begins execution. - * If this sort of thing becomes necessary, an alternative version - * based on call_rcu() could be created. But only if -really- - * needed -- there is no shortage of RCU API members. - */ -static inline void list_splice_init_rcu(struct list_head *list, - struct list_head *head, - void (*sync)(void)) -{ - struct list_head *first = list->next; - struct list_head *last = list->prev; - struct list_head *at = head->next; - - if (list_empty(head)) - return; - - /* "first" and "last" tracking list, so initialize it. */ - - INIT_LIST_HEAD(list); - - /* - * At this point, the list body still points to the source list. - * Wait for any readers to finish using the list before splicing - * the list body into the new list. Any new readers will see - * an empty list. - */ - - sync(); - - /* - * Readers are finished with the source list, so perform splice. - * The order is important if the new list is global and accessible - * to concurrent RCU readers. Note that RCU readers are not - * permitted to traverse the prev pointers without excluding - * this function. - */ - - last->next = at; - smp_wmb(); - head->next = first; - first->prev = head; - at->prev = last; -} - -/** * list_entry - get the struct for this entry * @ptr: the &struct list_head pointer. * @type: the type of the struct this is embedded in. 
@@ -629,57 +465,6 @@ static inline void list_splice_init_rcu(struct list_head *list, &pos->member != (head); \ pos = n, n = list_entry(n->member.prev, typeof(*n), member)) -/** - * list_for_each_rcu - iterate over an rcu-protected list - * @pos: the &struct list_head to use as a loop cursor. - * @head: the head for your list. - * - * This list-traversal primitive may safely run concurrently with - * the _rcu list-mutation primitives such as list_add_rcu() - * as long as the traversal is guarded by rcu_read_lock(). - */ -#define list_for_each_rcu(pos, head) \ - for (pos = rcu_dereference((head)->next); \ - prefetch(pos->next), pos != (head); \ - pos = rcu_dereference(pos->next)) - -#define __list_for_each_rcu(pos, head) \ - for (pos = rcu_dereference((head)->next); \ - pos != (head); \ - pos = rcu_dereference(pos->next)) - -/** - * list_for_each_entry_rcu - iterate over rcu list of given type - * @pos: the type * to use as a loop cursor. - * @head: the head for your list. - * @member: the name of the list_struct within the struct. - * - * This list-traversal primitive may safely run concurrently with - * the _rcu list-mutation primitives such as list_add_rcu() - * as long as the traversal is guarded by rcu_read_lock(). - */ -#define list_for_each_entry_rcu(pos, head, member) \ - for (pos = list_entry(rcu_dereference((head)->next), typeof(*pos), member); \ - prefetch(pos->member.next), &pos->member != (head); \ - pos = list_entry(rcu_dereference(pos->member.next), typeof(*pos), member)) - - -/** - * list_for_each_continue_rcu - * @pos: the &struct list_head to use as a loop cursor. - * @head: the head for your list. - * - * Iterate over an rcu-protected list, continuing after current point. - * - * This list-traversal primitive may safely run concurrently with - * the _rcu list-mutation primitives such as list_add_rcu() - * as long as the traversal is guarded by rcu_read_lock(). - */ -#define list_for_each_continue_rcu(pos, head) \ - for ((pos) = rcu_dereference((pos)->next); \ - prefetch((pos)->next), (pos) != (head); \ - (pos) = rcu_dereference((pos)->next)) - /* * Double linked lists with a single pointer list head. * Mostly useful for hash tables where the two pointer list head is @@ -730,31 +515,6 @@ static inline void hlist_del(struct hlist_node *n) n->pprev = LIST_POISON2; } -/** - * hlist_del_rcu - deletes entry from hash list without re-initialization - * @n: the element to delete from the hash list. - * - * Note: list_unhashed() on entry does not return true after this, - * the entry is in an undefined state. It is useful for RCU based - * lockfree traversal. - * - * In particular, it means that we can not poison the forward - * pointers that may still be used for walking the hash list. - * - * The caller must take whatever precautions are necessary - * (such as holding appropriate locks) to avoid racing - * with another list-mutation primitive, such as hlist_add_head_rcu() - * or hlist_del_rcu(), running on this same list. - * However, it is perfectly legal to run concurrently with - * the _rcu list-traversal primitives, such as - * hlist_for_each_entry(). 
- */ -static inline void hlist_del_rcu(struct hlist_node *n) -{ - __hlist_del(n); - n->pprev = LIST_POISON2; -} - static inline void hlist_del_init(struct hlist_node *n) { if (!hlist_unhashed(n)) { @@ -763,27 +523,6 @@ static inline void hlist_del_init(struct hlist_node *n) } } -/** - * hlist_replace_rcu - replace old entry by new one - * @old : the element to be replaced - * @new : the new element to insert - * - * The @old entry will be replaced with the @new entry atomically. - */ -static inline void hlist_replace_rcu(struct hlist_node *old, - struct hlist_node *new) -{ - struct hlist_node *next = old->next; - - new->next = next; - new->pprev = old->pprev; - smp_wmb(); - if (next) - new->next->pprev = &new->next; - *new->pprev = new; - old->pprev = LIST_POISON2; -} - static inline void hlist_add_head(struct hlist_node *n, struct hlist_head *h) { struct hlist_node *first = h->first; @@ -794,38 +533,6 @@ static inline void hlist_add_head(struct hlist_node *n, struct hlist_head *h) n->pprev = &h->first; } - -/** - * hlist_add_head_rcu - * @n: the element to add to the hash list. - * @h: the list to add to. - * - * Description: - * Adds the specified element to the specified hlist, - * while permitting racing traversals. - * - * The caller must take whatever precautions are necessary - * (such as holding appropriate locks) to avoid racing - * with another list-mutation primitive, such as hlist_add_head_rcu() - * or hlist_del_rcu(), running on this same list. - * However, it is perfectly legal to run concurrently with - * the _rcu list-traversal primitives, such as - * hlist_for_each_entry_rcu(), used to prevent memory-consistency - * problems on Alpha CPUs. Regardless of the type of CPU, the - * list-traversal primitive must be guarded by rcu_read_lock(). - */ -static inline void hlist_add_head_rcu(struct hlist_node *n, - struct hlist_head *h) -{ - struct hlist_node *first = h->first; - n->next = first; - n->pprev = &h->first; - smp_wmb(); - if (first) - first->pprev = &n->next; - h->first = n; -} - /* next must be != NULL */ static inline void hlist_add_before(struct hlist_node *n, struct hlist_node *next) @@ -847,63 +554,6 @@ static inline void hlist_add_after(struct hlist_node *n, next->next->pprev = &next->next; } -/** - * hlist_add_before_rcu - * @n: the new element to add to the hash list. - * @next: the existing element to add the new element before. - * - * Description: - * Adds the specified element to the specified hlist - * before the specified node while permitting racing traversals. - * - * The caller must take whatever precautions are necessary - * (such as holding appropriate locks) to avoid racing - * with another list-mutation primitive, such as hlist_add_head_rcu() - * or hlist_del_rcu(), running on this same list. - * However, it is perfectly legal to run concurrently with - * the _rcu list-traversal primitives, such as - * hlist_for_each_entry_rcu(), used to prevent memory-consistency - * problems on Alpha CPUs. - */ -static inline void hlist_add_before_rcu(struct hlist_node *n, - struct hlist_node *next) -{ - n->pprev = next->pprev; - n->next = next; - smp_wmb(); - next->pprev = &n->next; - *(n->pprev) = n; -} - -/** - * hlist_add_after_rcu - * @prev: the existing element to add the new element after. - * @n: the new element to add to the hash list. - * - * Description: - * Adds the specified element to the specified hlist - * after the specified node while permitting racing traversals. 
- * - * The caller must take whatever precautions are necessary - * (such as holding appropriate locks) to avoid racing - * with another list-mutation primitive, such as hlist_add_head_rcu() - * or hlist_del_rcu(), running on this same list. - * However, it is perfectly legal to run concurrently with - * the _rcu list-traversal primitives, such as - * hlist_for_each_entry_rcu(), used to prevent memory-consistency - * problems on Alpha CPUs. - */ -static inline void hlist_add_after_rcu(struct hlist_node *prev, - struct hlist_node *n) -{ - n->next = prev->next; - n->pprev = &prev->next; - smp_wmb(); - prev->next = n; - if (n->next) - n->next->pprev = &n->next; -} - #define hlist_entry(ptr, type, member) container_of(ptr,type,member) #define hlist_for_each(pos, head) \ @@ -964,21 +614,4 @@ static inline void hlist_add_after_rcu(struct hlist_node *prev, ({ tpos = hlist_entry(pos, typeof(*tpos), member); 1;}); \ pos = n) -/** - * hlist_for_each_entry_rcu - iterate over rcu list of given type - * @tpos: the type * to use as a loop cursor. - * @pos: the &struct hlist_node to use as a loop cursor. - * @head: the head for your list. - * @member: the name of the hlist_node within the struct. - * - * This list-traversal primitive may safely run concurrently with - * the _rcu list-mutation primitives such as hlist_add_head_rcu() - * as long as the traversal is guarded by rcu_read_lock(). - */ -#define hlist_for_each_entry_rcu(tpos, pos, head, member) \ - for (pos = rcu_dereference((head)->first); \ - pos && ({ prefetch(pos->next); 1;}) && \ - ({ tpos = hlist_entry(pos, typeof(*tpos), member); 1;}); \ - pos = rcu_dereference(pos->next)) - #endif diff --git a/include/linux/lm_interface.h b/include/linux/lm_interface.h index f274997bc283..2ed8fa1b762b 100644 --- a/include/linux/lm_interface.h +++ b/include/linux/lm_interface.h @@ -122,11 +122,9 @@ typedef void (*lm_callback_t) (void *ptr, unsigned int type, void *data); */ #define LM_OUT_ST_MASK 0x00000003 -#define LM_OUT_CACHEABLE 0x00000004 #define LM_OUT_CANCELED 0x00000008 #define LM_OUT_ASYNC 0x00000080 #define LM_OUT_ERROR 0x00000100 -#define LM_OUT_CONV_DEADLK 0x00000200 /* * lm_callback_t types @@ -138,9 +136,6 @@ typedef void (*lm_callback_t) (void *ptr, unsigned int type, void *data); * LM_CB_NEED_RECOVERY * The given journal needs to be recovered. * - * LM_CB_DROPLOCKS - * Reduce the number of cached locks. - * * LM_CB_ASYNC * The given lock has been granted. 
*/ @@ -149,7 +144,6 @@ typedef void (*lm_callback_t) (void *ptr, unsigned int type, void *data); #define LM_CB_NEED_D 258 #define LM_CB_NEED_S 259 #define LM_CB_NEED_RECOVERY 260 -#define LM_CB_DROPLOCKS 261 #define LM_CB_ASYNC 262 /* diff --git a/include/linux/lockd/lockd.h b/include/linux/lockd/lockd.h index 102d928f7206..dbb87ab282e8 100644 --- a/include/linux/lockd/lockd.h +++ b/include/linux/lockd/lockd.h @@ -200,10 +200,12 @@ typedef int (*nlm_host_match_fn_t)(void *cur, struct nlm_host *ref); * Server-side lock handling */ __be32 nlmsvc_lock(struct svc_rqst *, struct nlm_file *, - struct nlm_lock *, int, struct nlm_cookie *); + struct nlm_host *, struct nlm_lock *, int, + struct nlm_cookie *); __be32 nlmsvc_unlock(struct nlm_file *, struct nlm_lock *); __be32 nlmsvc_testlock(struct svc_rqst *, struct nlm_file *, - struct nlm_lock *, struct nlm_lock *, struct nlm_cookie *); + struct nlm_host *, struct nlm_lock *, + struct nlm_lock *, struct nlm_cookie *); __be32 nlmsvc_cancel_blocked(struct nlm_file *, struct nlm_lock *); unsigned long nlmsvc_retry_blocked(void); void nlmsvc_traverse_blocks(struct nlm_host *, struct nlm_file *, @@ -224,7 +226,7 @@ void nlmsvc_invalidate_all(void); * Cluster failover support */ int nlmsvc_unlock_all_by_sb(struct super_block *sb); -int nlmsvc_unlock_all_by_ip(__be32 server_addr); +int nlmsvc_unlock_all_by_ip(struct sockaddr *server_addr); static inline struct inode *nlmsvc_file_inode(struct nlm_file *file) { diff --git a/include/linux/lockdep.h b/include/linux/lockdep.h index 4c4d236ded18..2486eb4edbf1 100644 --- a/include/linux/lockdep.h +++ b/include/linux/lockdep.h @@ -182,6 +182,9 @@ struct lock_list { * We record lock dependency chains, so that we can cache them: */ struct lock_chain { + u8 irq_context; + u8 depth; + u16 base; struct list_head entry; u64 chain_key; }; @@ -276,14 +279,6 @@ extern void lockdep_init_map(struct lockdep_map *lock, const char *name, (lock)->dep_map.key, sub) /* - * To initialize a lockdep_map statically use this macro. - * Note that _name must not be NULL. - */ -#define STATIC_LOCKDEP_MAP_INIT(_name, _key) \ - { .name = (_name), .key = (void *)(_key), } - - -/* * Acquire a lock. * * Values for "read": diff --git a/include/linux/marker.h b/include/linux/marker.h index 430f6adf9762..1290653f9241 100644 --- a/include/linux/marker.h +++ b/include/linux/marker.h @@ -44,8 +44,8 @@ struct marker { */ char state; /* Marker state. */ char ptype; /* probe type : 0 : single, 1 : multi */ - void (*call)(const struct marker *mdata, /* Probe wrapper */ - void *call_private, const char *fmt, ...); + /* Probe wrapper */ + void (*call)(const struct marker *mdata, void *call_private, ...); struct marker_probe_closure single; struct marker_probe_closure *multi; } __attribute__((aligned(8))); @@ -58,8 +58,12 @@ struct marker { * Make sure the alignment of the structure in the __markers section will * not add unwanted padding between the beginning of the section and the * structure. Force alignment to the same alignment as the section start. + * + * The "generic" argument controls which marker enabling mechanism must be used. + * If generic is true, a variable read is used. + * If generic is false, immediate values are used. */ -#define __trace_mark(name, call_private, format, args...) \ +#define __trace_mark(generic, name, call_private, format, args...) 
\ do { \ static const char __mstrtab_##name[] \ __attribute__((section("__markers_strings"))) \ @@ -72,15 +76,14 @@ struct marker { __mark_check_format(format, ## args); \ if (unlikely(__mark_##name.state)) { \ (*__mark_##name.call) \ - (&__mark_##name, call_private, \ - format, ## args); \ + (&__mark_##name, call_private, ## args);\ } \ } while (0) extern void marker_update_probe_range(struct marker *begin, struct marker *end); #else /* !CONFIG_MARKERS */ -#define __trace_mark(name, call_private, format, args...) \ +#define __trace_mark(generic, name, call_private, format, args...) \ __mark_check_format(format, ## args) static inline void marker_update_probe_range(struct marker *begin, struct marker *end) @@ -88,15 +91,30 @@ static inline void marker_update_probe_range(struct marker *begin, #endif /* CONFIG_MARKERS */ /** - * trace_mark - Marker + * trace_mark - Marker using code patching * @name: marker name, not quoted. * @format: format string * @args...: variable argument list * - * Places a marker. + * Places a marker using optimized code patching technique (imv_read()) + * to be enabled when immediate values are present. */ #define trace_mark(name, format, args...) \ - __trace_mark(name, NULL, format, ## args) + __trace_mark(0, name, NULL, format, ## args) + +/** + * _trace_mark - Marker using variable read + * @name: marker name, not quoted. + * @format: format string + * @args...: variable argument list + * + * Places a marker using a standard memory read (_imv_read()) to be + * enabled. Should be used for markers in code paths where instruction + * modification based enabling is not welcome. (__init and __exit functions, + * lockdep, some traps, printk). + */ +#define _trace_mark(name, format, args...) \ + __trace_mark(1, name, NULL, format, ## args) /** * MARK_NOARGS - Format string for a marker with no argument. @@ -117,9 +135,9 @@ static inline void __printf(1, 2) ___mark_check_format(const char *fmt, ...) extern marker_probe_func __mark_empty_function; extern void marker_probe_cb(const struct marker *mdata, - void *call_private, const char *fmt, ...); + void *call_private, ...); extern void marker_probe_cb_noarg(const struct marker *mdata, - void *call_private, const char *fmt, ...); + void *call_private, ...); /* * Connect a probe to a marker. diff --git a/include/linux/mfd/asic3.h b/include/linux/mfd/asic3.h index 4ab2162db13b..322cd6deb9f0 100644 --- a/include/linux/mfd/asic3.h +++ b/include/linux/mfd/asic3.h @@ -8,7 +8,7 @@ * published by the Free Software Foundation. * * Copyright 2001 Compaq Computer Corporation. - * Copyright 2007 OpendHand. + * Copyright 2007-2008 OpenedHand Ltd. 
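For the marker.h rework above, a usage sketch assuming the marker_probe_register() prototype and probe signature of this kernel generation; the marker names, format string and probe body are hypothetical:

    #include <linux/marker.h>
    #include <linux/init.h>
    #include <linux/smp.h>

    static void my_probe(void *probe_private, void *call_private,
                         const char *fmt, va_list *args)
    {
        /* decode the arguments according to fmt, e.g. va_arg(*args, int) */
    }

    static void subsys_do_work(long value)
    {
        /* site enabled via code patching (immediate values) when probed */
        trace_mark(subsys_event, "cpu %d value %ld",
                   smp_processor_id(), value);
    }

    static int __init subsys_init(void)
    {
        /* variable-read variant for __init and other patching-averse paths */
        _trace_mark(subsys_init_done, MARK_NOARGS);

        /* connect the probe to the instrumentation site */
        return marker_probe_register("subsys_event", "cpu %d value %ld",
                                     my_probe, NULL);
    }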
*/ #ifndef __ASIC3_H__ @@ -16,43 +16,22 @@ #include <linux/types.h> -struct asic3 { - void __iomem *mapping; - unsigned int bus_shift; - unsigned int irq_nr; - unsigned int irq_base; - spinlock_t lock; - u16 irq_bothedge[4]; - struct device *dev; -}; - struct asic3_platform_data { - struct { - u32 dir; - u32 init; - u32 sleep_mask; - u32 sleep_out; - u32 batt_fault_out; - u32 sleep_conf; - u32 alt_function; - } gpio_a, gpio_b, gpio_c, gpio_d; - - unsigned int bus_shift; + u16 *gpio_config; + unsigned int gpio_config_num; unsigned int irq_base; - struct platform_device **children; - unsigned int n_children; + unsigned int gpio_base; }; -int asic3_gpio_get_value(struct asic3 *asic, unsigned gpio); -void asic3_gpio_set_value(struct asic3 *asic, unsigned gpio, int val); - #define ASIC3_NUM_GPIO_BANKS 4 #define ASIC3_GPIOS_PER_BANK 16 #define ASIC3_NUM_GPIOS 64 #define ASIC3_NR_IRQS ASIC3_NUM_GPIOS + 6 +#define ASIC3_TO_GPIO(gpio) (NR_BUILTIN_GPIO + (gpio)) + #define ASIC3_GPIO_BANK_A 0 #define ASIC3_GPIO_BANK_B 1 #define ASIC3_GPIO_BANK_C 2 @@ -64,32 +43,89 @@ void asic3_gpio_set_value(struct asic3 *asic, unsigned gpio, int val); /* All offsets below are specified with this address bus shift */ #define ASIC3_DEFAULT_ADDR_SHIFT 2 -#define ASIC3_OFFSET(base, reg) (ASIC3_##base##_Base + ASIC3_##base##_##reg) +#define ASIC3_OFFSET(base, reg) (ASIC3_##base##_BASE + ASIC3_##base##_##reg) #define ASIC3_GPIO_OFFSET(base, reg) \ - (ASIC3_GPIO_##base##_Base + ASIC3_GPIO_##reg) - -#define ASIC3_GPIO_A_Base 0x0000 -#define ASIC3_GPIO_B_Base 0x0100 -#define ASIC3_GPIO_C_Base 0x0200 -#define ASIC3_GPIO_D_Base 0x0300 - -#define ASIC3_GPIO_Mask 0x00 /* R/W 0:don't mask */ -#define ASIC3_GPIO_Direction 0x04 /* R/W 0:input */ -#define ASIC3_GPIO_Out 0x08 /* R/W 0:output low */ -#define ASIC3_GPIO_TriggerType 0x0c /* R/W 0:level */ -#define ASIC3_GPIO_EdgeTrigger 0x10 /* R/W 0:falling */ -#define ASIC3_GPIO_LevelTrigger 0x14 /* R/W 0:low level detect */ -#define ASIC3_GPIO_SleepMask 0x18 /* R/W 0:don't mask in sleep mode */ -#define ASIC3_GPIO_SleepOut 0x1c /* R/W level 0:low in sleep mode */ -#define ASIC3_GPIO_BattFaultOut 0x20 /* R/W level 0:low in batt_fault */ -#define ASIC3_GPIO_IntStatus 0x24 /* R/W 0:none, 1:detect */ -#define ASIC3_GPIO_AltFunction 0x28 /* R/W 1:LED register control */ -#define ASIC3_GPIO_SleepConf 0x2c /* + (ASIC3_GPIO_##base##_BASE + ASIC3_GPIO_##reg) + +#define ASIC3_GPIO_A_BASE 0x0000 +#define ASIC3_GPIO_B_BASE 0x0100 +#define ASIC3_GPIO_C_BASE 0x0200 +#define ASIC3_GPIO_D_BASE 0x0300 + +#define ASIC3_GPIO_TO_BANK(gpio) ((gpio) >> 4) +#define ASIC3_GPIO_TO_BIT(gpio) ((gpio) - \ + (ASIC3_GPIOS_PER_BANK * ((gpio) >> 4))) +#define ASIC3_GPIO_TO_MASK(gpio) (1 << ASIC3_GPIO_TO_BIT(gpio)) +#define ASIC3_GPIO_TO_BASE(gpio) (ASIC3_GPIO_A_BASE + (((gpio) >> 4) * 0x0100)) +#define ASIC3_BANK_TO_BASE(bank) (ASIC3_GPIO_A_BASE + ((bank) * 0x100)) + +#define ASIC3_GPIO_MASK 0x00 /* R/W 0:don't mask */ +#define ASIC3_GPIO_DIRECTION 0x04 /* R/W 0:input */ +#define ASIC3_GPIO_OUT 0x08 /* R/W 0:output low */ +#define ASIC3_GPIO_TRIGGER_TYPE 0x0c /* R/W 0:level */ +#define ASIC3_GPIO_EDGE_TRIGGER 0x10 /* R/W 0:falling */ +#define ASIC3_GPIO_LEVEL_TRIGGER 0x14 /* R/W 0:low level detect */ +#define ASIC3_GPIO_SLEEP_MASK 0x18 /* R/W 0:don't mask in sleep mode */ +#define ASIC3_GPIO_SLEEP_OUT 0x1c /* R/W level 0:low in sleep mode */ +#define ASIC3_GPIO_BAT_FAULT_OUT 0x20 /* R/W level 0:low in batt_fault */ +#define ASIC3_GPIO_INT_STATUS 0x24 /* R/W 0:none, 1:detect */ +#define ASIC3_GPIO_ALT_FUNCTION 0x28 
/* R/W 1:LED register control */ +#define ASIC3_GPIO_SLEEP_CONF 0x2c /* * R/W bit 1: autosleep * 0: disable gposlpout in normal mode, * enable gposlpout in sleep mode. */ -#define ASIC3_GPIO_Status 0x30 /* R Pin status */ +#define ASIC3_GPIO_STATUS 0x30 /* R Pin status */ + +/* + * ASIC3 GPIO config + * + * Bits 0..6 gpio number + * Bits 7..13 Alternate function + * Bit 14 Direction + * Bit 15 Initial value + * + */ +#define ASIC3_CONFIG_GPIO_PIN(config) ((config) & 0x7f) +#define ASIC3_CONFIG_GPIO_ALT(config) (((config) & (0x7f << 7)) >> 7) +#define ASIC3_CONFIG_GPIO_DIR(config) ((config & (1 << 14)) >> 14) +#define ASIC3_CONFIG_GPIO_INIT(config) ((config & (1 << 15)) >> 15) +#define ASIC3_CONFIG_GPIO(gpio, alt, dir, init) (((gpio) & 0x7f) \ + | (((alt) & 0x7f) << 7) | (((dir) & 0x1) << 14) \ + | (((init) & 0x1) << 15)) +#define ASIC3_CONFIG_GPIO_DEFAULT(gpio, dir, init) \ + ASIC3_CONFIG_GPIO((gpio), 0, (dir), (init)) +#define ASIC3_CONFIG_GPIO_DEFAULT_OUT(gpio, init) \ + ASIC3_CONFIG_GPIO((gpio), 0, 1, (init)) + +/* + * Alternate functions + */ +#define ASIC3_GPIOA11_PWM0 ASIC3_CONFIG_GPIO(11, 1, 1, 0) +#define ASIC3_GPIOA12_PWM1 ASIC3_CONFIG_GPIO(12, 1, 1, 0) +#define ASIC3_GPIOA15_CONTROL_CX ASIC3_CONFIG_GPIO(15, 1, 1, 0) +#define ASIC3_GPIOC0_LED0 ASIC3_CONFIG_GPIO(32, 1, 1, 0) +#define ASIC3_GPIOC1_LED1 ASIC3_CONFIG_GPIO(33, 1, 1, 0) +#define ASIC3_GPIOC2_LED2 ASIC3_CONFIG_GPIO(34, 1, 1, 0) +#define ASIC3_GPIOC3_SPI_RXD ASIC3_CONFIG_GPIO(35, 1, 0, 0) +#define ASIC3_GPIOC4_CF_nCD ASIC3_CONFIG_GPIO(36, 1, 0, 0) +#define ASIC3_GPIOC4_SPI_TXD ASIC3_CONFIG_GPIO(36, 1, 1, 0) +#define ASIC3_GPIOC5_SPI_CLK ASIC3_CONFIG_GPIO(37, 1, 1, 0) +#define ASIC3_GPIOC5_nCIOW ASIC3_CONFIG_GPIO(37, 1, 1, 0) +#define ASIC3_GPIOC6_nCIOR ASIC3_CONFIG_GPIO(38, 1, 1, 0) +#define ASIC3_GPIOC7_nPCE_1 ASIC3_CONFIG_GPIO(39, 1, 0, 0) +#define ASIC3_GPIOC8_nPCE_2 ASIC3_CONFIG_GPIO(40, 1, 0, 0) +#define ASIC3_GPIOC9_nPOE ASIC3_CONFIG_GPIO(41, 1, 0, 0) +#define ASIC3_GPIOC10_nPWE ASIC3_CONFIG_GPIO(42, 1, 0, 0) +#define ASIC3_GPIOC11_PSKTSEL ASIC3_CONFIG_GPIO(43, 1, 0, 0) +#define ASIC3_GPIOC12_nPREG ASIC3_CONFIG_GPIO(44, 1, 0, 0) +#define ASIC3_GPIOC13_nPWAIT ASIC3_CONFIG_GPIO(45, 1, 1, 0) +#define ASIC3_GPIOC14_nPIOIS16 ASIC3_CONFIG_GPIO(46, 1, 1, 0) +#define ASIC3_GPIOC15_nPIOR ASIC3_CONFIG_GPIO(47, 1, 0, 0) +#define ASIC3_GPIOD11_nCIOIS16 ASIC3_CONFIG_GPIO(59, 1, 0, 0) +#define ASIC3_GPIOD12_nCWAIT ASIC3_CONFIG_GPIO(60, 1, 0, 0) +#define ASIC3_GPIOD15_nPIOW ASIC3_CONFIG_GPIO(63, 1, 0, 0) + #define ASIC3_SPI_Base 0x0400 #define ASIC3_SPI_Control 0x0000 @@ -128,7 +164,7 @@ void asic3_gpio_set_value(struct asic3 *asic, unsigned gpio, int val); #define LED_AUTOSTOP (1 << 5) /* LED ON/OFF auto stop 0:disable, 1:enable */ #define LED_ALWAYS (1 << 6) /* LED Interrupt Mask 0:No mask, 1:mask */ -#define ASIC3_CLOCK_Base 0x0A00 +#define ASIC3_CLOCK_BASE 0x0A00 #define ASIC3_CLOCK_CDEX 0x00 #define ASIC3_CLOCK_SEL 0x04 @@ -159,12 +195,12 @@ void asic3_gpio_set_value(struct asic3 *asic, unsigned gpio, int val); #define CLOCK_SEL_CX (1 << 2) -#define ASIC3_INTR_Base 0x0B00 +#define ASIC3_INTR_BASE 0x0B00 -#define ASIC3_INTR_IntMask 0x00 /* Interrupt mask control */ -#define ASIC3_INTR_PIntStat 0x04 /* Peripheral interrupt status */ -#define ASIC3_INTR_IntCPS 0x08 /* Interrupt timer clock pre-scale */ -#define ASIC3_INTR_IntTBS 0x0c /* Interrupt timer set */ +#define ASIC3_INTR_INT_MASK 0x00 /* Interrupt mask control */ +#define ASIC3_INTR_P_INT_STAT 0x04 /* Peripheral interrupt status */ +#define ASIC3_INTR_INT_CPS 0x08 /* 
Interrupt timer clock pre-scale */ +#define ASIC3_INTR_INT_TBS 0x0c /* Interrupt timer set */ #define ASIC3_INTMASK_GINTMASK (1 << 0) /* Global INTs mask 1:enable */ #define ASIC3_INTMASK_GINTEL (1 << 1) /* 1: rising edge, 0: hi level */ @@ -227,44 +263,12 @@ void asic3_gpio_set_value(struct asic3 *asic, unsigned gpio, int val); #define ASIC3_EXTCF_CF_SLEEP (1 << 15) /* CF sleep mode control */ /********************************************* - * The Onewire interface registers - * - * OWM_CMD - * OWM_DAT - * OWM_INTR - * OWM_INTEN - * OWM_CLKDIV + * The Onewire interface (DS1WM) is handled + * by the ds1wm driver. * *********************************************/ -#define ASIC3_OWM_Base 0xC00 - -#define ASIC3_OWM_CMD 0x00 -#define ASIC3_OWM_DAT 0x04 -#define ASIC3_OWM_INTR 0x08 -#define ASIC3_OWM_INTEN 0x0C -#define ASIC3_OWM_CLKDIV 0x10 - -#define ASIC3_OWM_CMD_ONEWR (1 << 0) -#define ASIC3_OWM_CMD_SRA (1 << 1) -#define ASIC3_OWM_CMD_DQO (1 << 2) -#define ASIC3_OWM_CMD_DQI (1 << 3) - -#define ASIC3_OWM_INTR_PD (1 << 0) -#define ASIC3_OWM_INTR_PDR (1 << 1) -#define ASIC3_OWM_INTR_TBE (1 << 2) -#define ASIC3_OWM_INTR_TEMP (1 << 3) -#define ASIC3_OWM_INTR_RBF (1 << 4) - -#define ASIC3_OWM_INTEN_EPD (1 << 0) -#define ASIC3_OWM_INTEN_IAS (1 << 1) -#define ASIC3_OWM_INTEN_ETBE (1 << 2) -#define ASIC3_OWM_INTEN_ETMT (1 << 3) -#define ASIC3_OWM_INTEN_ERBF (1 << 4) - -#define ASIC3_OWM_CLKDIV_PRE (3 << 0) /* two bits wide at bit 0 */ -#define ASIC3_OWM_CLKDIV_DIV (7 << 2) /* 3 bits wide at bit 2 */ - +#define ASIC3_OWM_BASE 0xC00 /***************************************************************************** * The SD configuration registers are at a completely different location @@ -492,6 +496,7 @@ void asic3_gpio_set_value(struct asic3 *asic, unsigned gpio, int val); #define ASIC3_SDIO_CTRL_LEDCtrl 0x7C #define ASIC3_SDIO_CTRL_SoftwareReset 0x1C0 -#define ASIC3_MAP_SIZE 0x2000 +#define ASIC3_MAP_SIZE_32BIT 0x2000 +#define ASIC3_MAP_SIZE_16BIT 0x1000 #endif /* __ASIC3_H__ */ diff --git a/include/linux/mfd/core.h b/include/linux/mfd/core.h new file mode 100644 index 000000000000..bb3dd0545928 --- /dev/null +++ b/include/linux/mfd/core.h @@ -0,0 +1,55 @@ +#ifndef MFD_CORE_H +#define MFD_CORE_H +/* + * drivers/mfd/mfd-core.h + * + * core MFD support + * Copyright (c) 2006 Ian Molton + * Copyright (c) 2007 Dmitry Baryshkov + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + */ + +#include <linux/platform_device.h> + +/* + * This struct describes the MFD part ("cell"). + * After registration the copy of this structure will become the platform data + * of the resulting platform_device + */ +struct mfd_cell { + const char *name; + + int (*enable)(struct platform_device *dev); + int (*disable)(struct platform_device *dev); + int (*suspend)(struct platform_device *dev); + int (*resume)(struct platform_device *dev); + + void *driver_data; /* driver-specific data */ + + /* + * This resources can be specified relatievly to the parent device. 
+ * For accessing device you should use resources from device + */ + int num_resources; + const struct resource *resources; +}; + +static inline struct mfd_cell * +mfd_get_cell(struct platform_device *pdev) +{ + return (struct mfd_cell *)pdev->dev.platform_data; +} + +extern int mfd_add_devices( + struct platform_device *parent, + const struct mfd_cell *cells, int n_devs, + struct resource *mem_base, + int irq_base); + +extern void mfd_remove_devices(struct platform_device *parent); + +#endif diff --git a/include/linux/mfd/tc6393xb.h b/include/linux/mfd/tc6393xb.h new file mode 100644 index 000000000000..7cc824a58f7c --- /dev/null +++ b/include/linux/mfd/tc6393xb.h @@ -0,0 +1,49 @@ +/* + * Toshiba TC6393XB SoC support + * + * Copyright(c) 2005-2006 Chris Humbert + * Copyright(c) 2005 Dirk Opfer + * Copyright(c) 2005 Ian Molton <spyro@f2s.com> + * Copyright(c) 2007 Dmitry Baryshkov + * + * Based on code written by Sharp/Lineo for 2.4 kernels + * Based on locomo.c + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#ifndef TC6393XB_H +#define TC6393XB_H + +/* Also one should provide the CK3P6MI clock */ +struct tc6393xb_platform_data { + u16 scr_pll2cr; /* PLL2 Control */ + u16 scr_gper; /* GP Enable */ + u32 scr_gpo_doecr; /* GPO Data OE Control */ + u32 scr_gpo_dsr; /* GPO Data Set */ + + int (*enable)(struct platform_device *dev); + int (*disable)(struct platform_device *dev); + int (*suspend)(struct platform_device *dev); + int (*resume)(struct platform_device *dev); + + int irq_base; /* a base for cascaded irq */ + int gpio_base; + + struct tmio_nand_data *nand_data; +}; + +/* + * Relative to irq_base + */ +#define IRQ_TC6393_NAND 0 +#define IRQ_TC6393_MMC 1 +#define IRQ_TC6393_OHCI 2 +#define IRQ_TC6393_SERIAL 3 +#define IRQ_TC6393_FB 4 + +#define TC6393XB_NR_IRQS 8 + +#endif diff --git a/include/linux/mfd/tmio.h b/include/linux/mfd/tmio.h new file mode 100644 index 000000000000..9438d8c9ac1c --- /dev/null +++ b/include/linux/mfd/tmio.h @@ -0,0 +1,17 @@ +#ifndef MFD_TMIO_H +#define MFD_TMIO_H + +/* + * data for the NAND controller + */ +struct tmio_nand_data { + struct nand_bbt_descr *badblock_pattern; + struct mtd_partition *partition; + unsigned int num_partitions; +}; + +#define TMIO_NAND_CONFIG "tmio-nand-config" +#define TMIO_NAND_CONTROL "tmio-nand-control" +#define TMIO_NAND_IRQ "tmio-nand" + +#endif diff --git a/include/linux/mlx4/device.h b/include/linux/mlx4/device.h index a744383d16e9..81b3dd5206e0 100644 --- a/include/linux/mlx4/device.h +++ b/include/linux/mlx4/device.h @@ -398,7 +398,8 @@ int mlx4_srq_query(struct mlx4_dev *dev, struct mlx4_srq *srq, int *limit_waterm int mlx4_INIT_PORT(struct mlx4_dev *dev, int port); int mlx4_CLOSE_PORT(struct mlx4_dev *dev, int port); -int mlx4_multicast_attach(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16]); +int mlx4_multicast_attach(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16], + int block_mcast_loopback); int mlx4_multicast_detach(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16]); int mlx4_map_phys_fmr(struct mlx4_dev *dev, struct mlx4_fmr *fmr, u64 *page_list, diff --git a/include/linux/mm.h b/include/linux/mm.h index 586a943cab01..2128ef7780c6 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h @@ -108,6 +108,7 @@ extern unsigned int kobjsize(const void *objp); #define VM_CAN_NONLINEAR 0x08000000 /* Has ->fault & does nonlinear pages */ #define VM_MIXEDMAP 
0x10000000 /* Can contain "struct page" and pure PFN pages */ +#define VM_SAO 0x20000000 /* Strong Access Ordering (powerpc) */ #ifndef VM_STACK_DEFAULT_FLAGS /* arch can override this */ #define VM_STACK_DEFAULT_FLAGS VM_DATA_DEFAULT_FLAGS @@ -998,8 +999,8 @@ extern void free_area_init_node(int nid, pg_data_t *pgdat, extern void free_area_init_nodes(unsigned long *max_zone_pfn); extern void add_active_range(unsigned int nid, unsigned long start_pfn, unsigned long end_pfn); -extern void shrink_active_range(unsigned int nid, unsigned long old_end_pfn, - unsigned long new_end_pfn); +extern void remove_active_range(unsigned int nid, unsigned long start_pfn, + unsigned long end_pfn); extern void push_node_boundaries(unsigned int nid, unsigned long start_pfn, unsigned long end_pfn); extern void remove_all_active_ranges(void); @@ -1011,6 +1012,8 @@ extern unsigned long find_min_pfn_with_active_regions(void); extern unsigned long find_max_pfn_with_active_regions(void); extern void free_bootmem_with_active_regions(int nid, unsigned long max_low_pfn); +typedef int (*work_fn_t)(unsigned long, unsigned long, void *); +extern void work_with_active_regions(int nid, work_fn_t work_fn, void *data); extern void sparse_memory_present_with_active_regions(int nid); #ifndef CONFIG_HAVE_ARCH_EARLY_PFN_TO_NID extern int early_pfn_to_nid(unsigned long pfn); @@ -1024,6 +1027,7 @@ extern void mem_init(void); extern void show_mem(void); extern void si_meminfo(struct sysinfo * val); extern void si_meminfo_node(struct sysinfo *val, int nid); +extern int after_bootmem; #ifdef CONFIG_NUMA extern void setup_per_cpu_pageset(void); diff --git a/include/linux/mman.h b/include/linux/mman.h index dab8892e6ff1..30d1073bac3b 100644 --- a/include/linux/mman.h +++ b/include/linux/mman.h @@ -34,6 +34,32 @@ static inline void vm_unacct_memory(long pages) } /* + * Allow architectures to handle additional protection bits + */ + +#ifndef arch_calc_vm_prot_bits +#define arch_calc_vm_prot_bits(prot) 0 +#endif + +#ifndef arch_vm_get_page_prot +#define arch_vm_get_page_prot(vm_flags) __pgprot(0) +#endif + +#ifndef arch_validate_prot +/* + * This is called from mprotect(). PROT_GROWSDOWN and PROT_GROWSUP have + * already been masked out. + * + * Returns true if the prot flags are valid + */ +static inline int arch_validate_prot(unsigned long prot) +{ + return (prot & ~(PROT_READ | PROT_WRITE | PROT_EXEC | PROT_SEM)) == 0; +} +#define arch_validate_prot arch_validate_prot +#endif + +/* * Optimisation macro. It is equivalent to: * (x & bit1) ? bit2 : 0 * but this version is faster. 
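A sketch of how an architecture might hook the new mman.h overrides, loosely modeled on the strong-access-ordering case that VM_SAO above exists for; PROT_SAO and _PAGE_SAO are placeholders for whatever bits the architecture actually defines in its asm/mman.h:

    /* hypothetical arch-private protection bit */
    #define PROT_SAO        0x10

    /* translate the mmap/mprotect bit into the corresponding vma flag */
    #define arch_calc_vm_prot_bits(prot) \
            ((prot) & PROT_SAO ? VM_SAO : 0)

    /* and back into a page-table attribute when the vma is mapped */
    #define arch_vm_get_page_prot(vm_flags) \
            __pgprot((vm_flags) & VM_SAO ? _PAGE_SAO : 0)

    /* reject protection bits the hardware cannot honour */
    static inline int arch_validate_prot(unsigned long prot)
    {
        if (prot & ~(PROT_READ | PROT_WRITE | PROT_EXEC | PROT_SEM | PROT_SAO))
            return 0;
        return 1;
    }
    #define arch_validate_prot arch_validate_prot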
@@ -51,7 +77,8 @@ calc_vm_prot_bits(unsigned long prot) { return _calc_vm_trans(prot, PROT_READ, VM_READ ) | _calc_vm_trans(prot, PROT_WRITE, VM_WRITE) | - _calc_vm_trans(prot, PROT_EXEC, VM_EXEC ); + _calc_vm_trans(prot, PROT_EXEC, VM_EXEC) | + arch_calc_vm_prot_bits(prot); } /* diff --git a/include/linux/mmc/core.h b/include/linux/mmc/core.h index d0c3abed74c2..143cebf0586f 100644 --- a/include/linux/mmc/core.h +++ b/include/linux/mmc/core.h @@ -135,6 +135,7 @@ extern int mmc_wait_for_app_cmd(struct mmc_host *, struct mmc_card *, struct mmc_command *, int); extern void mmc_set_data_timeout(struct mmc_data *, const struct mmc_card *); +extern unsigned int mmc_align_data_size(struct mmc_card *, unsigned int); extern int __mmc_claim_host(struct mmc_host *host, atomic_t *abort); extern void mmc_release_host(struct mmc_host *host); diff --git a/include/linux/mmc/host.h b/include/linux/mmc/host.h index 7ab962fa1d73..10a2080086ca 100644 --- a/include/linux/mmc/host.h +++ b/include/linux/mmc/host.h @@ -51,8 +51,30 @@ struct mmc_ios { struct mmc_host_ops { void (*request)(struct mmc_host *host, struct mmc_request *req); + /* + * Avoid calling these three functions too often or in a "fast path", + * since underlaying controller might implement them in an expensive + * and/or slow way. + * + * Also note that these functions might sleep, so don't call them + * in the atomic contexts! + * + * Return values for the get_ro callback should be: + * 0 for a read/write card + * 1 for a read-only card + * -ENOSYS when not supported (equal to NULL callback) + * or a negative errno value when something bad happened + * + * Return values for the get_ro callback should be: + * 0 for a absent card + * 1 for a present card + * -ENOSYS when not supported (equal to NULL callback) + * or a negative errno value when something bad happened + */ void (*set_ios)(struct mmc_host *host, struct mmc_ios *ios); int (*get_ro)(struct mmc_host *host); + int (*get_cd)(struct mmc_host *host); + void (*enable_sdio_irq)(struct mmc_host *host, int enable); }; @@ -89,11 +111,11 @@ struct mmc_host { unsigned long caps; /* Host capabilities */ #define MMC_CAP_4_BIT_DATA (1 << 0) /* Can the host do 4 bit transfers */ -#define MMC_CAP_MULTIWRITE (1 << 1) /* Can accurately report bytes sent to card on error */ -#define MMC_CAP_MMC_HIGHSPEED (1 << 2) /* Can do MMC high-speed timing */ -#define MMC_CAP_SD_HIGHSPEED (1 << 3) /* Can do SD high-speed timing */ -#define MMC_CAP_SDIO_IRQ (1 << 4) /* Can signal pending SDIO IRQs */ -#define MMC_CAP_SPI (1 << 5) /* Talks only SPI protocols */ +#define MMC_CAP_MMC_HIGHSPEED (1 << 1) /* Can do MMC high-speed timing */ +#define MMC_CAP_SD_HIGHSPEED (1 << 2) /* Can do SD high-speed timing */ +#define MMC_CAP_SDIO_IRQ (1 << 3) /* Can signal pending SDIO IRQs */ +#define MMC_CAP_SPI (1 << 4) /* Talks only SPI protocols */ +#define MMC_CAP_NEEDS_POLL (1 << 5) /* Needs polling for card-detection */ /* host specific block data */ unsigned int max_seg_size; /* see blk_queue_max_segment_size */ diff --git a/include/linux/mmc/mmc.h b/include/linux/mmc/mmc.h index 4236fbf0b6fb..14b81f3e5232 100644 --- a/include/linux/mmc/mmc.h +++ b/include/linux/mmc/mmc.h @@ -16,7 +16,6 @@ * Based strongly on code by: * * Author: Yong-iL Joh <tolkien@mizi.com> - * Date : $Date: 2002/06/18 12:37:30 $ * * Author: Andrew Christian * 15 May 2002 diff --git a/include/linux/mmc/sdio_func.h b/include/linux/mmc/sdio_func.h index b050f4d7b41f..07bee4a0d457 100644 --- a/include/linux/mmc/sdio_func.h +++ b/include/linux/mmc/sdio_func.h 
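An illustrative host-driver implementation of the return conventions documented in the new mmc_host_ops comment (the second block of return values there actually describes get_cd, despite its copy-pasted "get_ro" label). struct my_host and the GPIO wiring are invented, and the rest of the driver is omitted:

    #include <linux/mmc/host.h>
    /* gpio_get_value() comes from the platform's gpio support */

    struct my_host {
        int wp_gpio;    /* write-protect switch, -1 if not wired */
        int cd_gpio;    /* card-detect switch, -1 if not wired */
    };

    static int my_mmc_get_ro(struct mmc_host *mmc)
    {
        struct my_host *host = mmc_priv(mmc);

        if (host->wp_gpio < 0)
            return -ENOSYS;                 /* same as a NULL callback */
        return gpio_get_value(host->wp_gpio);   /* 1 = read-only card */
    }

    static int my_mmc_get_cd(struct mmc_host *mmc)
    {
        struct my_host *host = mmc_priv(mmc);

        if (host->cd_gpio < 0)
            return -ENOSYS;
        return !gpio_get_value(host->cd_gpio);  /* active-low detect switch */
    }

    static const struct mmc_host_ops my_mmc_ops = {
        /* .request and .set_ios omitted for brevity */
        .get_ro = my_mmc_get_ro,
        .get_cd = my_mmc_get_cd,
    };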
@@ -1,7 +1,7 @@ /* * include/linux/mmc/sdio_func.h * - * Copyright 2007 Pierre Ossman + * Copyright 2007-2008 Pierre Ossman * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by @@ -46,6 +46,8 @@ struct sdio_func { unsigned max_blksize; /* maximum block size */ unsigned cur_blksize; /* current block size */ + unsigned enable_timeout; /* max enable timeout in msec */ + unsigned int state; /* function state */ #define SDIO_STATE_PRESENT (1<<0) /* present in sysfs */ @@ -120,23 +122,22 @@ extern int sdio_set_block_size(struct sdio_func *func, unsigned blksz); extern int sdio_claim_irq(struct sdio_func *func, sdio_irq_handler_t *handler); extern int sdio_release_irq(struct sdio_func *func); -extern unsigned char sdio_readb(struct sdio_func *func, - unsigned int addr, int *err_ret); -extern unsigned short sdio_readw(struct sdio_func *func, - unsigned int addr, int *err_ret); -extern unsigned long sdio_readl(struct sdio_func *func, - unsigned int addr, int *err_ret); +extern unsigned int sdio_align_size(struct sdio_func *func, unsigned int sz); + +extern u8 sdio_readb(struct sdio_func *func, unsigned int addr, int *err_ret); +extern u16 sdio_readw(struct sdio_func *func, unsigned int addr, int *err_ret); +extern u32 sdio_readl(struct sdio_func *func, unsigned int addr, int *err_ret); extern int sdio_memcpy_fromio(struct sdio_func *func, void *dst, unsigned int addr, int count); extern int sdio_readsb(struct sdio_func *func, void *dst, unsigned int addr, int count); -extern void sdio_writeb(struct sdio_func *func, unsigned char b, +extern void sdio_writeb(struct sdio_func *func, u8 b, unsigned int addr, int *err_ret); -extern void sdio_writew(struct sdio_func *func, unsigned short b, +extern void sdio_writew(struct sdio_func *func, u16 b, unsigned int addr, int *err_ret); -extern void sdio_writel(struct sdio_func *func, unsigned long b, +extern void sdio_writel(struct sdio_func *func, u32 b, unsigned int addr, int *err_ret); extern int sdio_memcpy_toio(struct sdio_func *func, unsigned int addr, diff --git a/include/linux/mmiotrace.h b/include/linux/mmiotrace.h new file mode 100644 index 000000000000..61d19e1b7a0b --- /dev/null +++ b/include/linux/mmiotrace.h @@ -0,0 +1,85 @@ +#ifndef MMIOTRACE_H +#define MMIOTRACE_H + +#include <linux/types.h> +#include <linux/list.h> + +struct kmmio_probe; +struct pt_regs; + +typedef void (*kmmio_pre_handler_t)(struct kmmio_probe *, + struct pt_regs *, unsigned long addr); +typedef void (*kmmio_post_handler_t)(struct kmmio_probe *, + unsigned long condition, struct pt_regs *); + +struct kmmio_probe { + struct list_head list; /* kmmio internal list */ + unsigned long addr; /* start location of the probe point */ + unsigned long len; /* length of the probe region */ + kmmio_pre_handler_t pre_handler; /* Called before addr is executed. */ + kmmio_post_handler_t post_handler; /* Called after addr is executed */ + void *private; +}; + +/* kmmio is active by some kmmio_probes? */ +static inline int is_kmmio_active(void) +{ + extern unsigned int kmmio_count; + return kmmio_count; +} + +extern int register_kmmio_probe(struct kmmio_probe *p); +extern void unregister_kmmio_probe(struct kmmio_probe *p); + +/* Called from page fault handler. 
*/ +extern int kmmio_handler(struct pt_regs *regs, unsigned long addr); + +/* Called from ioremap.c */ +#ifdef CONFIG_MMIOTRACE +extern void mmiotrace_ioremap(resource_size_t offset, unsigned long size, + void __iomem *addr); +extern void mmiotrace_iounmap(volatile void __iomem *addr); +#else +static inline void mmiotrace_ioremap(resource_size_t offset, + unsigned long size, void __iomem *addr) +{ +} + +static inline void mmiotrace_iounmap(volatile void __iomem *addr) +{ +} +#endif /* CONFIG_MMIOTRACE_HOOKS */ + +enum mm_io_opcode { + MMIO_READ = 0x1, /* struct mmiotrace_rw */ + MMIO_WRITE = 0x2, /* struct mmiotrace_rw */ + MMIO_PROBE = 0x3, /* struct mmiotrace_map */ + MMIO_UNPROBE = 0x4, /* struct mmiotrace_map */ + MMIO_MARKER = 0x5, /* raw char data */ + MMIO_UNKNOWN_OP = 0x6, /* struct mmiotrace_rw */ +}; + +struct mmiotrace_rw { + resource_size_t phys; /* PCI address of register */ + unsigned long value; + unsigned long pc; /* optional program counter */ + int map_id; + unsigned char opcode; /* one of MMIO_{READ,WRITE,UNKNOWN_OP} */ + unsigned char width; /* size of register access in bytes */ +}; + +struct mmiotrace_map { + resource_size_t phys; /* base address in PCI space */ + unsigned long virt; /* base virtual address */ + unsigned long len; /* mapping size */ + int map_id; + unsigned char opcode; /* MMIO_PROBE or MMIO_UNPROBE */ +}; + +/* in kernel/trace/trace_mmiotrace.c */ +extern void enable_mmiotrace(void); +extern void disable_mmiotrace(void); +extern void mmio_trace_rw(struct mmiotrace_rw *rw); +extern void mmio_trace_mapping(struct mmiotrace_map *map); + +#endif /* MMIOTRACE_H */ diff --git a/include/linux/mod_devicetable.h b/include/linux/mod_devicetable.h index 69b2342d5ebb..c4db5827963d 100644 --- a/include/linux/mod_devicetable.h +++ b/include/linux/mod_devicetable.h @@ -159,6 +159,15 @@ struct ap_device_id { #define AP_DEVICE_ID_MATCH_DEVICE_TYPE 0x01 +/* s390 css bus devices (subchannels) */ +struct css_device_id { + __u8 match_flags; + __u8 type; /* subchannel type */ + __u16 pad2; + __u32 pad3; + kernel_ulong_t driver_data; +}; + #define ACPI_ID_LEN 16 /* only 9 bytes needed here, 16 bytes are used */ /* to workaround crosscompile issues */ diff --git a/include/linux/module.h b/include/linux/module.h index 3e03b1acbc94..fce15ebd0e1c 100644 --- a/include/linux/module.h +++ b/include/linux/module.h @@ -249,27 +249,30 @@ struct module /* Exported symbols */ const struct kernel_symbol *syms; - unsigned int num_syms; const unsigned long *crcs; + unsigned int num_syms; /* GPL-only exported symbols. */ - const struct kernel_symbol *gpl_syms; unsigned int num_gpl_syms; + const struct kernel_symbol *gpl_syms; const unsigned long *gpl_crcs; +#ifdef CONFIG_UNUSED_SYMBOLS /* unused exported symbols. */ const struct kernel_symbol *unused_syms; - unsigned int num_unused_syms; const unsigned long *unused_crcs; + unsigned int num_unused_syms; + /* GPL-only, unused exported symbols. */ - const struct kernel_symbol *unused_gpl_syms; unsigned int num_unused_gpl_syms; + const struct kernel_symbol *unused_gpl_syms; const unsigned long *unused_gpl_crcs; +#endif /* symbols that will be GPL-only in the near future. 
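For the new mmiotrace/kmmio interface declared above, a minimal registration sketch; the traced address, length and handler bodies are made up:

    #include <linux/mmiotrace.h>
    #include <linux/init.h>

    static void my_pre(struct kmmio_probe *p, struct pt_regs *regs,
                       unsigned long addr)
    {
        /* runs before the faulting MMIO access is executed */
    }

    static void my_post(struct kmmio_probe *p, unsigned long condition,
                        struct pt_regs *regs)
    {
        /* runs after the access has completed */
    }

    static struct kmmio_probe my_probe = {
        .addr         = 0xfe000000UL,   /* start of the traced window (made up) */
        .len          = PAGE_SIZE,
        .pre_handler  = my_pre,
        .post_handler = my_post,
    };

    static int __init my_trace_init(void)
    {
        return register_kmmio_probe(&my_probe);
    }

    static void __exit my_trace_exit(void)
    {
        unregister_kmmio_probe(&my_probe);
    }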
*/ const struct kernel_symbol *gpl_future_syms; - unsigned int num_gpl_future_syms; const unsigned long *gpl_future_crcs; + unsigned int num_gpl_future_syms; /* Exception table */ unsigned int num_exentries; @@ -285,10 +288,10 @@ struct module void *module_core; /* Here are the sizes of the init and core sections */ - unsigned long init_size, core_size; + unsigned int init_size, core_size; /* The size of the executable code in each section. */ - unsigned long init_text_size, core_text_size; + unsigned int init_text_size, core_text_size; /* The handle returned from unwind_add_table. */ void *unwind_info; @@ -300,29 +303,15 @@ struct module #ifdef CONFIG_GENERIC_BUG /* Support for BUG */ + unsigned num_bugs; struct list_head bug_list; struct bug_entry *bug_table; - unsigned num_bugs; -#endif - -#ifdef CONFIG_MODULE_UNLOAD - /* Reference counts */ - struct module_ref ref[NR_CPUS]; - - /* What modules depend on me? */ - struct list_head modules_which_use_me; - - /* Who is waiting for us to be unloaded */ - struct task_struct *waiter; - - /* Destruction function. */ - void (*exit)(void); #endif #ifdef CONFIG_KALLSYMS /* We keep the symbol and string tables for kallsyms. */ Elf_Sym *symtab; - unsigned long num_symtab; + unsigned int num_symtab; char *strtab; /* Section attributes */ @@ -342,6 +331,21 @@ struct module struct marker *markers; unsigned int num_markers; #endif + +#ifdef CONFIG_MODULE_UNLOAD + /* What modules depend on me? */ + struct list_head modules_which_use_me; + + /* Who is waiting for us to be unloaded */ + struct task_struct *waiter; + + /* Destruction function. */ + void (*exit)(void); + + /* Reference counts */ + struct module_ref ref[NR_CPUS]; +#endif + }; #ifndef MODULE_ARCH_INIT #define MODULE_ARCH_INIT {} diff --git a/include/linux/mpage.h b/include/linux/mpage.h index 068a0c9946af..5c42821da2d1 100644 --- a/include/linux/mpage.h +++ b/include/linux/mpage.h @@ -11,11 +11,21 @@ */ #ifdef CONFIG_BLOCK +struct mpage_data { + struct bio *bio; + sector_t last_block_in_bio; + get_block_t *get_block; + unsigned use_writepage; +}; + struct writeback_control; +struct bio *mpage_bio_submit(int rw, struct bio *bio); int mpage_readpages(struct address_space *mapping, struct list_head *pages, unsigned nr_pages, get_block_t get_block); int mpage_readpage(struct page *page, get_block_t get_block); +int __mpage_writepage(struct page *page, struct writeback_control *wbc, + void *data); int mpage_writepages(struct address_space *mapping, struct writeback_control *wbc, get_block_t get_block); int mpage_writepage(struct page *page, get_block_t *get_block, diff --git a/include/linux/mroute.h b/include/linux/mroute.h index de4decfa1bfc..07112ee9293a 100644 --- a/include/linux/mroute.h +++ b/include/linux/mroute.h @@ -144,11 +144,37 @@ static inline int ip_mroute_opt(int opt) } #endif +#ifdef CONFIG_IP_MROUTE extern int ip_mroute_setsockopt(struct sock *, int, char __user *, int); extern int ip_mroute_getsockopt(struct sock *, int, char __user *, int __user *); extern int ipmr_ioctl(struct sock *sk, int cmd, void __user *arg); -extern void ip_mr_init(void); +extern int ip_mr_init(void); +#else +static inline +int ip_mroute_setsockopt(struct sock *sock, + int optname, char __user *optval, int optlen) +{ + return -ENOPROTOOPT; +} + +static inline +int ip_mroute_getsockopt(struct sock *sock, + int optname, char __user *optval, int __user *optlen) +{ + return -ENOPROTOOPT; +} +static inline +int ipmr_ioctl(struct sock *sk, int cmd, void __user *arg) +{ + return -ENOIOCTLCMD; +} + +static inline 
int ip_mr_init(void) +{ + return 0; +} +#endif struct vif_device { diff --git a/include/linux/mroute6.h b/include/linux/mroute6.h index e7989593142b..5cf50473a10f 100644 --- a/include/linux/mroute6.h +++ b/include/linux/mroute6.h @@ -131,11 +131,44 @@ static inline int ip6_mroute_opt(int opt) struct sock; +#ifdef CONFIG_IPV6_MROUTE extern int ip6_mroute_setsockopt(struct sock *, int, char __user *, int); extern int ip6_mroute_getsockopt(struct sock *, int, char __user *, int __user *); extern int ip6_mr_input(struct sk_buff *skb); extern int ip6mr_ioctl(struct sock *sk, int cmd, void __user *arg); -extern void ip6_mr_init(void); +extern int ip6_mr_init(void); +extern void ip6_mr_cleanup(void); +#else +static inline +int ip6_mroute_setsockopt(struct sock *sock, + int optname, char __user *optval, int optlen) +{ + return -ENOPROTOOPT; +} + +static inline +int ip6_mroute_getsockopt(struct sock *sock, + int optname, char __user *optval, int __user *optlen) +{ + return -ENOPROTOOPT; +} + +static inline +int ip6mr_ioctl(struct sock *sk, int cmd, void __user *arg) +{ + return -ENOIOCTLCMD; +} + +static inline int ip6_mr_init(void) +{ + return 0; +} + +static inline void ip6_mr_cleanup(void) +{ + return; +} +#endif struct mif_device { diff --git a/include/linux/mtd/map.h b/include/linux/mtd/map.h index 85e3939cf487..aa30244492c6 100644 --- a/include/linux/mtd/map.h +++ b/include/linux/mtd/map.h @@ -188,7 +188,7 @@ typedef union { */ struct map_info { - char *name; + const char *name; unsigned long size; resource_size_t phys; #define NO_XIP (-1UL) diff --git a/include/linux/mtd/mtd.h b/include/linux/mtd/mtd.h index 31ed234b2a74..4ed40caff4e5 100644 --- a/include/linux/mtd/mtd.h +++ b/include/linux/mtd/mtd.h @@ -119,7 +119,7 @@ struct mtd_info { u_int32_t oobavail; // Available OOB bytes per block // Kernel-only stuff starts here. - char *name; + const char *name; int index; /* ecc layout structure pointer - read only ! 
*/ diff --git a/include/linux/mtd/ubi.h b/include/linux/mtd/ubi.h index f71201d0f3e7..6316fafe5c2a 100644 --- a/include/linux/mtd/ubi.h +++ b/include/linux/mtd/ubi.h @@ -45,13 +45,13 @@ enum { * @size: how many physical eraseblocks are reserved for this volume * @used_bytes: how many bytes of data this volume contains * @used_ebs: how many physical eraseblocks of this volume actually contain any - * data + * data * @vol_type: volume type (%UBI_DYNAMIC_VOLUME or %UBI_STATIC_VOLUME) * @corrupted: non-zero if the volume is corrupted (static volumes only) * @upd_marker: non-zero if the volume has update marker set * @alignment: volume alignment * @usable_leb_size: how many bytes are available in logical eraseblocks of - * this volume + * this volume * @name_len: volume name length * @name: volume name * @cdev: UBI volume character device major and minor numbers @@ -152,6 +152,7 @@ int ubi_leb_erase(struct ubi_volume_desc *desc, int lnum); int ubi_leb_unmap(struct ubi_volume_desc *desc, int lnum); int ubi_leb_map(struct ubi_volume_desc *desc, int lnum, int dtype); int ubi_is_mapped(struct ubi_volume_desc *desc, int lnum); +int ubi_sync(int ubi_num); /* * This function is the same as the 'ubi_leb_read()' function, but it does not diff --git a/include/linux/mv643xx_eth.h b/include/linux/mv643xx_eth.h index a15cdd4a8e58..12078577aef6 100644 --- a/include/linux/mv643xx_eth.h +++ b/include/linux/mv643xx_eth.h @@ -17,30 +17,59 @@ struct mv643xx_eth_shared_platform_data { struct mbus_dram_target_info *dram; - unsigned int t_clk; + unsigned int t_clk; }; struct mv643xx_eth_platform_data { + /* + * Pointer back to our parent instance, and our port number. + */ struct platform_device *shared; - int port_number; + int port_number; + /* + * Whether a PHY is present, and if yes, at which address. + */ struct platform_device *shared_smi; + int force_phy_addr; + int phy_addr; - u16 force_phy_addr; /* force override if phy_addr == 0 */ - u16 phy_addr; - - /* If speed is 0, then speed and duplex are autonegotiated. */ - int speed; /* 0, SPEED_10, SPEED_100, SPEED_1000 */ - int duplex; /* DUPLEX_HALF or DUPLEX_FULL */ - - /* non-zero values of the following fields override defaults */ - u32 tx_queue_size; - u32 rx_queue_size; - u32 tx_sram_addr; - u32 tx_sram_size; - u32 rx_sram_addr; - u32 rx_sram_size; - u8 mac_addr[6]; /* mac address if non-zero*/ + /* + * Use this MAC address if it is valid, overriding the + * address that is already in the hardware. + */ + u8 mac_addr[6]; + + /* + * If speed is 0, autonegotiation is enabled. + * Valid values for speed: 0, SPEED_10, SPEED_100, SPEED_1000. + * Valid values for duplex: DUPLEX_HALF, DUPLEX_FULL. + */ + int speed; + int duplex; + + /* + * Which RX/TX queues to use. + */ + int rx_queue_mask; + int tx_queue_mask; + + /* + * Override default RX/TX queue sizes if nonzero. + */ + int rx_queue_size; + int tx_queue_size; + + /* + * Use on-chip SRAM for RX/TX descriptors if size is nonzero + * and sufficient to contain all descriptors for the requested + * ring sizes. 
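A board-file sketch filling in the restructured mv643xx_eth_platform_data (mv643xx_eth_shared_dev, the PHY address and the MAC address are placeholders; fields left at zero keep the documented defaults):

    #include <linux/mv643xx_eth.h>

    static struct mv643xx_eth_platform_data my_eth_data = {
        .shared         = &mv643xx_eth_shared_dev,  /* parent instance */
        .port_number    = 0,
        .force_phy_addr = 1,
        .phy_addr       = 8,
        .mac_addr       = { 0x00, 0x50, 0x43, 0x12, 0x34, 0x56 },
        .speed          = 0,        /* 0 => autonegotiate speed/duplex */
        .rx_queue_size  = 400,      /* nonzero => override the default */
        .tx_queue_size  = 800,
    };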
+ */ + unsigned long rx_sram_addr; + int rx_sram_size; + unsigned long tx_sram_addr; + int tx_sram_size; }; -#endif /* __LINUX_MV643XX_ETH_H */ + +#endif diff --git a/include/linux/net.h b/include/linux/net.h index 71f7dd559285..150a48c68d52 100644 --- a/include/linux/net.h +++ b/include/linux/net.h @@ -106,23 +106,23 @@ enum sock_shutdown_cmd { /** * struct socket - general BSD socket * @state: socket state (%SS_CONNECTED, etc) + * @type: socket type (%SOCK_STREAM, etc) * @flags: socket flags (%SOCK_ASYNC_NOSPACE, etc) * @ops: protocol specific socket operations * @fasync_list: Asynchronous wake up list * @file: File back pointer for gc * @sk: internal networking protocol agnostic socket representation * @wait: wait queue for several uses - * @type: socket type (%SOCK_STREAM, etc) */ struct socket { socket_state state; + short type; unsigned long flags; const struct proto_ops *ops; struct fasync_struct *fasync_list; struct file *file; struct sock *sk; wait_queue_head_t wait; - short type; }; struct vm_area_struct; diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h index 25f87102ab66..b4d056ceab96 100644 --- a/include/linux/netdevice.h +++ b/include/linux/netdevice.h @@ -118,14 +118,6 @@ struct wireless_dev; #endif /* __KERNEL__ */ -struct net_device_subqueue -{ - /* Give a control state for each queue. This struct may contain - * per-queue locks in the future. - */ - unsigned long state; -}; - /* * Network device statistics. Akin to the 2.0 ether stats but * with byte counters. @@ -281,14 +273,11 @@ struct header_ops { enum netdev_state_t { - __LINK_STATE_XOFF=0, __LINK_STATE_START, __LINK_STATE_PRESENT, - __LINK_STATE_SCHED, __LINK_STATE_NOCARRIER, __LINK_STATE_LINKWATCH_PENDING, __LINK_STATE_DORMANT, - __LINK_STATE_QDISC_RUNNING, }; @@ -448,6 +437,20 @@ static inline void napi_synchronize(const struct napi_struct *n) # define napi_synchronize(n) barrier() #endif +enum netdev_queue_state_t +{ + __QUEUE_STATE_XOFF, +}; + +struct netdev_queue { + struct net_device *dev; + struct Qdisc *qdisc; + unsigned long state; + spinlock_t _xmit_lock; + int xmit_lock_owner; + struct Qdisc *qdisc_sleeping; +} ____cacheline_aligned_in_smp; + /* * The DEVICE structure. * Actually, this whole structure is a big mistake. It mixes I/O @@ -516,7 +519,6 @@ struct net_device #define NETIF_F_LLTX 4096 /* LockLess TX - deprecated. Please */ /* do not use LLTX in new drivers */ #define NETIF_F_NETNS_LOCAL 8192 /* Does not change network namespaces */ -#define NETIF_F_MULTI_QUEUE 16384 /* Has multiple TX/RX queues */ #define NETIF_F_LRO 32768 /* large receive offload */ /* Segmentation offload features */ @@ -537,8 +539,6 @@ struct net_device #define NETIF_F_V6_CSUM (NETIF_F_GEN_CSUM | NETIF_F_IPV6_CSUM) #define NETIF_F_ALL_CSUM (NETIF_F_V4_CSUM | NETIF_F_V6_CSUM) - struct net_device *next_sched; - /* Interface index. 
Unique device identifier */ int ifindex; int iflink; @@ -594,13 +594,14 @@ struct net_device unsigned char addr_len; /* hardware address length */ unsigned short dev_id; /* for shared network cards */ + spinlock_t addr_list_lock; struct dev_addr_list *uc_list; /* Secondary unicast mac addresses */ int uc_count; /* Number of installed ucasts */ int uc_promisc; struct dev_addr_list *mc_list; /* Multicast mac addresses */ int mc_count; /* Number of installed mcasts */ - int promiscuity; - int allmulti; + unsigned int promiscuity; + unsigned int allmulti; /* Protocol specific pointers */ @@ -624,32 +625,21 @@ struct net_device unsigned char broadcast[MAX_ADDR_LEN]; /* hw bcast add */ - /* ingress path synchronizer */ - spinlock_t ingress_lock; - struct Qdisc *qdisc_ingress; + struct netdev_queue rx_queue; -/* - * Cache line mostly used on queue transmit path (qdisc) - */ - /* device queue lock */ - spinlock_t queue_lock ____cacheline_aligned_in_smp; - struct Qdisc *qdisc; - struct Qdisc *qdisc_sleeping; - struct list_head qdisc_list; - unsigned long tx_queue_len; /* Max frames per queue allowed */ + struct netdev_queue *_tx ____cacheline_aligned_in_smp; + + /* Number of TX queues allocated at alloc_netdev_mq() time */ + unsigned int num_tx_queues; + + /* Number of TX queues currently active in device */ + unsigned int real_num_tx_queues; - /* Partially transmitted GSO packet. */ - struct sk_buff *gso_skb; + unsigned long tx_queue_len; /* Max frames per queue allowed */ /* * One part is mostly used on xmit path (device) */ - /* hard_start_xmit synchronizer */ - spinlock_t _xmit_lock ____cacheline_aligned_in_smp; - /* cpu id of processor entered to hard_start_xmit or -1, - if nobody entered there. - */ - int xmit_lock_owner; void *priv; /* pointer to private data */ int (*hard_start_xmit) (struct sk_buff *skb, struct net_device *dev); @@ -728,6 +718,9 @@ struct net_device void (*poll_controller)(struct net_device *dev); #endif + u16 (*select_queue)(struct net_device *dev, + struct sk_buff *skb); + #ifdef CONFIG_NET_NS /* Network namespace this network device is inside */ struct net *nd_net; @@ -740,6 +733,8 @@ struct net_device struct net_bridge_port *br_port; /* macvlan */ struct macvlan_port *macvlan_port; + /* GARP */ + struct garp_port *garp_port; /* class/net/name entry */ struct device dev; @@ -755,16 +750,31 @@ struct net_device /* for setting kernel sock attribute on TCP connection setup */ #define GSO_MAX_SIZE 65536 unsigned int gso_max_size; - - /* The TX queue control structures */ - unsigned int egress_subqueue_count; - struct net_device_subqueue egress_subqueue[1]; }; #define to_net_dev(d) container_of(d, struct net_device, dev) #define NETDEV_ALIGN 32 #define NETDEV_ALIGN_CONST (NETDEV_ALIGN - 1) +static inline +struct netdev_queue *netdev_get_tx_queue(const struct net_device *dev, + unsigned int index) +{ + return &dev->_tx[index]; +} + +static inline void netdev_for_each_tx_queue(struct net_device *dev, + void (*f)(struct net_device *, + struct netdev_queue *, + void *), + void *arg) +{ + unsigned int i; + + for (i = 0; i < dev->num_tx_queues; i++) + f(dev, &dev->_tx[i], arg); +} + /* * Net namespace inlines */ @@ -795,7 +805,9 @@ void dev_net_set(struct net_device *dev, struct net *net) */ static inline void *netdev_priv(const struct net_device *dev) { - return dev->priv; + return (char *)dev + ((sizeof(struct net_device) + + NETDEV_ALIGN_CONST) + & ~NETDEV_ALIGN_CONST); } /* Set the sysfs physical device reference for the network logical device @@ -830,6 +842,19 @@ static 
inline void netif_napi_add(struct net_device *dev, set_bit(NAPI_STATE_SCHED, &napi->state); } +/** + * netif_napi_del - remove a napi context + * @napi: napi context + * + * netif_napi_del() removes a napi context from the network device napi list + */ +static inline void netif_napi_del(struct napi_struct *napi) +{ +#ifdef CONFIG_NETPOLL + list_del(&napi->dev_list); +#endif +} + struct packet_type { __be16 type; /* This is really htons(ether_type). */ struct net_device *dev; /* NULL is wildcarded here */ @@ -890,6 +915,7 @@ extern struct net_device *__dev_get_by_name(struct net *net, const char *name); extern int dev_alloc_name(struct net_device *dev, const char *name); extern int dev_open(struct net_device *dev); extern int dev_close(struct net_device *dev); +extern void dev_disable_lro(struct net_device *dev); extern int dev_queue_xmit(struct sk_buff *skb); extern int register_netdevice(struct net_device *dev); extern void unregister_netdevice(struct net_device *dev); @@ -939,7 +965,7 @@ static inline int unregister_gifconf(unsigned int family) */ struct softnet_data { - struct net_device *output_queue; + struct Qdisc *output_queue; struct sk_buff_head input_pkt_queue; struct list_head poll_list; struct sk_buff *completion_queue; @@ -954,12 +980,25 @@ DECLARE_PER_CPU(struct softnet_data,softnet_data); #define HAVE_NETIF_QUEUE -extern void __netif_schedule(struct net_device *dev); +extern void __netif_schedule(struct Qdisc *q); -static inline void netif_schedule(struct net_device *dev) +static inline void netif_schedule_queue(struct netdev_queue *txq) { - if (!test_bit(__LINK_STATE_XOFF, &dev->state)) - __netif_schedule(dev); + if (!test_bit(__QUEUE_STATE_XOFF, &txq->state)) + __netif_schedule(txq->qdisc); +} + +static inline void netif_tx_schedule_all(struct net_device *dev) +{ + unsigned int i; + + for (i = 0; i < dev->num_tx_queues; i++) + netif_schedule_queue(netdev_get_tx_queue(dev, i)); +} + +static inline void netif_tx_start_queue(struct netdev_queue *dev_queue) +{ + clear_bit(__QUEUE_STATE_XOFF, &dev_queue->state); } /** @@ -970,7 +1009,29 @@ static inline void netif_schedule(struct net_device *dev) */ static inline void netif_start_queue(struct net_device *dev) { - clear_bit(__LINK_STATE_XOFF, &dev->state); + netif_tx_start_queue(netdev_get_tx_queue(dev, 0)); +} + +static inline void netif_tx_start_all_queues(struct net_device *dev) +{ + unsigned int i; + + for (i = 0; i < dev->num_tx_queues; i++) { + struct netdev_queue *txq = netdev_get_tx_queue(dev, i); + netif_tx_start_queue(txq); + } +} + +static inline void netif_tx_wake_queue(struct netdev_queue *dev_queue) +{ +#ifdef CONFIG_NETPOLL_TRAP + if (netpoll_trap()) { + clear_bit(__QUEUE_STATE_XOFF, &dev_queue->state); + return; + } +#endif + if (test_and_clear_bit(__QUEUE_STATE_XOFF, &dev_queue->state)) + __netif_schedule(dev_queue->qdisc); } /** @@ -982,14 +1043,22 @@ static inline void netif_start_queue(struct net_device *dev) */ static inline void netif_wake_queue(struct net_device *dev) { -#ifdef CONFIG_NETPOLL_TRAP - if (netpoll_trap()) { - clear_bit(__LINK_STATE_XOFF, &dev->state); - return; + netif_tx_wake_queue(netdev_get_tx_queue(dev, 0)); +} + +static inline void netif_tx_wake_all_queues(struct net_device *dev) +{ + unsigned int i; + + for (i = 0; i < dev->num_tx_queues; i++) { + struct netdev_queue *txq = netdev_get_tx_queue(dev, i); + netif_tx_wake_queue(txq); } -#endif - if (test_and_clear_bit(__LINK_STATE_XOFF, &dev->state)) - __netif_schedule(dev); +} + +static inline void netif_tx_stop_queue(struct netdev_queue 
*dev_queue) +{ + set_bit(__QUEUE_STATE_XOFF, &dev_queue->state); } /** @@ -1001,7 +1070,22 @@ static inline void netif_wake_queue(struct net_device *dev) */ static inline void netif_stop_queue(struct net_device *dev) { - set_bit(__LINK_STATE_XOFF, &dev->state); + netif_tx_stop_queue(netdev_get_tx_queue(dev, 0)); +} + +static inline void netif_tx_stop_all_queues(struct net_device *dev) +{ + unsigned int i; + + for (i = 0; i < dev->num_tx_queues; i++) { + struct netdev_queue *txq = netdev_get_tx_queue(dev, i); + netif_tx_stop_queue(txq); + } +} + +static inline int netif_tx_queue_stopped(const struct netdev_queue *dev_queue) +{ + return test_bit(__QUEUE_STATE_XOFF, &dev_queue->state); } /** @@ -1012,7 +1096,7 @@ static inline void netif_stop_queue(struct net_device *dev) */ static inline int netif_queue_stopped(const struct net_device *dev) { - return test_bit(__LINK_STATE_XOFF, &dev->state); + return netif_tx_queue_stopped(netdev_get_tx_queue(dev, 0)); } /** @@ -1042,9 +1126,8 @@ static inline int netif_running(const struct net_device *dev) */ static inline void netif_start_subqueue(struct net_device *dev, u16 queue_index) { -#ifdef CONFIG_NETDEVICES_MULTIQUEUE - clear_bit(__LINK_STATE_XOFF, &dev->egress_subqueue[queue_index].state); -#endif + struct netdev_queue *txq = netdev_get_tx_queue(dev, queue_index); + clear_bit(__QUEUE_STATE_XOFF, &txq->state); } /** @@ -1056,13 +1139,12 @@ static inline void netif_start_subqueue(struct net_device *dev, u16 queue_index) */ static inline void netif_stop_subqueue(struct net_device *dev, u16 queue_index) { -#ifdef CONFIG_NETDEVICES_MULTIQUEUE + struct netdev_queue *txq = netdev_get_tx_queue(dev, queue_index); #ifdef CONFIG_NETPOLL_TRAP if (netpoll_trap()) return; #endif - set_bit(__LINK_STATE_XOFF, &dev->egress_subqueue[queue_index].state); -#endif + set_bit(__QUEUE_STATE_XOFF, &txq->state); } /** @@ -1075,12 +1157,8 @@ static inline void netif_stop_subqueue(struct net_device *dev, u16 queue_index) static inline int __netif_subqueue_stopped(const struct net_device *dev, u16 queue_index) { -#ifdef CONFIG_NETDEVICES_MULTIQUEUE - return test_bit(__LINK_STATE_XOFF, - &dev->egress_subqueue[queue_index].state); -#else - return 0; -#endif + struct netdev_queue *txq = netdev_get_tx_queue(dev, queue_index); + return test_bit(__QUEUE_STATE_XOFF, &txq->state); } static inline int netif_subqueue_stopped(const struct net_device *dev, @@ -1098,15 +1176,13 @@ static inline int netif_subqueue_stopped(const struct net_device *dev, */ static inline void netif_wake_subqueue(struct net_device *dev, u16 queue_index) { -#ifdef CONFIG_NETDEVICES_MULTIQUEUE + struct netdev_queue *txq = netdev_get_tx_queue(dev, queue_index); #ifdef CONFIG_NETPOLL_TRAP if (netpoll_trap()) return; #endif - if (test_and_clear_bit(__LINK_STATE_XOFF, - &dev->egress_subqueue[queue_index].state)) - __netif_schedule(dev); -#endif + if (test_and_clear_bit(__QUEUE_STATE_XOFF, &txq->state)) + __netif_schedule(txq->qdisc); } /** @@ -1114,15 +1190,10 @@ static inline void netif_wake_subqueue(struct net_device *dev, u16 queue_index) * @dev: network device * * Check if device has multiple transmit queues - * Always falls if NETDEVICE_MULTIQUEUE is not configured */ static inline int netif_is_multiqueue(const struct net_device *dev) { -#ifdef CONFIG_NETDEVICES_MULTIQUEUE - return (!!(NETIF_F_MULTI_QUEUE & dev->features)); -#else - return 0; -#endif + return (dev->num_tx_queues > 1); } /* Use this variant when it is known for sure that it @@ -1142,6 +1213,7 @@ extern int netif_rx(struct sk_buff *skb); extern 
int netif_rx_ni(struct sk_buff *skb); #define HAVE_NETIF_RECEIVE_SKB 1 extern int netif_receive_skb(struct sk_buff *skb); +extern void netif_nit_deliver(struct sk_buff *skb); extern int dev_valid_name(const char *name); extern int dev_ioctl(struct net *net, unsigned int cmd, void __user *); extern int dev_ethtool(struct net *net, struct ifreq *); @@ -1154,7 +1226,8 @@ extern int dev_set_mtu(struct net_device *, int); extern int dev_set_mac_address(struct net_device *, struct sockaddr *); extern int dev_hard_start_xmit(struct sk_buff *skb, - struct net_device *dev); + struct net_device *dev, + struct netdev_queue *txq); extern int netdev_budget; @@ -1390,6 +1463,18 @@ static inline void netif_rx_complete(struct net_device *dev, local_irq_restore(flags); } +static inline void __netif_tx_lock(struct netdev_queue *txq, int cpu) +{ + spin_lock(&txq->_xmit_lock); + txq->xmit_lock_owner = cpu; +} + +static inline void __netif_tx_lock_bh(struct netdev_queue *txq) +{ + spin_lock_bh(&txq->_xmit_lock); + txq->xmit_lock_owner = smp_processor_id(); +} + /** * netif_tx_lock - grab network device transmit lock * @dev: network device @@ -1397,62 +1482,109 @@ static inline void netif_rx_complete(struct net_device *dev, * * Get network device transmit lock */ -static inline void __netif_tx_lock(struct net_device *dev, int cpu) -{ - spin_lock(&dev->_xmit_lock); - dev->xmit_lock_owner = cpu; -} - static inline void netif_tx_lock(struct net_device *dev) { - __netif_tx_lock(dev, smp_processor_id()); + int cpu = smp_processor_id(); + unsigned int i; + + for (i = 0; i < dev->num_tx_queues; i++) { + struct netdev_queue *txq = netdev_get_tx_queue(dev, i); + __netif_tx_lock(txq, cpu); + } } static inline void netif_tx_lock_bh(struct net_device *dev) { - spin_lock_bh(&dev->_xmit_lock); - dev->xmit_lock_owner = smp_processor_id(); + local_bh_disable(); + netif_tx_lock(dev); } -static inline int netif_tx_trylock(struct net_device *dev) +static inline int __netif_tx_trylock(struct netdev_queue *txq) { - int ok = spin_trylock(&dev->_xmit_lock); + int ok = spin_trylock(&txq->_xmit_lock); if (likely(ok)) - dev->xmit_lock_owner = smp_processor_id(); + txq->xmit_lock_owner = smp_processor_id(); return ok; } +static inline int netif_tx_trylock(struct net_device *dev) +{ + return __netif_tx_trylock(netdev_get_tx_queue(dev, 0)); +} + +static inline void __netif_tx_unlock(struct netdev_queue *txq) +{ + txq->xmit_lock_owner = -1; + spin_unlock(&txq->_xmit_lock); +} + +static inline void __netif_tx_unlock_bh(struct netdev_queue *txq) +{ + txq->xmit_lock_owner = -1; + spin_unlock_bh(&txq->_xmit_lock); +} + static inline void netif_tx_unlock(struct net_device *dev) { - dev->xmit_lock_owner = -1; - spin_unlock(&dev->_xmit_lock); + unsigned int i; + + for (i = 0; i < dev->num_tx_queues; i++) { + struct netdev_queue *txq = netdev_get_tx_queue(dev, i); + __netif_tx_unlock(txq); + } + } static inline void netif_tx_unlock_bh(struct net_device *dev) { - dev->xmit_lock_owner = -1; - spin_unlock_bh(&dev->_xmit_lock); + netif_tx_unlock(dev); + local_bh_enable(); } -#define HARD_TX_LOCK(dev, cpu) { \ +#define HARD_TX_LOCK(dev, txq, cpu) { \ if ((dev->features & NETIF_F_LLTX) == 0) { \ - __netif_tx_lock(dev, cpu); \ + __netif_tx_lock(txq, cpu); \ } \ } -#define HARD_TX_UNLOCK(dev) { \ +#define HARD_TX_UNLOCK(dev, txq) { \ if ((dev->features & NETIF_F_LLTX) == 0) { \ - netif_tx_unlock(dev); \ + __netif_tx_unlock(txq); \ } \ } static inline void netif_tx_disable(struct net_device *dev) { + unsigned int i; + netif_tx_lock_bh(dev); - 
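The helpers above replace the single per-device __LINK_STATE_XOFF bit with per-queue __QUEUE_STATE_XOFF state in struct netdev_queue; netif_stop_queue() and friends become wrappers around TX queue 0. A minimal sketch, not part of this patch, of how a multiqueue driver might use the per-queue forms; all foo_* names are hypothetical:

#include <linux/netdevice.h>

/* Wake only the ring that was flow-controlled. */
static void foo_wake_ring(struct net_device *dev, u16 ring)
{
	struct netdev_queue *txq = netdev_get_tx_queue(dev, ring);

	if (netif_tx_queue_stopped(txq))
		netif_wake_subqueue(dev, ring);	/* reschedules txq->qdisc */
}

/* Stop every ring around a (driver-specific) hardware reset. */
static void foo_tx_timeout(struct net_device *dev)
{
	unsigned int i;

	netif_tx_stop_all_queues(dev);	/* sets __QUEUE_STATE_XOFF on each ring */

	/* ... hardware reset would go here ... */

	for (i = 0; i < dev->num_tx_queues; i++)
		netif_wake_subqueue(dev, i);
}

The locking shown above follows the same pattern: HARD_TX_LOCK() now takes the struct netdev_queue being used, while netif_tx_lock(dev) iterates over every queue's _xmit_lock.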
netif_stop_queue(dev); + for (i = 0; i < dev->num_tx_queues; i++) { + struct netdev_queue *txq = netdev_get_tx_queue(dev, i); + netif_tx_stop_queue(txq); + } netif_tx_unlock_bh(dev); } +static inline void netif_addr_lock(struct net_device *dev) +{ + spin_lock(&dev->addr_list_lock); +} + +static inline void netif_addr_lock_bh(struct net_device *dev) +{ + spin_lock_bh(&dev->addr_list_lock); +} + +static inline void netif_addr_unlock(struct net_device *dev) +{ + spin_unlock(&dev->addr_list_lock); +} + +static inline void netif_addr_unlock_bh(struct net_device *dev) +{ + spin_unlock_bh(&dev->addr_list_lock); +} + /* These functions live elsewhere (drivers/net/net_init.c, but related) */ extern void ether_setup(struct net_device *dev); @@ -1480,9 +1612,10 @@ extern int __dev_addr_delete(struct dev_addr_list **list, int *count, void *ad extern int __dev_addr_add(struct dev_addr_list **list, int *count, void *addr, int alen, int newonly); extern int __dev_addr_sync(struct dev_addr_list **to, int *to_count, struct dev_addr_list **from, int *from_count); extern void __dev_addr_unsync(struct dev_addr_list **to, int *to_count, struct dev_addr_list **from, int *from_count); -extern void dev_set_promiscuity(struct net_device *dev, int inc); -extern void dev_set_allmulti(struct net_device *dev, int inc); +extern int dev_set_promiscuity(struct net_device *dev, int inc); +extern int dev_set_allmulti(struct net_device *dev, int inc); extern void netdev_state_change(struct net_device *dev); +extern void netdev_bonding_change(struct net_device *dev); extern void netdev_features_change(struct net_device *dev); /* Load a device via the kmod */ extern void dev_load(struct net *net, const char *name); @@ -1509,6 +1642,11 @@ extern void *dev_seq_next(struct seq_file *seq, void *v, loff_t *pos); extern void dev_seq_stop(struct seq_file *seq, void *v); #endif +extern int netdev_class_create_file(struct class_attribute *class_attr); +extern void netdev_class_remove_file(struct class_attribute *class_attr); + +extern char *netdev_drivername(struct net_device *dev, char *buffer, int len); + extern void linkwatch_run_queue(void); extern int netdev_compute_features(unsigned long all, unsigned long one); diff --git a/include/linux/netfilter/nf_conntrack_common.h b/include/linux/netfilter/nf_conntrack_common.h index bad1eb760f61..885cbe282260 100644 --- a/include/linux/netfilter/nf_conntrack_common.h +++ b/include/linux/netfilter/nf_conntrack_common.h @@ -122,7 +122,7 @@ enum ip_conntrack_events IPCT_NATINFO_BIT = 10, IPCT_NATINFO = (1 << IPCT_NATINFO_BIT), - /* Counter highest bit has been set */ + /* Counter highest bit has been set, unused */ IPCT_COUNTER_FILLING_BIT = 11, IPCT_COUNTER_FILLING = (1 << IPCT_COUNTER_FILLING_BIT), @@ -145,12 +145,6 @@ enum ip_conntrack_expect_events { }; #ifdef __KERNEL__ -struct ip_conntrack_counter -{ - u_int32_t packets; - u_int32_t bytes; -}; - struct ip_conntrack_stat { unsigned int searched; diff --git a/include/linux/netfilter/nfnetlink_conntrack.h b/include/linux/netfilter/nfnetlink_conntrack.h index 0a383ac083cb..c19595c89304 100644 --- a/include/linux/netfilter/nfnetlink_conntrack.h +++ b/include/linux/netfilter/nfnetlink_conntrack.h @@ -81,6 +81,7 @@ enum ctattr_protoinfo { CTA_PROTOINFO_UNSPEC, CTA_PROTOINFO_TCP, CTA_PROTOINFO_DCCP, + CTA_PROTOINFO_SCTP, __CTA_PROTOINFO_MAX }; #define CTA_PROTOINFO_MAX (__CTA_PROTOINFO_MAX - 1) @@ -103,12 +104,21 @@ enum ctattr_protoinfo_dccp { }; #define CTA_PROTOINFO_DCCP_MAX (__CTA_PROTOINFO_DCCP_MAX - 1) +enum ctattr_protoinfo_sctp { + 
CTA_PROTOINFO_SCTP_UNSPEC, + CTA_PROTOINFO_SCTP_STATE, + CTA_PROTOINFO_SCTP_VTAG_ORIGINAL, + CTA_PROTOINFO_SCTP_VTAG_REPLY, + __CTA_PROTOINFO_SCTP_MAX +}; +#define CTA_PROTOINFO_SCTP_MAX (__CTA_PROTOINFO_SCTP_MAX - 1) + enum ctattr_counters { CTA_COUNTERS_UNSPEC, - CTA_COUNTERS_PACKETS, /* old 64bit counters */ - CTA_COUNTERS_BYTES, /* old 64bit counters */ - CTA_COUNTERS32_PACKETS, - CTA_COUNTERS32_BYTES, + CTA_COUNTERS_PACKETS, /* 64bit counters */ + CTA_COUNTERS_BYTES, /* 64bit counters */ + CTA_COUNTERS32_PACKETS, /* old 32bit counters, unused */ + CTA_COUNTERS32_BYTES, /* old 32bit counters, unused */ __CTA_COUNTERS_MAX }; #define CTA_COUNTERS_MAX (__CTA_COUNTERS_MAX - 1) diff --git a/include/linux/netfilter/nfnetlink_log.h b/include/linux/netfilter/nfnetlink_log.h index a85721332924..f661731f3cb1 100644 --- a/include/linux/netfilter/nfnetlink_log.h +++ b/include/linux/netfilter/nfnetlink_log.h @@ -48,6 +48,9 @@ enum nfulnl_attr_type { NFULA_SEQ, /* instance-local sequence number */ NFULA_SEQ_GLOBAL, /* global sequence number */ NFULA_GID, /* group id of socket */ + NFULA_HWTYPE, /* hardware type */ + NFULA_HWHEADER, /* hardware header */ + NFULA_HWLEN, /* hardware header length */ __NFULA_MAX }; diff --git a/include/linux/netfilter/xt_string.h b/include/linux/netfilter/xt_string.h index bb21dd1aee2d..8a6ba7bbef9f 100644 --- a/include/linux/netfilter/xt_string.h +++ b/include/linux/netfilter/xt_string.h @@ -4,6 +4,11 @@ #define XT_STRING_MAX_PATTERN_SIZE 128 #define XT_STRING_MAX_ALGO_NAME_SIZE 16 +enum { + XT_STRING_FLAG_INVERT = 0x01, + XT_STRING_FLAG_IGNORECASE = 0x02 +}; + struct xt_string_info { u_int16_t from_offset; @@ -11,7 +16,15 @@ struct xt_string_info char algo[XT_STRING_MAX_ALGO_NAME_SIZE]; char pattern[XT_STRING_MAX_PATTERN_SIZE]; u_int8_t patlen; - u_int8_t invert; + union { + struct { + u_int8_t invert; + } v0; + + struct { + u_int8_t flags; + } v1; + } u; /* Used internally by the kernel */ struct ts_config __attribute__((aligned(8))) *config; diff --git a/include/linux/netfilter_bridge/ebt_ip6.h b/include/linux/netfilter_bridge/ebt_ip6.h new file mode 100644 index 000000000000..2273c3ae33ca --- /dev/null +++ b/include/linux/netfilter_bridge/ebt_ip6.h @@ -0,0 +1,40 @@ +/* + * ebt_ip6 + * + * Authors: + * Kuo-Lang Tseng <kuo-lang.tseng@intel.com> + * Manohar Castelino <manohar.r.castelino@intel.com> + * + * Jan 11, 2008 + * + */ + +#ifndef __LINUX_BRIDGE_EBT_IP6_H +#define __LINUX_BRIDGE_EBT_IP6_H + +#define EBT_IP6_SOURCE 0x01 +#define EBT_IP6_DEST 0x02 +#define EBT_IP6_TCLASS 0x04 +#define EBT_IP6_PROTO 0x08 +#define EBT_IP6_SPORT 0x10 +#define EBT_IP6_DPORT 0x20 +#define EBT_IP6_MASK (EBT_IP6_SOURCE | EBT_IP6_DEST | EBT_IP6_TCLASS |\ + EBT_IP6_PROTO | EBT_IP6_SPORT | EBT_IP6_DPORT) +#define EBT_IP6_MATCH "ip6" + +/* the same values are used for the invflags */ +struct ebt_ip6_info +{ + struct in6_addr saddr; + struct in6_addr daddr; + struct in6_addr smsk; + struct in6_addr dmsk; + uint8_t tclass; + uint8_t protocol; + uint8_t bitmask; + uint8_t invflags; + uint16_t sport[2]; + uint16_t dport[2]; +}; + +#endif diff --git a/include/linux/netfilter_bridge/ebt_log.h b/include/linux/netfilter_bridge/ebt_log.h index 96e231ae7554..b76e653157e5 100644 --- a/include/linux/netfilter_bridge/ebt_log.h +++ b/include/linux/netfilter_bridge/ebt_log.h @@ -4,7 +4,8 @@ #define EBT_LOG_IP 0x01 /* if the frame is made by ip, log the ip information */ #define EBT_LOG_ARP 0x02 #define EBT_LOG_NFLOG 0x04 -#define EBT_LOG_MASK (EBT_LOG_IP | EBT_LOG_ARP) +#define EBT_LOG_IP6 0x08 +#define 
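The revision-1 xt_string layout above moves the old invert byte into a union so further flags can be added. A small userspace sketch of filling the v1 form, assuming the usual iptables-extension flow (option parsing and the surrounding xt_entry_match plumbing are omitted, and the "bm" textsearch algorithm is only an example); foo_fill_string_v1() is hypothetical:

#include <linux/netfilter/xt_string.h>
#include <string.h>

static void foo_fill_string_v1(struct xt_string_info *info, const char *pat)
{
	size_t len = strlen(pat);

	if (len > XT_STRING_MAX_PATTERN_SIZE)
		len = XT_STRING_MAX_PATTERN_SIZE;

	memset(info, 0, sizeof(*info));
	strncpy(info->algo, "bm", XT_STRING_MAX_ALGO_NAME_SIZE - 1);
	memcpy(info->pattern, pat, len);
	info->patlen = len;
	info->to_offset = 65535;	/* search the whole packet */
	info->u.v1.flags = XT_STRING_FLAG_IGNORECASE | XT_STRING_FLAG_INVERT;
	/* a revision-0 user would set info->u.v0.invert = 1 instead */
}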
EBT_LOG_MASK (EBT_LOG_IP | EBT_LOG_ARP | EBT_LOG_IP6) #define EBT_LOG_PREFIX_SIZE 30 #define EBT_LOG_WATCHER "log" diff --git a/include/linux/netfilter_ipv4.h b/include/linux/netfilter_ipv4.h index 650318b0c405..29c7727ff0e8 100644 --- a/include/linux/netfilter_ipv4.h +++ b/include/linux/netfilter_ipv4.h @@ -60,6 +60,7 @@ enum nf_ip_hook_priorities { NF_IP_PRI_MANGLE = -150, NF_IP_PRI_NAT_DST = -100, NF_IP_PRI_FILTER = 0, + NF_IP_PRI_SECURITY = 50, NF_IP_PRI_NAT_SRC = 100, NF_IP_PRI_SELINUX_LAST = 225, NF_IP_PRI_CONNTRACK_CONFIRM = INT_MAX, diff --git a/include/linux/netfilter_ipv6.h b/include/linux/netfilter_ipv6.h index 3475a65dae9b..d654873aa25a 100644 --- a/include/linux/netfilter_ipv6.h +++ b/include/linux/netfilter_ipv6.h @@ -64,11 +64,14 @@ enum nf_ip6_hook_priorities { NF_IP6_PRI_MANGLE = -150, NF_IP6_PRI_NAT_DST = -100, NF_IP6_PRI_FILTER = 0, + NF_IP6_PRI_SECURITY = 50, NF_IP6_PRI_NAT_SRC = 100, NF_IP6_PRI_SELINUX_LAST = 225, NF_IP6_PRI_LAST = INT_MAX, }; +#ifdef __KERNEL__ + #ifdef CONFIG_NETFILTER extern int ip6_route_me_harder(struct sk_buff *skb); extern __sum16 nf_ip6_checksum(struct sk_buff *skb, unsigned int hook, @@ -81,4 +84,6 @@ static inline int ipv6_netfilter_init(void) { return 0; } static inline void ipv6_netfilter_fini(void) { return; } #endif /* CONFIG_NETFILTER */ +#endif /* __KERNEL__ */ + #endif /*__LINUX_IP6_NETFILTER_H*/ diff --git a/include/linux/netlink.h b/include/linux/netlink.h index bec1062a25a1..9ff1b54908f3 100644 --- a/include/linux/netlink.h +++ b/include/linux/netlink.h @@ -193,7 +193,7 @@ extern int netlink_unregister_notifier(struct notifier_block *nb); /* finegrained unicast helpers: */ struct sock *netlink_getsockbyfilp(struct file *filp); -int netlink_attachskb(struct sock *sk, struct sk_buff *skb, int nonblock, +int netlink_attachskb(struct sock *sk, struct sk_buff *skb, long *timeo, struct sock *ssk); void netlink_detachskb(struct sock *sk, struct sk_buff *skb); int netlink_sendskb(struct sock *sk, struct sk_buff *skb); diff --git a/include/linux/nfs4.h b/include/linux/nfs4.h index 8726491de154..ea0366769484 100644 --- a/include/linux/nfs4.h +++ b/include/linux/nfs4.h @@ -65,9 +65,6 @@ #define NFS4_ACE_SUCCESSFUL_ACCESS_ACE_FLAG 0x00000010 #define NFS4_ACE_FAILED_ACCESS_ACE_FLAG 0x00000020 #define NFS4_ACE_IDENTIFIER_GROUP 0x00000040 -#define NFS4_ACE_OWNER 0x00000080 -#define NFS4_ACE_GROUP 0x00000100 -#define NFS4_ACE_EVERYONE 0x00000200 #define NFS4_ACE_READ_DATA 0x00000001 #define NFS4_ACE_LIST_DIRECTORY 0x00000001 diff --git a/include/linux/nfs_fs.h b/include/linux/nfs_fs.h index 27d6a8d98cef..29d261918734 100644 --- a/include/linux/nfs_fs.h +++ b/include/linux/nfs_fs.h @@ -12,9 +12,19 @@ #include <linux/magic.h> /* Default timeout values */ +#define NFS_DEF_UDP_TIMEO (11) +#define NFS_DEF_UDP_RETRANS (3) +#define NFS_DEF_TCP_TIMEO (600) +#define NFS_DEF_TCP_RETRANS (2) + #define NFS_MAX_UDP_TIMEOUT (60*HZ) #define NFS_MAX_TCP_TIMEOUT (600*HZ) +#define NFS_DEF_ACREGMIN (3) +#define NFS_DEF_ACREGMAX (60) +#define NFS_DEF_ACDIRMIN (30) +#define NFS_DEF_ACDIRMAX (60) + /* * When flushing a cluster of dirty pages, there can be different * strategies: diff --git a/include/linux/nfs_iostat.h b/include/linux/nfs_iostat.h new file mode 100644 index 000000000000..1cb9a3fed2b3 --- /dev/null +++ b/include/linux/nfs_iostat.h @@ -0,0 +1,119 @@ +/* + * User-space visible declarations for NFS client per-mount + * point statistics + * + * Copyright (C) 2005, 2006 Chuck Lever <cel@netapp.com> + * + * NFS client per-mount statistics provide information 
about the + * health of the NFS client and the health of each NFS mount point. + * Generally these are not for detailed problem diagnosis, but + * simply to indicate that there is a problem. + * + * These counters are not meant to be human-readable, but are meant + * to be integrated into system monitoring tools such as "sar" and + * "iostat". As such, the counters are sampled by the tools over + * time, and are never zeroed after a file system is mounted. + * Moving averages can be computed by the tools by taking the + * difference between two instantaneous samples and dividing that + * by the time between the samples. + */ + +#ifndef _LINUX_NFS_IOSTAT +#define _LINUX_NFS_IOSTAT + +#define NFS_IOSTAT_VERS "1.0" + +/* + * NFS byte counters + * + * 1. SERVER - the number of payload bytes read from or written + * to the server by the NFS client via an NFS READ or WRITE + * request. + * + * 2. NORMAL - the number of bytes read or written by applications + * via the read(2) and write(2) system call interfaces. + * + * 3. DIRECT - the number of bytes read or written from files + * opened with the O_DIRECT flag. + * + * These counters give a view of the data throughput into and out + * of the NFS client. Comparing the number of bytes requested by + * an application with the number of bytes the client requests from + * the server can provide an indication of client efficiency + * (per-op, cache hits, etc). + * + * These counters can also help characterize which access methods + * are in use. DIRECT by itself shows whether there is any O_DIRECT + * traffic. NORMAL + DIRECT shows how much data is going through + * the system call interface. A large amount of SERVER traffic + * without much NORMAL or DIRECT traffic shows that applications + * are using mapped files. + * + * NFS page counters + * + * These count the number of pages read or written via nfs_readpage(), + * nfs_readpages(), or their write equivalents. + * + * NB: When adding new byte counters, please include the measured + * units in the name of each byte counter to help users of this + * interface determine what exactly is being counted. + */ +enum nfs_stat_bytecounters { + NFSIOS_NORMALREADBYTES = 0, + NFSIOS_NORMALWRITTENBYTES, + NFSIOS_DIRECTREADBYTES, + NFSIOS_DIRECTWRITTENBYTES, + NFSIOS_SERVERREADBYTES, + NFSIOS_SERVERWRITTENBYTES, + NFSIOS_READPAGES, + NFSIOS_WRITEPAGES, + __NFSIOS_BYTESMAX, +}; + +/* + * NFS event counters + * + * These counters provide a low-overhead way of monitoring client + * activity without enabling NFS trace debugging. The counters + * show the rate at which VFS requests are made, and how often the + * client invalidates its data and attribute caches. This allows + * system administrators to monitor such things as how close-to-open + * is working, and answer questions such as "why are there so many + * GETATTR requests on the wire?" + * + * They also count anamolous events such as short reads and writes, + * silly renames due to close-after-delete, and operations that + * change the size of a file (such operations can often be the + * source of data corruption if applications aren't using file + * locking properly). 
+ */ +enum nfs_stat_eventcounters { + NFSIOS_INODEREVALIDATE = 0, + NFSIOS_DENTRYREVALIDATE, + NFSIOS_DATAINVALIDATE, + NFSIOS_ATTRINVALIDATE, + NFSIOS_VFSOPEN, + NFSIOS_VFSLOOKUP, + NFSIOS_VFSACCESS, + NFSIOS_VFSUPDATEPAGE, + NFSIOS_VFSREADPAGE, + NFSIOS_VFSREADPAGES, + NFSIOS_VFSWRITEPAGE, + NFSIOS_VFSWRITEPAGES, + NFSIOS_VFSGETDENTS, + NFSIOS_VFSSETATTR, + NFSIOS_VFSFLUSH, + NFSIOS_VFSFSYNC, + NFSIOS_VFSLOCK, + NFSIOS_VFSRELEASE, + NFSIOS_CONGESTIONWAIT, + NFSIOS_SETATTRTRUNC, + NFSIOS_EXTENDWRITE, + NFSIOS_SILLYRENAME, + NFSIOS_SHORTREAD, + NFSIOS_SHORTWRITE, + NFSIOS_DELAY, + __NFSIOS_COUNTSMAX, +}; + +#endif /* _LINUX_NFS_IOSTAT */ diff --git a/include/linux/nfs_page.h b/include/linux/nfs_page.h index a1676e19e491..3c60685d972b 100644 --- a/include/linux/nfs_page.h +++ b/include/linux/nfs_page.h @@ -27,9 +27,12 @@ /* * Valid flags for a dirty buffer */ -#define PG_BUSY 0 -#define PG_NEED_COMMIT 1 -#define PG_NEED_RESCHED 2 +enum { + PG_BUSY = 0, + PG_CLEAN, + PG_NEED_COMMIT, + PG_NEED_RESCHED, +}; struct nfs_inode; struct nfs_page { diff --git a/include/linux/nfs_xdr.h b/include/linux/nfs_xdr.h index 24263bb8e0be..8c77c11224d1 100644 --- a/include/linux/nfs_xdr.h +++ b/include/linux/nfs_xdr.h @@ -829,9 +829,8 @@ struct nfs_rpc_ops { int (*write_done) (struct rpc_task *, struct nfs_write_data *); void (*commit_setup) (struct nfs_write_data *, struct rpc_message *); int (*commit_done) (struct rpc_task *, struct nfs_write_data *); - int (*file_open) (struct inode *, struct file *); - int (*file_release) (struct inode *, struct file *); int (*lock)(struct file *, int, struct file_lock *); + int (*lock_check_bounds)(const struct file_lock *); void (*clear_acl_cache)(struct inode *); }; diff --git a/include/linux/nfsd/nfsd.h b/include/linux/nfsd/nfsd.h index 41d30c9c9de6..a2861d95ecc3 100644 --- a/include/linux/nfsd/nfsd.h +++ b/include/linux/nfsd/nfsd.h @@ -28,20 +28,20 @@ #define NFSD_SUPPORTED_MINOR_VERSION 0 /* - * Special flags for nfsd_permission. These must be different from MAY_READ, - * MAY_WRITE, and MAY_EXEC. + * Flags for nfsd_permission */ -#define MAY_NOP 0 -#define MAY_SATTR 8 -#define MAY_TRUNC 16 -#define MAY_LOCK 32 -#define MAY_OWNER_OVERRIDE 64 -#define MAY_LOCAL_ACCESS 128 /* IRIX doing local access check on device special file*/ -#if (MAY_SATTR | MAY_TRUNC | MAY_LOCK | MAY_OWNER_OVERRIDE | MAY_LOCAL_ACCESS) & (MAY_READ | MAY_WRITE | MAY_EXEC) -# error "please use a different value for MAY_SATTR or MAY_TRUNC or MAY_LOCK or MAY_LOCAL_ACCESS or MAY_OWNER_OVERRIDE." 
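As the comments above stress, these counters are monotonic and only become useful when two samples are differenced over time. A userspace sketch of that arithmetic; sample_nfs_bytes() is a hypothetical helper that parses one mount's byte counters (for example from /proc/self/mountstats) into an array indexed by enum nfs_stat_bytecounters:

#include <linux/nfs_iostat.h>
#include <stdio.h>
#include <unistd.h>

/* hypothetical: fill counters[0..__NFSIOS_BYTESMAX-1] for one mount point */
int sample_nfs_bytes(const char *mnt, unsigned long long *counters);

static void report_server_read_rate(const char *mnt, unsigned int secs)
{
	unsigned long long a[__NFSIOS_BYTESMAX], b[__NFSIOS_BYTESMAX];

	sample_nfs_bytes(mnt, a);
	sleep(secs);
	sample_nfs_bytes(mnt, b);

	/* rate = (later sample - earlier sample) / elapsed time */
	printf("%s: %.1f KiB/s read from the server\n", mnt,
	       (b[NFSIOS_SERVERREADBYTES] - a[NFSIOS_SERVERREADBYTES]) /
	       (1024.0 * secs));
}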
-#endif -#define MAY_CREATE (MAY_EXEC|MAY_WRITE) -#define MAY_REMOVE (MAY_EXEC|MAY_WRITE|MAY_TRUNC) +#define NFSD_MAY_NOP 0 +#define NFSD_MAY_EXEC 1 /* == MAY_EXEC */ +#define NFSD_MAY_WRITE 2 /* == MAY_WRITE */ +#define NFSD_MAY_READ 4 /* == MAY_READ */ +#define NFSD_MAY_SATTR 8 +#define NFSD_MAY_TRUNC 16 +#define NFSD_MAY_LOCK 32 +#define NFSD_MAY_OWNER_OVERRIDE 64 +#define NFSD_MAY_LOCAL_ACCESS 128 /* IRIX doing local access check on device special file*/ + +#define NFSD_MAY_CREATE (NFSD_MAY_EXEC|NFSD_MAY_WRITE) +#define NFSD_MAY_REMOVE (NFSD_MAY_EXEC|NFSD_MAY_WRITE|NFSD_MAY_TRUNC) /* * Callback function for readdir @@ -54,6 +54,7 @@ typedef int (*nfsd_dirop_t)(struct inode *, struct dentry *, int, int); extern struct svc_program nfsd_program; extern struct svc_version nfsd_version2, nfsd_version3, nfsd_version4; +extern struct mutex nfsd_mutex; extern struct svc_serv *nfsd_serv; extern struct seq_operations nfs_exports_op; diff --git a/include/linux/nfsd/state.h b/include/linux/nfsd/state.h index db348f749376..d0fe2e378452 100644 --- a/include/linux/nfsd/state.h +++ b/include/linux/nfsd/state.h @@ -98,8 +98,6 @@ struct nfs4_callback { u32 cb_ident; /* RPC client info */ atomic_t cb_set; /* successful CB_NULL call */ - struct rpc_program cb_program; - struct rpc_stat cb_stat; struct rpc_clnt * cb_client; }; diff --git a/include/linux/nl80211.h b/include/linux/nl80211.h index ea6517e58b04..2be7c63bc0f2 100644 --- a/include/linux/nl80211.h +++ b/include/linux/nl80211.h @@ -122,13 +122,13 @@ enum nl80211_commands { NL80211_CMD_NEW_STATION, NL80211_CMD_DEL_STATION, - /* add commands here */ - NL80211_CMD_GET_MPATH, NL80211_CMD_SET_MPATH, NL80211_CMD_NEW_MPATH, NL80211_CMD_DEL_MPATH, + /* add commands here */ + /* used to define NL80211_CMD_MAX below */ __NL80211_CMD_AFTER_LAST, NL80211_CMD_MAX = __NL80211_CMD_AFTER_LAST - 1 @@ -230,18 +230,21 @@ enum nl80211_attrs { NL80211_ATTR_MNTR_FLAGS, - /* add attributes here, update the policy in nl80211.c */ - NL80211_ATTR_MESH_ID, NL80211_ATTR_STA_PLINK_ACTION, NL80211_ATTR_MPATH_NEXT_HOP, NL80211_ATTR_MPATH_INFO, + /* add attributes here, update the policy in nl80211.c */ + __NL80211_ATTR_AFTER_LAST, NL80211_ATTR_MAX = __NL80211_ATTR_AFTER_LAST - 1 }; -#define NL80211_MAX_SUPP_RATES 32 +#define NL80211_MAX_SUPP_RATES 32 +#define NL80211_TKIP_DATA_OFFSET_ENCR_KEY 0 +#define NL80211_TKIP_DATA_OFFSET_TX_MIC_KEY 16 +#define NL80211_TKIP_DATA_OFFSET_RX_MIC_KEY 24 /** * enum nl80211_iftype - (virtual) interface types diff --git a/include/linux/notifier.h b/include/linux/notifier.h index 0ff6224d172a..bd3d72ddf333 100644 --- a/include/linux/notifier.h +++ b/include/linux/notifier.h @@ -197,6 +197,7 @@ static inline int notifier_to_errno(int ret) #define NETDEV_GOING_DOWN 0x0009 #define NETDEV_CHANGENAME 0x000A #define NETDEV_FEAT_CHANGE 0x000B +#define NETDEV_BONDING_FAILOVER 0x000C #define SYS_DOWN 0x0001 /* Notify of system down */ #define SYS_RESTART SYS_DOWN diff --git a/include/linux/of_device.h b/include/linux/of_device.h index afe338217d91..d3a74e00a3e1 100644 --- a/include/linux/of_device.h +++ b/include/linux/of_device.h @@ -24,4 +24,7 @@ static inline void of_device_free(struct of_device *dev) of_release_dev(&dev->dev); } +extern ssize_t of_device_get_modalias(struct of_device *ofdev, + char *str, ssize_t len); + #endif /* _LINUX_OF_DEVICE_H */ diff --git a/include/linux/of_gpio.h b/include/linux/of_gpio.h index 2ee97e9877a7..67db101d0eb8 100644 --- a/include/linux/of_gpio.h +++ b/include/linux/of_gpio.h @@ -15,7 +15,7 @@ #define 
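NETDEV_BONDING_FAILOVER above is the event raised through the netdev notifier chain by the new netdev_bonding_change() helper declared earlier in netdevice.h. A minimal subscriber sketch, illustrative only (in this kernel the notifier's data pointer is the struct net_device itself); foo_* names are hypothetical:

#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/notifier.h>

static int foo_netdev_event(struct notifier_block *nb,
			    unsigned long event, void *ptr)
{
	struct net_device *dev = ptr;

	if (event == NETDEV_BONDING_FAILOVER)
		printk(KERN_INFO "%s: bonding failover seen\n", dev->name);

	return NOTIFY_DONE;
}

static struct notifier_block foo_netdev_notifier = {
	.notifier_call = foo_netdev_event,
};

/* registered from module init with register_netdevice_notifier(&foo_netdev_notifier) */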
__LINUX_OF_GPIO_H #include <linux/errno.h> -#include <asm/gpio.h> +#include <linux/gpio.h> #ifdef CONFIG_OF_GPIO diff --git a/include/linux/page-flags.h b/include/linux/page-flags.h index f31debfac926..0d2a4e7012aa 100644 --- a/include/linux/page-flags.h +++ b/include/linux/page-flags.h @@ -157,6 +157,7 @@ PAGEFLAG(Active, active) __CLEARPAGEFLAG(Active, active) __PAGEFLAG(Slab, slab) PAGEFLAG(Checked, owner_priv_1) /* Used by some filesystems */ PAGEFLAG(Pinned, owner_priv_1) TESTSCFLAG(Pinned, owner_priv_1) /* Xen */ +PAGEFLAG(SavePinned, dirty); /* Xen */ PAGEFLAG(Reserved, reserved) __CLEARPAGEFLAG(Reserved, reserved) PAGEFLAG(Private, private) __CLEARPAGEFLAG(Private, private) __SETPAGEFLAG(Private, private) diff --git a/include/linux/pageblock-flags.h b/include/linux/pageblock-flags.h index e875905f7b12..e8c06122be36 100644 --- a/include/linux/pageblock-flags.h +++ b/include/linux/pageblock-flags.h @@ -25,13 +25,11 @@ #include <linux/types.h> -/* Macro to aid the definition of ranges of bits */ -#define PB_range(name, required_bits) \ - name, name ## _end = (name + required_bits) - 1 - /* Bit indices that affect a whole block of pages */ enum pageblock_bits { - PB_range(PB_migrate, 3), /* 3 bits required for migrate types */ + PB_migrate, + PB_migrate_end = PB_migrate + 3 - 1, + /* 3 bits required for migrate types */ NR_PAGEBLOCK_BITS }; diff --git a/include/linux/pci.h b/include/linux/pci.h index d18b1dd49fab..a6a088e1a804 100644 --- a/include/linux/pci.h +++ b/include/linux/pci.h @@ -17,8 +17,7 @@ #ifndef LINUX_PCI_H #define LINUX_PCI_H -/* Include the pci register defines */ -#include <linux/pci_regs.h> +#include <linux/pci_regs.h> /* The pci register defines */ /* * The PCI interface treats multi-function devices as independent @@ -49,12 +48,22 @@ #include <linux/list.h> #include <linux/compiler.h> #include <linux/errno.h> +#include <linux/kobject.h> #include <asm/atomic.h> #include <linux/device.h> /* Include the ID list */ #include <linux/pci_ids.h> +/* pci_slot represents a physical slot */ +struct pci_slot { + struct pci_bus *bus; /* The bus this slot is on */ + struct list_head list; /* node in list of slots on this bus */ + struct hotplug_slot *hotplug; /* Hotplug info (migrate over time) */ + unsigned char number; /* PCI_SLOT(pci_dev->devfn) */ + struct kobject kobj; +}; + /* File state for mmap()s on /proc/bus/pci/X/Y */ enum pci_mmap_state { pci_mmap_io, @@ -142,6 +151,7 @@ struct pci_dev { void *sysdata; /* hook for sys-specific extension */ struct proc_dir_entry *procent; /* device entry in /proc/bus/pci */ + struct pci_slot *slot; /* Physical slot this device is in */ unsigned int devfn; /* encoded device & function index */ unsigned short vendor; @@ -167,6 +177,13 @@ struct pci_dev { pci_power_t current_state; /* Current operating state. In ACPI-speak, this is D0-D3, D0 being fully functional, and D3 being off. */ + int pm_cap; /* PM capability offset in the + configuration space */ + unsigned int pme_support:5; /* Bitmask of states from which PME# + can be generated */ + unsigned int d1_support:1; /* Low power state D1 is supported */ + unsigned int d2_support:1; /* Low power state D2 is supported */ + unsigned int no_d1d2:1; /* Only allow D0 and D3 */ #ifdef CONFIG_PCIEASPM struct pcie_link_state *link_state; /* ASPM link state. 
*/ @@ -191,7 +208,6 @@ struct pci_dev { unsigned int is_added:1; unsigned int is_busmaster:1; /* device is busmaster */ unsigned int no_msi:1; /* device may not use msi */ - unsigned int no_d1d2:1; /* only allow d0 or d3 */ unsigned int block_ucfg_access:1; /* userspace config space access is blocked */ unsigned int broken_parity_status:1; /* Device generates false positive parity */ unsigned int msi_enabled:1; @@ -267,6 +283,7 @@ struct pci_bus { struct list_head children; /* list of child buses */ struct list_head devices; /* list of devices on this bus */ struct pci_dev *self; /* bridge device as seen by parent */ + struct list_head slots; /* list of slots on this bus */ struct resource *resource[PCI_BUS_NUM_RESOURCES]; /* address space routed to this bus */ @@ -328,7 +345,7 @@ struct pci_bus_region { struct pci_dynids { spinlock_t lock; /* protects list, index */ struct list_head list; /* for IDs added at runtime */ - unsigned int use_driver_data:1; /* pci_driver->driver_data is used */ + unsigned int use_driver_data:1; /* pci_device_id->driver_data is used */ }; /* ---------------------------------------------------------------- */ @@ -390,7 +407,7 @@ struct pci_driver { int (*resume_early) (struct pci_dev *dev); int (*resume) (struct pci_dev *dev); /* Device woken up */ void (*shutdown) (struct pci_dev *dev); - + struct pm_ext_ops *pm; struct pci_error_handlers *err_handler; struct device_driver driver; struct pci_dynids dynids; @@ -489,6 +506,10 @@ struct pci_bus *pci_create_bus(struct device *parent, int bus, struct pci_ops *ops, void *sysdata); struct pci_bus *pci_add_new_bus(struct pci_bus *parent, struct pci_dev *dev, int busnr); +struct pci_slot *pci_create_slot(struct pci_bus *parent, int slot_nr, + const char *name); +void pci_destroy_slot(struct pci_slot *slot); +void pci_update_slot_number(struct pci_slot *slot, int slot_nr); int pci_scan_slot(struct pci_bus *bus, int devfn); struct pci_dev *pci_scan_single_device(struct pci_bus *bus, int devfn); void pci_device_add(struct pci_dev *dev, struct pci_bus *bus); @@ -618,6 +639,8 @@ int pci_restore_state(struct pci_dev *dev); int pci_set_power_state(struct pci_dev *dev, pci_power_t state); pci_power_t pci_choose_state(struct pci_dev *dev, pm_message_t state); int pci_enable_wake(struct pci_dev *dev, pci_power_t state, int enable); +int pci_prepare_to_sleep(struct pci_dev *dev); +int pci_back_from_sleep(struct pci_dev *dev); /* Functions for PCI Hotplug drivers to use */ int pci_bus_find_capability(struct pci_bus *bus, unsigned int devfn, int cap); @@ -839,6 +862,11 @@ static inline int pci_set_dma_mask(struct pci_dev *dev, u64 mask) return -EIO; } +static inline int pci_set_consistent_dma_mask(struct pci_dev *dev, u64 mask) +{ + return -EIO; +} + static inline int pci_set_dma_max_seg_size(struct pci_dev *dev, unsigned int size) { @@ -977,9 +1005,9 @@ static inline void pci_set_drvdata(struct pci_dev *pdev, void *data) /* If you want to know what to call your pci_dev, ask this function. * Again, it's a wrapper around the generic device. 
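pci_prepare_to_sleep() and pci_back_from_sleep() above let a driver hand the choice of target D-state (and PME# wake-up, tracked by the new pme_support/d1_support/d2_support fields) to the PCI core. A sketch of a conventional suspend/resume pair using them; the foo_ driver and its quiescing steps are hypothetical:

#include <linux/pci.h>

static int foo_suspend(struct pci_dev *pdev, pm_message_t state)
{
	/* driver-specific quiescing (stop DMA, silence interrupts) goes here */
	pci_save_state(pdev);
	/* the core picks a reachable low-power state and arms PME# if wake-up is enabled */
	return pci_prepare_to_sleep(pdev);
}

static int foo_resume(struct pci_dev *pdev)
{
	int err = pci_back_from_sleep(pdev);	/* back to D0, wake-up disabled */

	if (err)
		return err;
	pci_restore_state(pdev);
	/* driver-specific re-initialisation goes here */
	return 0;
}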
*/ -static inline char *pci_name(struct pci_dev *pdev) +static inline const char *pci_name(struct pci_dev *pdev) { - return pdev->dev.bus_id; + return dev_name(&pdev->dev); } @@ -1014,7 +1042,9 @@ enum pci_fixup_pass { pci_fixup_header, /* After reading configuration header */ pci_fixup_final, /* Final phase of device fixups */ pci_fixup_enable, /* pci_enable_device() time */ - pci_fixup_resume, /* pci_enable_device() time */ + pci_fixup_resume, /* pci_device_resume() */ + pci_fixup_suspend, /* pci_device_suspend */ + pci_fixup_resume_early, /* pci_device_resume_early() */ }; /* Anonymous variables would be nice... */ @@ -1036,6 +1066,12 @@ enum pci_fixup_pass { #define DECLARE_PCI_FIXUP_RESUME(vendor, device, hook) \ DECLARE_PCI_FIXUP_SECTION(.pci_fixup_resume, \ resume##vendor##device##hook, vendor, device, hook) +#define DECLARE_PCI_FIXUP_RESUME_EARLY(vendor, device, hook) \ + DECLARE_PCI_FIXUP_SECTION(.pci_fixup_resume_early, \ + resume_early##vendor##device##hook, vendor, device, hook) +#define DECLARE_PCI_FIXUP_SUSPEND(vendor, device, hook) \ + DECLARE_PCI_FIXUP_SECTION(.pci_fixup_suspend, \ + suspend##vendor##device##hook, vendor, device, hook) void pci_fixup_device(enum pci_fixup_pass pass, struct pci_dev *dev); @@ -1060,7 +1096,10 @@ extern int pci_pci_problems; extern unsigned long pci_cardbus_io_size; extern unsigned long pci_cardbus_mem_size; -extern int pcibios_add_platform_entries(struct pci_dev *dev); +int pcibios_add_platform_entries(struct pci_dev *dev); +void pcibios_disable_device(struct pci_dev *dev); +int pcibios_set_pcie_reset_state(struct pci_dev *dev, + enum pcie_reset_state state); #ifdef CONFIG_PCI_MMCONFIG extern void __init pci_mmcfg_early_init(void); diff --git a/include/linux/pci_hotplug.h b/include/linux/pci_hotplug.h index 8f67e8f2a3cc..a08cd06b541a 100644 --- a/include/linux/pci_hotplug.h +++ b/include/linux/pci_hotplug.h @@ -95,9 +95,6 @@ struct hotplug_slot_attribute { * @get_adapter_status: Called to get see if an adapter is present in the slot or not. * If this field is NULL, the value passed in the struct hotplug_slot_info * will be used when this value is requested by a user. - * @get_address: Called to get pci address of a slot. - * If this field is NULL, the value passed in the struct hotplug_slot_info - * will be used when this value is requested by a user. * @get_max_bus_speed: Called to get the max bus speed for a slot. * If this field is NULL, the value passed in the struct hotplug_slot_info * will be used when this value is requested by a user. @@ -120,7 +117,6 @@ struct hotplug_slot_ops { int (*get_attention_status) (struct hotplug_slot *slot, u8 *value); int (*get_latch_status) (struct hotplug_slot *slot, u8 *value); int (*get_adapter_status) (struct hotplug_slot *slot, u8 *value); - int (*get_address) (struct hotplug_slot *slot, u32 *value); int (*get_max_bus_speed) (struct hotplug_slot *slot, enum pci_bus_speed *value); int (*get_cur_bus_speed) (struct hotplug_slot *slot, enum pci_bus_speed *value); }; @@ -140,7 +136,6 @@ struct hotplug_slot_info { u8 attention_status; u8 latch_status; u8 adapter_status; - u32 address; enum pci_bus_speed max_bus_speed; enum pci_bus_speed cur_bus_speed; }; @@ -166,15 +161,14 @@ struct hotplug_slot { /* Variables below this are for use only by the hotplug pci core. 
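The two new fixup passes above run quirks around suspend and the early resume path, alongside the existing header/final/enable/resume passes. A sketch of declaring one; the device ID 0x1234 and the quirk body are placeholders, not a real erratum:

#include <linux/pci.h>

static void foo_quirk_resume_early(struct pci_dev *dev)
{
	u16 cmd;

	/* hypothetical: re-assert memory decoding lost across the sleep state */
	pci_read_config_word(dev, PCI_COMMAND, &cmd);
	pci_write_config_word(dev, PCI_COMMAND, cmd | PCI_COMMAND_MEMORY);
}
DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_INTEL, 0x1234,
			       foo_quirk_resume_early);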
*/ struct list_head slot_list; - struct kobject kobj; + struct pci_slot *pci_slot; }; #define to_hotplug_slot(n) container_of(n, struct hotplug_slot, kobj) -extern int pci_hp_register (struct hotplug_slot *slot); -extern int pci_hp_deregister (struct hotplug_slot *slot); +extern int pci_hp_register(struct hotplug_slot *, struct pci_bus *, int nr); +extern int pci_hp_deregister(struct hotplug_slot *slot); extern int __must_check pci_hp_change_slot_info (struct hotplug_slot *slot, struct hotplug_slot_info *info); -extern struct kset *pci_hotplug_slots_kset; /* PCI Setting Record (Type 0) */ struct hpp_type0 { @@ -227,9 +221,9 @@ struct hotplug_params { #include <acpi/acpi.h> #include <acpi/acpi_bus.h> #include <acpi/actypes.h> -extern acpi_status acpi_run_oshp(acpi_handle handle); extern acpi_status acpi_get_hp_params_from_firmware(struct pci_bus *bus, struct hotplug_params *hpp); +int acpi_get_hp_hw_control_from_firmware(struct pci_dev *dev, u32 flags); int acpi_root_bridge(acpi_handle handle); #endif #endif diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h index 65953822c9cb..119ae7b8f028 100644 --- a/include/linux/pci_ids.h +++ b/include/linux/pci_ids.h @@ -1950,6 +1950,8 @@ #define PCI_DEVICE_ID_NX2_5708 0x164c #define PCI_DEVICE_ID_TIGON3_5702FE 0x164d #define PCI_DEVICE_ID_NX2_57710 0x164e +#define PCI_DEVICE_ID_NX2_57711 0x164f +#define PCI_DEVICE_ID_NX2_57711E 0x1650 #define PCI_DEVICE_ID_TIGON3_5705 0x1653 #define PCI_DEVICE_ID_TIGON3_5705_2 0x1654 #define PCI_DEVICE_ID_TIGON3_5720 0x1658 @@ -1982,6 +1984,7 @@ #define PCI_DEVICE_ID_TIGON3_5787M 0x1693 #define PCI_DEVICE_ID_TIGON3_5782 0x1696 #define PCI_DEVICE_ID_TIGON3_5784 0x1698 +#define PCI_DEVICE_ID_TIGON3_5785 0x1699 #define PCI_DEVICE_ID_TIGON3_5786 0x169a #define PCI_DEVICE_ID_TIGON3_5787 0x169b #define PCI_DEVICE_ID_TIGON3_5788 0x169c @@ -2171,6 +2174,8 @@ #define PCI_DEVICE_ID_MPC8544 0x0033 #define PCI_DEVICE_ID_MPC8572E 0x0040 #define PCI_DEVICE_ID_MPC8572 0x0041 +#define PCI_DEVICE_ID_MPC8536E 0x0050 +#define PCI_DEVICE_ID_MPC8536 0x0051 #define PCI_DEVICE_ID_MPC8641 0x7010 #define PCI_DEVICE_ID_MPC8641D 0x7011 #define PCI_DEVICE_ID_MPC8610 0x7018 @@ -2188,6 +2193,7 @@ #define PCI_DEVICE_ID_JMICRON_JMB366 0x2366 #define PCI_DEVICE_ID_JMICRON_JMB368 0x2368 #define PCI_DEVICE_ID_JMICRON_JMB38X_SD 0x2381 +#define PCI_DEVICE_ID_JMICRON_JMB38X_MMC 0x2382 #define PCI_DEVICE_ID_JMICRON_JMB38X_MS 0x2383 #define PCI_VENDOR_ID_KORENIX 0x1982 @@ -2365,6 +2371,14 @@ #define PCI_DEVICE_ID_INTEL_ICH9_7 0x2916 #define PCI_DEVICE_ID_INTEL_ICH9_8 0x2918 #define PCI_DEVICE_ID_INTEL_82855PM_HB 0x3340 +#define PCI_DEVICE_ID_INTEL_IOAT_TBG4 0x3429 +#define PCI_DEVICE_ID_INTEL_IOAT_TBG5 0x342a +#define PCI_DEVICE_ID_INTEL_IOAT_TBG6 0x342b +#define PCI_DEVICE_ID_INTEL_IOAT_TBG7 0x342c +#define PCI_DEVICE_ID_INTEL_IOAT_TBG0 0x3430 +#define PCI_DEVICE_ID_INTEL_IOAT_TBG1 0x3431 +#define PCI_DEVICE_ID_INTEL_IOAT_TBG2 0x3432 +#define PCI_DEVICE_ID_INTEL_IOAT_TBG3 0x3433 #define PCI_DEVICE_ID_INTEL_82830_HB 0x3575 #define PCI_DEVICE_ID_INTEL_82830_CGC 0x3577 #define PCI_DEVICE_ID_INTEL_82855GM_HB 0x3580 diff --git a/include/linux/pci_regs.h b/include/linux/pci_regs.h index c0c1223c9194..19958b929905 100644 --- a/include/linux/pci_regs.h +++ b/include/linux/pci_regs.h @@ -231,6 +231,7 @@ #define PCI_PM_CAP_PME_D2 0x2000 /* PME# from D2 */ #define PCI_PM_CAP_PME_D3 0x4000 /* PME# from D3 (hot) */ #define PCI_PM_CAP_PME_D3cold 0x8000 /* PME# from D3 (cold) */ +#define PCI_PM_CAP_PME_SHIFT 11 /* Start of the PME Mask in PMC */ #define 
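pci_hp_register() above now takes the parent pci_bus and a slot number, so the hotplug core can create the matching struct pci_slot itself instead of each hotplug driver carrying its own kobject. A sketch of the call as a hotplug driver behind a PCI bridge might make it; foo_register_slot() is hypothetical and the slot's ops/info are assumed to be filled in already:

#include <linux/pci.h>
#include <linux/pci_hotplug.h>

static int foo_register_slot(struct pci_dev *bridge, struct hotplug_slot *slot,
			     unsigned int devfn)
{
	/* slot->ops and slot->info must already point at the driver's tables */
	return pci_hp_register(slot, bridge->subordinate, PCI_SLOT(devfn));
}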
PCI_PM_CTRL 4 /* PM control and status register */ #define PCI_PM_CTRL_STATE_MASK 0x0003 /* Current power state (D0 to D3) */ #define PCI_PM_CTRL_NO_SOFT_RESET 0x0004 /* No reset for D3hot->D0 */ diff --git a/include/linux/percpu_counter.h b/include/linux/percpu_counter.h index 9007ccdfc112..208388835357 100644 --- a/include/linux/percpu_counter.h +++ b/include/linux/percpu_counter.h @@ -35,7 +35,7 @@ int percpu_counter_init_irq(struct percpu_counter *fbc, s64 amount); void percpu_counter_destroy(struct percpu_counter *fbc); void percpu_counter_set(struct percpu_counter *fbc, s64 amount); void __percpu_counter_add(struct percpu_counter *fbc, s64 amount, s32 batch); -s64 __percpu_counter_sum(struct percpu_counter *fbc); +s64 __percpu_counter_sum(struct percpu_counter *fbc, int set); static inline void percpu_counter_add(struct percpu_counter *fbc, s64 amount) { @@ -44,13 +44,19 @@ static inline void percpu_counter_add(struct percpu_counter *fbc, s64 amount) static inline s64 percpu_counter_sum_positive(struct percpu_counter *fbc) { - s64 ret = __percpu_counter_sum(fbc); + s64 ret = __percpu_counter_sum(fbc, 0); return ret < 0 ? 0 : ret; } +static inline s64 percpu_counter_sum_and_set(struct percpu_counter *fbc) +{ + return __percpu_counter_sum(fbc, 1); +} + + static inline s64 percpu_counter_sum(struct percpu_counter *fbc) { - return __percpu_counter_sum(fbc); + return __percpu_counter_sum(fbc, 0); } static inline s64 percpu_counter_read(struct percpu_counter *fbc) diff --git a/include/linux/pkt_cls.h b/include/linux/pkt_cls.h index 99efbed81fa2..7cf7824df778 100644 --- a/include/linux/pkt_cls.h +++ b/include/linux/pkt_cls.h @@ -374,6 +374,7 @@ enum TCA_FLOW_ACT, TCA_FLOW_POLICE, TCA_FLOW_EMATCHES, + TCA_FLOW_PERTURB, __TCA_FLOW_MAX }; diff --git a/include/linux/pkt_sched.h b/include/linux/pkt_sched.h index dbb7ac37960d..e5de421ac7b4 100644 --- a/include/linux/pkt_sched.h +++ b/include/linux/pkt_sched.h @@ -85,6 +85,26 @@ struct tc_ratespec #define TC_RTAB_SIZE 1024 +struct tc_sizespec { + unsigned char cell_log; + unsigned char size_log; + short cell_align; + int overhead; + unsigned int linklayer; + unsigned int mpu; + unsigned int mtu; + unsigned int tsize; +}; + +enum { + TCA_STAB_UNSPEC, + TCA_STAB_BASE, + TCA_STAB_DATA, + __TCA_STAB_MAX +}; + +#define TCA_STAB_MAX (__TCA_STAB_MAX - 1) + /* FIFO section */ struct tc_fifo_qopt @@ -103,15 +123,6 @@ struct tc_prio_qopt __u8 priomap[TC_PRIO_MAX+1]; /* Map: logical priority -> PRIO band */ }; -enum -{ - TCA_PRIO_UNSPEC, - TCA_PRIO_MQ, - __TCA_PRIO_MAX -}; - -#define TCA_PRIO_MAX (__TCA_PRIO_MAX - 1) - /* TBF section */ struct tc_tbf_qopt diff --git a/include/linux/platform_device.h b/include/linux/platform_device.h index 3261681c82a4..95ac21ab3a09 100644 --- a/include/linux/platform_device.h +++ b/include/linux/platform_device.h @@ -53,6 +53,7 @@ struct platform_driver { int (*suspend_late)(struct platform_device *, pm_message_t state); int (*resume_early)(struct platform_device *); int (*resume)(struct platform_device *); + struct pm_ext_ops *pm; struct device_driver driver; }; diff --git a/include/linux/pm.h b/include/linux/pm.h index 39a7ee859b67..4ad9de94449a 100644 --- a/include/linux/pm.h +++ b/include/linux/pm.h @@ -112,7 +112,9 @@ typedef struct pm_message { int event; } pm_message_t; -/* +/** + * struct pm_ops - device PM callbacks + * * Several driver power state transitions are externally visible, affecting * the state of pending I/O queues and (for drivers that touch hardware) * interrupts, wakeups, DMA, and other hardware 
state. There may also be @@ -120,6 +122,284 @@ typedef struct pm_message { * to the rest of the driver stack (such as a driver that's ON gating off * clocks which are not in active use). * + * The externally visible transitions are handled with the help of the following + * callbacks included in this structure: + * + * @prepare: Prepare the device for the upcoming transition, but do NOT change + * its hardware state. Prevent new children of the device from being + * registered after @prepare() returns (the driver's subsystem and + * generally the rest of the kernel is supposed to prevent new calls to the + * probe method from being made too once @prepare() has succeeded). If + * @prepare() detects a situation it cannot handle (e.g. registration of a + * child already in progress), it may return -EAGAIN, so that the PM core + * can execute it once again (e.g. after the new child has been registered) + * to recover from the race condition. This method is executed for all + * kinds of suspend transitions and is followed by one of the suspend + * callbacks: @suspend(), @freeze(), or @poweroff(). + * The PM core executes @prepare() for all devices before starting to + * execute suspend callbacks for any of them, so drivers may assume all of + * the other devices to be present and functional while @prepare() is being + * executed. In particular, it is safe to make GFP_KERNEL memory + * allocations from within @prepare(). However, drivers may NOT assume + * anything about the availability of the user space at that time and it + * is not correct to request firmware from within @prepare() (it's too + * late to do that). [To work around this limitation, drivers may + * register suspend and hibernation notifiers that are executed before the + * freezing of tasks.] + * + * @complete: Undo the changes made by @prepare(). This method is executed for + * all kinds of resume transitions, following one of the resume callbacks: + * @resume(), @thaw(), @restore(). Also called if the state transition + * fails before the driver's suspend callback (@suspend(), @freeze(), + * @poweroff()) can be executed (e.g. if the suspend callback fails for one + * of the other devices that the PM core has unsuccessfully attempted to + * suspend earlier). + * The PM core executes @complete() after it has executed the appropriate + * resume callback for all devices. + * + * @suspend: Executed before putting the system into a sleep state in which the + * contents of main memory are preserved. Quiesce the device, put it into + * a low power state appropriate for the upcoming system state (such as + * PCI_D3hot), and enable wakeup events as appropriate. + * + * @resume: Executed after waking the system up from a sleep state in which the + * contents of main memory were preserved. Put the device into the + * appropriate state, according to the information saved in memory by the + * preceding @suspend(). The driver starts working again, responding to + * hardware events and software requests. The hardware may have gone + * through a power-off reset, or it may have maintained state from the + * previous suspend() which the driver may rely on while resuming. On most + * platforms, there are no restrictions on availability of resources like + * clocks during @resume(). + * + * @freeze: Hibernation-specific, executed before creating a hibernation image. + * Quiesce operations so that a consistent image can be created, but do NOT + * otherwise put the device into a low power device state and do NOT emit + * system wakeup events. 
Save in main memory the device settings to be + * used by @restore() during the subsequent resume from hibernation or by + * the subsequent @thaw(), if the creation of the image or the restoration + * of main memory contents from it fails. + * + * @thaw: Hibernation-specific, executed after creating a hibernation image OR + * if the creation of the image fails. Also executed after a failing + * attempt to restore the contents of main memory from such an image. + * Undo the changes made by the preceding @freeze(), so the device can be + * operated in the same way as immediately before the call to @freeze(). + * + * @poweroff: Hibernation-specific, executed after saving a hibernation image. + * Quiesce the device, put it into a low power state appropriate for the + * upcoming system state (such as PCI_D3hot), and enable wakeup events as + * appropriate. + * + * @restore: Hibernation-specific, executed after restoring the contents of main + * memory from a hibernation image. Driver starts working again, + * responding to hardware events and software requests. Drivers may NOT + * make ANY assumptions about the hardware state right prior to @restore(). + * On most platforms, there are no restrictions on availability of + * resources like clocks during @restore(). + * + * All of the above callbacks, except for @complete(), return error codes. + * However, the error codes returned by the resume operations, @resume(), + * @thaw(), and @restore(), do not cause the PM core to abort the resume + * transition during which they are returned. The error codes returned in + * that cases are only printed by the PM core to the system logs for debugging + * purposes. Still, it is recommended that drivers only return error codes + * from their resume methods in case of an unrecoverable failure (i.e. when the + * device being handled refuses to resume and becomes unusable) to allow us to + * modify the PM core in the future, so that it can avoid attempting to handle + * devices that failed to resume and their children. + * + * It is allowed to unregister devices while the above callbacks are being + * executed. However, it is not allowed to unregister a device from within any + * of its own callbacks. + */ + +struct pm_ops { + int (*prepare)(struct device *dev); + void (*complete)(struct device *dev); + int (*suspend)(struct device *dev); + int (*resume)(struct device *dev); + int (*freeze)(struct device *dev); + int (*thaw)(struct device *dev); + int (*poweroff)(struct device *dev); + int (*restore)(struct device *dev); +}; + +/** + * struct pm_ext_ops - extended device PM callbacks + * + * Some devices require certain operations related to suspend and hibernation + * to be carried out with interrupts disabled. Thus, 'struct pm_ext_ops' below + * is defined, adding callbacks to be executed with interrupts disabled to + * 'struct pm_ops'. + * + * The following callbacks included in 'struct pm_ext_ops' are executed with + * the nonboot CPUs switched off and with interrupts disabled on the only + * functional CPU. They also are executed with the PM core list of devices + * locked, so they must NOT unregister any devices. 
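struct pm_ops above is the table those callbacks live in; a driver that treats suspend and hibernation the same way typically reuses one quiesce/restart pair across the slots. A minimal sketch (the foo_* names are hypothetical); the bus-level glue, such as the pm pointer added to struct platform_driver and struct pci_driver earlier in this diff, takes it wrapped in the struct pm_ext_ops described next:

#include <linux/pm.h>

static int foo_suspend(struct device *dev)
{
	/* quiesce the device and drop it into a low-power state */
	return 0;
}

static int foo_resume(struct device *dev)
{
	/* undo foo_suspend(): back to full operation */
	return 0;
}

static struct pm_ops foo_pm_ops = {
	.suspend  = foo_suspend,
	.resume   = foo_resume,
	.freeze   = foo_suspend,	/* quiesce before the hibernation image */
	.thaw     = foo_resume,
	.poweroff = foo_suspend,	/* image written, power the device down */
	.restore  = foo_resume,
};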
+ * + * @suspend_noirq: Complete the operations of ->suspend() by carrying out any + * actions required for suspending the device that need interrupts to be + * disabled + * + * @resume_noirq: Prepare for the execution of ->resume() by carrying out any + * actions required for resuming the device that need interrupts to be + * disabled + * + * @freeze_noirq: Complete the operations of ->freeze() by carrying out any + * actions required for freezing the device that need interrupts to be + * disabled + * + * @thaw_noirq: Prepare for the execution of ->thaw() by carrying out any + * actions required for thawing the device that need interrupts to be + * disabled + * + * @poweroff_noirq: Complete the operations of ->poweroff() by carrying out any + * actions required for handling the device that need interrupts to be + * disabled + * + * @restore_noirq: Prepare for the execution of ->restore() by carrying out any + * actions required for restoring the operations of the device that need + * interrupts to be disabled + * + * All of the above callbacks return error codes, but the error codes returned + * by the resume operations, @resume_noirq(), @thaw_noirq(), and + * @restore_noirq(), do not cause the PM core to abort the resume transition + * during which they are returned. The error codes returned in that cases are + * only printed by the PM core to the system logs for debugging purposes. + * Still, as stated above, it is recommended that drivers only return error + * codes from their resume methods if the device being handled fails to resume + * and is not usable any more. + */ + +struct pm_ext_ops { + struct pm_ops base; + int (*suspend_noirq)(struct device *dev); + int (*resume_noirq)(struct device *dev); + int (*freeze_noirq)(struct device *dev); + int (*thaw_noirq)(struct device *dev); + int (*poweroff_noirq)(struct device *dev); + int (*restore_noirq)(struct device *dev); +}; + +/** + * PM_EVENT_ messages + * + * The following PM_EVENT_ messages are defined for the internal use of the PM + * core, in order to provide a mechanism allowing the high level suspend and + * hibernation code to convey the necessary information to the device PM core + * code: + * + * ON No transition. + * + * FREEZE System is going to hibernate, call ->prepare() and ->freeze() + * for all devices. + * + * SUSPEND System is going to suspend, call ->prepare() and ->suspend() + * for all devices. + * + * HIBERNATE Hibernation image has been saved, call ->prepare() and + * ->poweroff() for all devices. + * + * QUIESCE Contents of main memory are going to be restored from a (loaded) + * hibernation image, call ->prepare() and ->freeze() for all + * devices. + * + * RESUME System is resuming, call ->resume() and ->complete() for all + * devices. + * + * THAW Hibernation image has been created, call ->thaw() and + * ->complete() for all devices. + * + * RESTORE Contents of main memory have been restored from a hibernation + * image, call ->restore() and ->complete() for all devices. + * + * RECOVER Creation of a hibernation image or restoration of the main + * memory contents from a hibernation image has failed, call + * ->thaw() and ->complete() for all devices. 
+ */ + +#define PM_EVENT_ON 0x0000 +#define PM_EVENT_FREEZE 0x0001 +#define PM_EVENT_SUSPEND 0x0002 +#define PM_EVENT_HIBERNATE 0x0004 +#define PM_EVENT_QUIESCE 0x0008 +#define PM_EVENT_RESUME 0x0010 +#define PM_EVENT_THAW 0x0020 +#define PM_EVENT_RESTORE 0x0040 +#define PM_EVENT_RECOVER 0x0080 + +#define PM_EVENT_SLEEP (PM_EVENT_SUSPEND | PM_EVENT_HIBERNATE) + +#define PMSG_FREEZE ((struct pm_message){ .event = PM_EVENT_FREEZE, }) +#define PMSG_QUIESCE ((struct pm_message){ .event = PM_EVENT_QUIESCE, }) +#define PMSG_SUSPEND ((struct pm_message){ .event = PM_EVENT_SUSPEND, }) +#define PMSG_HIBERNATE ((struct pm_message){ .event = PM_EVENT_HIBERNATE, }) +#define PMSG_RESUME ((struct pm_message){ .event = PM_EVENT_RESUME, }) +#define PMSG_THAW ((struct pm_message){ .event = PM_EVENT_THAW, }) +#define PMSG_RESTORE ((struct pm_message){ .event = PM_EVENT_RESTORE, }) +#define PMSG_RECOVER ((struct pm_message){ .event = PM_EVENT_RECOVER, }) +#define PMSG_ON ((struct pm_message){ .event = PM_EVENT_ON, }) + +/** + * Device power management states + * + * These state labels are used internally by the PM core to indicate the current + * status of a device with respect to the PM core operations. + * + * DPM_ON Device is regarded as operational. Set this way + * initially and when ->complete() is about to be called. + * Also set when ->prepare() fails. + * + * DPM_PREPARING Device is going to be prepared for a PM transition. Set + * when ->prepare() is about to be called. + * + * DPM_RESUMING Device is going to be resumed. Set when ->resume(), + * ->thaw(), or ->restore() is about to be called. + * + * DPM_SUSPENDING Device has been prepared for a power transition. Set + * when ->prepare() has just succeeded. + * + * DPM_OFF Device is regarded as inactive. Set immediately after + * ->suspend(), ->freeze(), or ->poweroff() has succeeded. + * Also set when ->resume()_noirq, ->thaw_noirq(), or + * ->restore_noirq() is about to be called. + * + * DPM_OFF_IRQ Device is in a "deep sleep". Set immediately after + * ->suspend_noirq(), ->freeze_noirq(), or + * ->poweroff_noirq() has just succeeded. + */ + +enum dpm_state { + DPM_INVALID, + DPM_ON, + DPM_PREPARING, + DPM_RESUMING, + DPM_SUSPENDING, + DPM_OFF, + DPM_OFF_IRQ, +}; + +struct dev_pm_info { + pm_message_t power_state; + unsigned can_wakeup:1; + unsigned should_wakeup:1; + enum dpm_state status; /* Owned by the PM core */ +#ifdef CONFIG_PM_SLEEP + struct list_head entry; +#endif +}; + +/* + * The PM_EVENT_ messages are also used by drivers implementing the legacy + * suspend framework, based on the ->suspend() and ->resume() callbacks common + * for suspend and hibernation transitions, according to the rules below. + */ + +/* Necessary, because several drivers use PM_EVENT_PRETHAW */ +#define PM_EVENT_PRETHAW PM_EVENT_QUIESCE + +/* * One transition is triggered by resume(), after a suspend() call; the * message is implicit: * @@ -164,35 +444,13 @@ typedef struct pm_message { * or from system low-power states such as standby or suspend-to-RAM. 
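For drivers still using the legacy single ->suspend() callback, the pm_message_t event above is how the kind of transition is told apart (note that PM_EVENT_PRETHAW is now an alias for PM_EVENT_QUIESCE). A sketch of the usual dispatch in a platform driver, illustrative only; foo_legacy_suspend() is hypothetical:

#include <linux/platform_device.h>
#include <linux/pm.h>

static int foo_legacy_suspend(struct platform_device *pdev, pm_message_t msg)
{
	switch (msg.event) {
	case PM_EVENT_SUSPEND:
	case PM_EVENT_HIBERNATE:
		/* entering a sleep state: full low-power entry, arm wake-ups */
		return 0;
	case PM_EVENT_FREEZE:
	case PM_EVENT_QUIESCE:	/* PM_EVENT_PRETHAW maps here */
		/* hibernation image about to be written or restored: just quiesce */
		return 0;
	default:
		return 0;
	}
}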
*/ -#define PM_EVENT_ON 0 -#define PM_EVENT_FREEZE 1 -#define PM_EVENT_SUSPEND 2 -#define PM_EVENT_HIBERNATE 4 -#define PM_EVENT_PRETHAW 8 - -#define PM_EVENT_SLEEP (PM_EVENT_SUSPEND | PM_EVENT_HIBERNATE) - -#define PMSG_FREEZE ((struct pm_message){ .event = PM_EVENT_FREEZE, }) -#define PMSG_PRETHAW ((struct pm_message){ .event = PM_EVENT_PRETHAW, }) -#define PMSG_SUSPEND ((struct pm_message){ .event = PM_EVENT_SUSPEND, }) -#define PMSG_HIBERNATE ((struct pm_message){ .event = PM_EVENT_HIBERNATE, }) -#define PMSG_ON ((struct pm_message){ .event = PM_EVENT_ON, }) - -struct dev_pm_info { - pm_message_t power_state; - unsigned can_wakeup:1; - unsigned should_wakeup:1; - bool sleeping:1; /* Owned by the PM core */ -#ifdef CONFIG_PM_SLEEP - struct list_head entry; -#endif -}; +#ifdef CONFIG_PM_SLEEP +extern void device_pm_lock(void); +extern void device_power_up(pm_message_t state); +extern void device_resume(pm_message_t state); +extern void device_pm_unlock(void); extern int device_power_down(pm_message_t state); -extern void device_power_up(void); -extern void device_resume(void); - -#ifdef CONFIG_PM_SLEEP extern int device_suspend(pm_message_t state); extern int device_prepare_suspend(pm_message_t state); diff --git a/include/linux/pm_wakeup.h b/include/linux/pm_wakeup.h index f0d0b2cb8d20..0aae7776185e 100644 --- a/include/linux/pm_wakeup.h +++ b/include/linux/pm_wakeup.h @@ -35,6 +35,11 @@ static inline void device_init_wakeup(struct device *dev, int val) dev->power.can_wakeup = dev->power.should_wakeup = !!val; } +static inline void device_set_wakeup_capable(struct device *dev, int val) +{ + dev->power.can_wakeup = !!val; +} + static inline int device_can_wakeup(struct device *dev) { return dev->power.can_wakeup; @@ -47,21 +52,7 @@ static inline void device_set_wakeup_enable(struct device *dev, int val) static inline int device_may_wakeup(struct device *dev) { - return dev->power.can_wakeup & dev->power.should_wakeup; -} - -/* - * Platform hook to activate device wakeup capability, if that's not already - * handled by enable_irq_wake() etc. - * Returns zero on success, else negative errno - */ -extern int (*platform_enable_wakeup)(struct device *dev, int is_on); - -static inline int call_platform_enable_wakeup(struct device *dev, int is_on) -{ - if (platform_enable_wakeup) - return (*platform_enable_wakeup)(dev, is_on); - return 0; + return dev->power.can_wakeup && dev->power.should_wakeup; } #else /* !CONFIG_PM */ @@ -72,6 +63,8 @@ static inline void device_init_wakeup(struct device *dev, int val) dev->power.can_wakeup = !!val; } +static inline void device_set_wakeup_capable(struct device *dev, int val) { } + static inline int device_can_wakeup(struct device *dev) { return dev->power.can_wakeup; @@ -80,11 +73,6 @@ static inline int device_can_wakeup(struct device *dev) #define device_set_wakeup_enable(dev, val) do {} while (0) #define device_may_wakeup(dev) 0 -static inline int call_platform_enable_wakeup(struct device *dev, int is_on) -{ - return 0; -} - #endif /* !CONFIG_PM */ #endif /* _LINUX_PM_WAKEUP_H */ diff --git a/include/linux/pnp.h b/include/linux/pnp.h index 63b128d512fb..1ce54b63085d 100644 --- a/include/linux/pnp.h +++ b/include/linux/pnp.h @@ -1,6 +1,8 @@ /* * Linux Plug and Play Support * Copyright by Adam Belay <ambx1@neo.rr.com> + * Copyright (C) 2008 Hewlett-Packard Development Company, L.P. 
+ * Bjorn Helgaas <bjorn.helgaas@hp.com> */ #ifndef _LINUX_PNP_H @@ -15,7 +17,6 @@ struct pnp_protocol; struct pnp_dev; -struct pnp_resource_table; /* * Resource Management @@ -24,7 +25,14 @@ struct resource *pnp_get_resource(struct pnp_dev *, unsigned int, unsigned int); static inline int pnp_resource_valid(struct resource *res) { - if (res && !(res->flags & IORESOURCE_UNSET)) + if (res) + return 1; + return 0; +} + +static inline int pnp_resource_enabled(struct resource *res) +{ + if (res && !(res->flags & IORESOURCE_DISABLED)) return 1; return 0; } @@ -40,19 +48,31 @@ static inline resource_size_t pnp_resource_len(struct resource *res) static inline resource_size_t pnp_port_start(struct pnp_dev *dev, unsigned int bar) { - return pnp_get_resource(dev, IORESOURCE_IO, bar)->start; + struct resource *res = pnp_get_resource(dev, IORESOURCE_IO, bar); + + if (pnp_resource_valid(res)) + return res->start; + return 0; } static inline resource_size_t pnp_port_end(struct pnp_dev *dev, unsigned int bar) { - return pnp_get_resource(dev, IORESOURCE_IO, bar)->end; + struct resource *res = pnp_get_resource(dev, IORESOURCE_IO, bar); + + if (pnp_resource_valid(res)) + return res->end; + return 0; } static inline unsigned long pnp_port_flags(struct pnp_dev *dev, unsigned int bar) { - return pnp_get_resource(dev, IORESOURCE_IO, bar)->flags; + struct resource *res = pnp_get_resource(dev, IORESOURCE_IO, bar); + + if (pnp_resource_valid(res)) + return res->flags; + return IORESOURCE_IO | IORESOURCE_AUTO; } static inline int pnp_port_valid(struct pnp_dev *dev, unsigned int bar) @@ -63,25 +83,41 @@ static inline int pnp_port_valid(struct pnp_dev *dev, unsigned int bar) static inline resource_size_t pnp_port_len(struct pnp_dev *dev, unsigned int bar) { - return pnp_resource_len(pnp_get_resource(dev, IORESOURCE_IO, bar)); + struct resource *res = pnp_get_resource(dev, IORESOURCE_IO, bar); + + if (pnp_resource_valid(res)) + return pnp_resource_len(res); + return 0; } static inline resource_size_t pnp_mem_start(struct pnp_dev *dev, unsigned int bar) { - return pnp_get_resource(dev, IORESOURCE_MEM, bar)->start; + struct resource *res = pnp_get_resource(dev, IORESOURCE_MEM, bar); + + if (pnp_resource_valid(res)) + return res->start; + return 0; } static inline resource_size_t pnp_mem_end(struct pnp_dev *dev, unsigned int bar) { - return pnp_get_resource(dev, IORESOURCE_MEM, bar)->end; + struct resource *res = pnp_get_resource(dev, IORESOURCE_MEM, bar); + + if (pnp_resource_valid(res)) + return res->end; + return 0; } static inline unsigned long pnp_mem_flags(struct pnp_dev *dev, unsigned int bar) { - return pnp_get_resource(dev, IORESOURCE_MEM, bar)->flags; + struct resource *res = pnp_get_resource(dev, IORESOURCE_MEM, bar); + + if (pnp_resource_valid(res)) + return res->flags; + return IORESOURCE_MEM | IORESOURCE_AUTO; } static inline int pnp_mem_valid(struct pnp_dev *dev, unsigned int bar) @@ -92,18 +128,30 @@ static inline int pnp_mem_valid(struct pnp_dev *dev, unsigned int bar) static inline resource_size_t pnp_mem_len(struct pnp_dev *dev, unsigned int bar) { - return pnp_resource_len(pnp_get_resource(dev, IORESOURCE_MEM, bar)); + struct resource *res = pnp_get_resource(dev, IORESOURCE_MEM, bar); + + if (pnp_resource_valid(res)) + return pnp_resource_len(res); + return 0; } static inline resource_size_t pnp_irq(struct pnp_dev *dev, unsigned int bar) { - return pnp_get_resource(dev, IORESOURCE_IRQ, bar)->start; + struct resource *res = pnp_get_resource(dev, IORESOURCE_IRQ, bar); + + if (pnp_resource_valid(res)) + 
return res->start; + return -1; } static inline unsigned long pnp_irq_flags(struct pnp_dev *dev, unsigned int bar) { - return pnp_get_resource(dev, IORESOURCE_IRQ, bar)->flags; + struct resource *res = pnp_get_resource(dev, IORESOURCE_IRQ, bar); + + if (pnp_resource_valid(res)) + return res->flags; + return IORESOURCE_IRQ | IORESOURCE_AUTO; } static inline int pnp_irq_valid(struct pnp_dev *dev, unsigned int bar) @@ -114,12 +162,20 @@ static inline int pnp_irq_valid(struct pnp_dev *dev, unsigned int bar) static inline resource_size_t pnp_dma(struct pnp_dev *dev, unsigned int bar) { - return pnp_get_resource(dev, IORESOURCE_DMA, bar)->start; + struct resource *res = pnp_get_resource(dev, IORESOURCE_DMA, bar); + + if (pnp_resource_valid(res)) + return res->start; + return -1; } static inline unsigned long pnp_dma_flags(struct pnp_dev *dev, unsigned int bar) { - return pnp_get_resource(dev, IORESOURCE_DMA, bar)->flags; + struct resource *res = pnp_get_resource(dev, IORESOURCE_DMA, bar); + + if (pnp_resource_valid(res)) + return res->flags; + return IORESOURCE_DMA | IORESOURCE_AUTO; } static inline int pnp_dma_valid(struct pnp_dev *dev, unsigned int bar) @@ -128,57 +184,6 @@ static inline int pnp_dma_valid(struct pnp_dev *dev, unsigned int bar) } -#define PNP_PORT_FLAG_16BITADDR (1<<0) -#define PNP_PORT_FLAG_FIXED (1<<1) - -struct pnp_port { - unsigned short min; /* min base number */ - unsigned short max; /* max base number */ - unsigned char align; /* align boundary */ - unsigned char size; /* size of range */ - unsigned char flags; /* port flags */ - unsigned char pad; /* pad */ - struct pnp_port *next; /* next port */ -}; - -#define PNP_IRQ_NR 256 -struct pnp_irq { - DECLARE_BITMAP(map, PNP_IRQ_NR); /* bitmask for IRQ lines */ - unsigned char flags; /* IRQ flags */ - unsigned char pad; /* pad */ - struct pnp_irq *next; /* next IRQ */ -}; - -struct pnp_dma { - unsigned char map; /* bitmask for DMA channels */ - unsigned char flags; /* DMA flags */ - struct pnp_dma *next; /* next port */ -}; - -struct pnp_mem { - unsigned int min; /* min base number */ - unsigned int max; /* max base number */ - unsigned int align; /* align boundary */ - unsigned int size; /* size of range */ - unsigned char flags; /* memory flags */ - unsigned char pad; /* pad */ - struct pnp_mem *next; /* next memory resource */ -}; - -#define PNP_RES_PRIORITY_PREFERRED 0 -#define PNP_RES_PRIORITY_ACCEPTABLE 1 -#define PNP_RES_PRIORITY_FUNCTIONAL 2 -#define PNP_RES_PRIORITY_INVALID 65535 - -struct pnp_option { - unsigned short priority; /* priority */ - struct pnp_port *port; /* first port */ - struct pnp_irq *irq; /* first IRQ */ - struct pnp_dma *dma; /* first DMA */ - struct pnp_mem *mem; /* first memory resource */ - struct pnp_option *next; /* used to chain dependent resources */ -}; - /* * Device Management */ @@ -246,9 +251,9 @@ struct pnp_dev { int active; int capabilities; - struct pnp_option *independent; - struct pnp_option *dependent; - struct pnp_resource_table *res; + unsigned int num_dependent_sets; + struct list_head resources; + struct list_head options; char name[PNP_NAME_LEN]; /* contains a human-readable name */ int flags; /* used by protocols */ @@ -425,6 +430,8 @@ void pnp_unregister_card_driver(struct pnp_card_driver *drv); extern struct list_head pnp_cards; /* resource management */ +int pnp_possible_config(struct pnp_dev *dev, int type, resource_size_t base, + resource_size_t size); int pnp_auto_config_dev(struct pnp_dev *dev); int pnp_start_dev(struct pnp_dev *dev); int pnp_stop_dev(struct pnp_dev 
*dev); @@ -452,6 +459,9 @@ static inline int pnp_register_card_driver(struct pnp_card_driver *drv) { return static inline void pnp_unregister_card_driver(struct pnp_card_driver *drv) { } /* resource management */ +static inline int pnp_possible_config(struct pnp_dev *dev, int type, + resource_size_t base, + resource_size_t size) { return 0; } static inline int pnp_auto_config_dev(struct pnp_dev *dev) { return -ENODEV; } static inline int pnp_start_dev(struct pnp_dev *dev) { return -ENODEV; } static inline int pnp_stop_dev(struct pnp_dev *dev) { return -ENODEV; } diff --git a/include/linux/ppp-comp.h b/include/linux/ppp-comp.h index e86a7a5cf355..b8d4ddd22736 100644 --- a/include/linux/ppp-comp.h +++ b/include/linux/ppp-comp.h @@ -23,8 +23,6 @@ * ON AN "AS IS" BASIS, AND THE AUSTRALIAN NATIONAL UNIVERSITY HAS NO * OBLIGATION TO PROVIDE MAINTENANCE, SUPPORT, UPDATES, ENHANCEMENTS, * OR MODIFICATIONS. - * - * $Id: ppp-comp.h,v 1.6 1997/11/27 06:04:44 paulus Exp $ */ /* diff --git a/include/linux/ppp_defs.h b/include/linux/ppp_defs.h index c6b13ff85028..6e8adc77522c 100644 --- a/include/linux/ppp_defs.h +++ b/include/linux/ppp_defs.h @@ -1,5 +1,3 @@ -/* $Id: ppp_defs.h,v 1.2 1994/09/21 01:31:06 paulus Exp $ */ - /* * ppp_defs.h - PPP definitions. * diff --git a/include/linux/preempt.h b/include/linux/preempt.h index 23f0c54175cd..72b1a10a59b6 100644 --- a/include/linux/preempt.h +++ b/include/linux/preempt.h @@ -10,7 +10,7 @@ #include <linux/linkage.h> #include <linux/list.h> -#ifdef CONFIG_DEBUG_PREEMPT +#if defined(CONFIG_DEBUG_PREEMPT) || defined(CONFIG_PREEMPT_TRACER) extern void add_preempt_count(int val); extern void sub_preempt_count(int val); #else @@ -52,6 +52,34 @@ do { \ preempt_check_resched(); \ } while (0) +/* For debugging and tracer internals only! 
*/ +#define add_preempt_count_notrace(val) \ + do { preempt_count() += (val); } while (0) +#define sub_preempt_count_notrace(val) \ + do { preempt_count() -= (val); } while (0) +#define inc_preempt_count_notrace() add_preempt_count_notrace(1) +#define dec_preempt_count_notrace() sub_preempt_count_notrace(1) + +#define preempt_disable_notrace() \ +do { \ + inc_preempt_count_notrace(); \ + barrier(); \ +} while (0) + +#define preempt_enable_no_resched_notrace() \ +do { \ + barrier(); \ + dec_preempt_count_notrace(); \ +} while (0) + +/* preempt_check_resched is OK to trace */ +#define preempt_enable_notrace() \ +do { \ + preempt_enable_no_resched_notrace(); \ + barrier(); \ + preempt_check_resched(); \ +} while (0) + #else #define preempt_disable() do { } while (0) @@ -59,6 +87,10 @@ do { \ #define preempt_enable() do { } while (0) #define preempt_check_resched() do { } while (0) +#define preempt_disable_notrace() do { } while (0) +#define preempt_enable_no_resched_notrace() do { } while (0) +#define preempt_enable_notrace() do { } while (0) + #endif #ifdef CONFIG_PREEMPT_NOTIFIERS diff --git a/include/linux/proc_fs.h b/include/linux/proc_fs.h index fff1d27ddb4c..15a9eaf4a802 100644 --- a/include/linux/proc_fs.h +++ b/include/linux/proc_fs.h @@ -305,8 +305,6 @@ static inline struct net *PDE_NET(struct proc_dir_entry *pde) return pde->parent->data; } -struct net *get_proc_net(const struct inode *inode); - struct proc_maps_private { struct pid *pid; struct task_struct *task; diff --git a/include/linux/ptrace.h b/include/linux/ptrace.h index f98501ba557e..c6f5f9dd0cee 100644 --- a/include/linux/ptrace.h +++ b/include/linux/ptrace.h @@ -95,8 +95,12 @@ extern void __ptrace_link(struct task_struct *child, struct task_struct *new_parent); extern void __ptrace_unlink(struct task_struct *child); extern void ptrace_untrace(struct task_struct *child); -extern int ptrace_may_attach(struct task_struct *task); -extern int __ptrace_may_attach(struct task_struct *task); +#define PTRACE_MODE_READ 1 +#define PTRACE_MODE_ATTACH 2 +/* Returns 0 on success, -errno on denial. */ +extern int __ptrace_may_access(struct task_struct *task, unsigned int mode); +/* Returns true on success, false on denial. 
*/ +extern bool ptrace_may_access(struct task_struct *task, unsigned int mode); static inline int ptrace_reparented(struct task_struct *child) { diff --git a/include/linux/pwm.h b/include/linux/pwm.h new file mode 100644 index 000000000000..3945f803d514 --- /dev/null +++ b/include/linux/pwm.h @@ -0,0 +1,31 @@ +#ifndef __LINUX_PWM_H +#define __LINUX_PWM_H + +struct pwm_device; + +/* + * pwm_request - request a PWM device + */ +struct pwm_device *pwm_request(int pwm_id, const char *label); + +/* + * pwm_free - free a PWM device + */ +void pwm_free(struct pwm_device *pwm); + +/* + * pwm_config - change a PWM device configuration + */ +int pwm_config(struct pwm_device *pwm, int duty_ns, int period_ns); + +/* + * pwm_enable - start a PWM output toggling + */ +int pwm_enable(struct pwm_device *pwm); + +/* + * pwm_disable - stop a PWM output toggling + */ +void pwm_disable(struct pwm_device *pwm); + +#endif /* __LINUX_PWM_H */ diff --git a/include/linux/pwm_backlight.h b/include/linux/pwm_backlight.h new file mode 100644 index 000000000000..7a9754c96775 --- /dev/null +++ b/include/linux/pwm_backlight.h @@ -0,0 +1,17 @@ +/* + * Generic PWM backlight driver data - see drivers/video/backlight/pwm_bl.c + */ +#ifndef __LINUX_PWM_BACKLIGHT_H +#define __LINUX_PWM_BACKLIGHT_H + +struct platform_pwm_backlight_data { + int pwm_id; + unsigned int max_brightness; + unsigned int dft_brightness; + unsigned int pwm_period_ns; + int (*init)(struct device *dev); + int (*notify)(int brightness); + void (*exit)(struct device *dev); +}; + +#endif diff --git a/include/linux/raid/bitmap.h b/include/linux/raid/bitmap.h index 78bfdea24a8e..e98900671ca9 100644 --- a/include/linux/raid/bitmap.h +++ b/include/linux/raid/bitmap.h @@ -221,6 +221,7 @@ struct bitmap { unsigned long syncchunk; __u64 events_cleared; + int need_sync; /* bitmap spinlock */ spinlock_t lock; diff --git a/include/linux/raid/linear.h b/include/linux/raid/linear.h index ba15469daf11..7e375111d007 100644 --- a/include/linux/raid/linear.h +++ b/include/linux/raid/linear.h @@ -16,7 +16,7 @@ struct linear_private_data struct linear_private_data *prev; /* earlier version */ dev_info_t **hash_table; sector_t hash_spacing; - sector_t array_size; + sector_t array_sectors; int preshift; /* shift before dividing by hash_spacing */ dev_info_t disks[0]; }; diff --git a/include/linux/raid/md.h b/include/linux/raid/md.h index b7386ae9d288..dc0e3fcb9f28 100644 --- a/include/linux/raid/md.h +++ b/include/linux/raid/md.h @@ -95,7 +95,7 @@ extern int sync_page_io(struct block_device *bdev, sector_t sector, int size, struct page *page, int rw); extern void md_do_sync(mddev_t *mddev); extern void md_new_event(mddev_t *mddev); -extern void md_allow_write(mddev_t *mddev); +extern int md_allow_write(mddev_t *mddev); extern void md_wait_for_blocked_rdev(mdk_rdev_t *rdev, mddev_t *mddev); #endif /* CONFIG_MD */ diff --git a/include/linux/raid/md_k.h b/include/linux/raid/md_k.h index 3dea9f545c8f..9f2549ac0e2d 100644 --- a/include/linux/raid/md_k.h +++ b/include/linux/raid/md_k.h @@ -59,7 +59,7 @@ struct mdk_rdev_s int sb_loaded; __u64 sb_events; sector_t data_offset; /* start of data in array */ - sector_t sb_offset; + sector_t sb_start; /* offset of the super block (in 512byte sectors) */ int sb_size; /* bytes in the superblock */ int preferred_minor; /* autorun support */ @@ -87,6 +87,9 @@ struct mdk_rdev_s #define Blocked 8 /* An error occurred on an externally * managed array, don't allow writes * until it is cleared */ +#define StateChanged 9 /* Faulty or Blocked has
changed during + * interrupt, so it needs to be + * notified by the thread */ wait_queue_head_t blocked_wait; int desc_nr; /* descriptor index in the superblock */ @@ -147,7 +150,7 @@ struct mddev_s int raid_disks; int max_disks; sector_t size; /* used size of component devices */ - sector_t array_size; /* exported array size */ + sector_t array_sectors; /* exported array size */ __u64 events; char uuid[16]; @@ -188,6 +191,7 @@ struct mddev_s * NEEDED: we might need to start a resync/recover * RUNNING: a thread is running, or about to be started * SYNC: actually doing a resync, not a recovery + * RECOVER: doing recovery, or need to try it. * INTR: resync needs to be aborted for some reason * DONE: thread is done and is waiting to be reaped * REQUEST: user-space has requested a sync (used with SYNC) @@ -198,6 +202,7 @@ struct mddev_s */ #define MD_RECOVERY_RUNNING 0 #define MD_RECOVERY_SYNC 1 +#define MD_RECOVERY_RECOVER 2 #define MD_RECOVERY_INTR 3 #define MD_RECOVERY_DONE 4 #define MD_RECOVERY_NEEDED 5 @@ -210,7 +215,8 @@ struct mddev_s int in_sync; /* know to not need resync */ struct mutex reconfig_mutex; - atomic_t active; + atomic_t active; /* general refcount */ + atomic_t openers; /* number of active opens */ int changed; /* true if we might need to reread partition info */ int degraded; /* whether md should consider @@ -227,6 +233,8 @@ struct mddev_s atomic_t recovery_active; /* blocks scheduled, but not written */ wait_queue_head_t recovery_wait; sector_t recovery_cp; + sector_t resync_min; /* user requested sync + * starts here */ sector_t resync_max; /* resync should pause * when it gets here */ @@ -331,6 +339,9 @@ static inline char * mdname (mddev_t * mddev) #define rdev_for_each(rdev, tmp, mddev) \ rdev_for_each_list(rdev, tmp, (mddev)->disks) +#define rdev_for_each_rcu(rdev, mddev) \ + list_for_each_entry_rcu(rdev, &((mddev)->disks), same_set) + typedef struct mdk_thread_s { void (*run) (mddev_t *mddev); mddev_t *mddev; diff --git a/include/linux/raid/md_p.h b/include/linux/raid/md_p.h index 3f2cd98c508b..8b4de4a41ff1 100644 --- a/include/linux/raid/md_p.h +++ b/include/linux/raid/md_p.h @@ -43,14 +43,11 @@ */ #define MD_RESERVED_BYTES (64 * 1024) #define MD_RESERVED_SECTORS (MD_RESERVED_BYTES / 512) -#define MD_RESERVED_BLOCKS (MD_RESERVED_BYTES / BLOCK_SIZE) #define MD_NEW_SIZE_SECTORS(x) ((x & ~(MD_RESERVED_SECTORS - 1)) - MD_RESERVED_SECTORS) -#define MD_NEW_SIZE_BLOCKS(x) ((x & ~(MD_RESERVED_BLOCKS - 1)) - MD_RESERVED_BLOCKS) #define MD_SB_BYTES 4096 #define MD_SB_WORDS (MD_SB_BYTES / 4) -#define MD_SB_BLOCKS (MD_SB_BYTES / BLOCK_SIZE) #define MD_SB_SECTORS (MD_SB_BYTES / 512) /* diff --git a/include/linux/raid/raid5.h b/include/linux/raid/raid5.h index f0827d31ae6f..3b2672792457 100644 --- a/include/linux/raid/raid5.h +++ b/include/linux/raid/raid5.h @@ -158,6 +158,43 @@ * the compute block completes. */ +/* + * Operations state - intermediate states that are visible outside of sh->lock + * In general _idle indicates nothing is running, _run indicates a data + * processing operation is active, and _result means the data processing result + * is stable and can be acted upon. 
For simple operations like biofill and + * compute that only have an _idle and _run state they are indicated with + * sh->state flags (STRIPE_BIOFILL_RUN and STRIPE_COMPUTE_RUN) + */ +/** + * enum check_states - handles syncing / repairing a stripe + * @check_state_idle - check operations are quiesced + * @check_state_run - check operation is running + * @check_state_check_result - set outside lock when check result is valid + * @check_state_compute_run - check failed and we are repairing + * @check_state_compute_result - set outside lock when compute result is valid + */ +enum check_states { + check_state_idle = 0, + check_state_run, /* parity check */ + check_state_check_result, + check_state_compute_run, /* parity repair */ + check_state_compute_result, +}; + +/** + * enum reconstruct_states - handles writing or expanding a stripe + */ +enum reconstruct_states { + reconstruct_state_idle = 0, + reconstruct_state_prexor_drain_run, /* prexor-write */ + reconstruct_state_drain_run, /* write */ + reconstruct_state_run, /* expand */ + reconstruct_state_prexor_drain_result, + reconstruct_state_drain_result, + reconstruct_state_result, +}; + struct stripe_head { struct hlist_node hash; struct list_head lru; /* inactive_list or handle_list */ @@ -169,19 +206,13 @@ struct stripe_head { spinlock_t lock; int bm_seq; /* sequence number for bitmap flushes */ int disks; /* disks in stripe */ + enum check_states check_state; + enum reconstruct_states reconstruct_state; /* stripe_operations - * @pending - pending ops flags (set for request->issue->complete) - * @ack - submitted ops flags (set for issue->complete) - * @complete - completed ops flags (set for complete) * @target - STRIPE_OP_COMPUTE_BLK target - * @count - raid5_runs_ops is set to run when this is non-zero */ struct stripe_operations { - unsigned long pending; - unsigned long ack; - unsigned long complete; int target; - int count; u32 zero_sum_result; } ops; struct r5dev { @@ -202,6 +233,7 @@ struct stripe_head_state { int locked, uptodate, to_read, to_write, failed, written; int to_fill, compute, req_compute, non_overwrite; int failed_num; + unsigned long ops_request; }; /* r6_state - extra state data only relevant to r6 */ @@ -228,9 +260,7 @@ struct r6_state { #define R5_Wantfill 12 /* dev->toread contains a bio that needs * filling */ -#define R5_Wantprexor 13 /* distinguish blocks ready for rmw from - * other "towrites" - */ +#define R5_Wantdrain 13 /* dev->towrite needs to be drained */ /* * Write method */ @@ -254,8 +284,10 @@ struct r6_state { #define STRIPE_EXPAND_READY 11 #define STRIPE_IO_STARTED 12 /* do not count towards 'bypass_count' */ #define STRIPE_FULL_WRITE 13 /* all blocks are set to be overwritten */ +#define STRIPE_BIOFILL_RUN 14 +#define STRIPE_COMPUTE_RUN 15 /* - * Operations flags (in issue order) + * Operation request flags */ #define STRIPE_OP_BIOFILL 0 #define STRIPE_OP_COMPUTE_BLK 1 @@ -263,14 +295,6 @@ struct r6_state { #define STRIPE_OP_BIODRAIN 3 #define STRIPE_OP_POSTXOR 4 #define STRIPE_OP_CHECK 5 -#define STRIPE_OP_IO 6 - -/* modifiers to the base operations * STRIPE_OP_MOD_REPAIR_PD - compute the parity block and write it back - * STRIPE_OP_MOD_DMA_CHECK - parity is not corrupted by the check - */ -#define STRIPE_OP_MOD_REPAIR_PD 7 -#define STRIPE_OP_MOD_DMA_CHECK 8 /* * Plugging: diff --git a/include/linux/rcuclassic.h b/include/linux/rcuclassic.h index b3aa05baab8a..8c774905dcfe 100644 --- a/include/linux/rcuclassic.h +++ b/include/linux/rcuclassic.h @@ -151,7 +151,10 @@ extern struct lockdep_map
rcu_lock_map; #define __synchronize_sched() synchronize_rcu() +#define call_rcu_sched(head, func) call_rcu(head, func) + extern void __rcu_init(void); +#define rcu_init_sched() do { } while (0) extern void rcu_check_callbacks(int cpu, int user); extern void rcu_restart_cpu(int cpu); diff --git a/include/linux/rculist.h b/include/linux/rculist.h index bde4586f4382..b0f39be08b6c 100644 --- a/include/linux/rculist.h +++ b/include/linux/rculist.h @@ -1,6 +1,373 @@ #ifndef _LINUX_RCULIST_H #define _LINUX_RCULIST_H +#ifdef __KERNEL__ + +/* + * RCU-protected list version + */ #include <linux/list.h> +#include <linux/rcupdate.h> + +/* + * Insert a new entry between two known consecutive entries. + * + * This is only for internal list manipulation where we know + * the prev/next entries already! + */ +static inline void __list_add_rcu(struct list_head *new, + struct list_head *prev, struct list_head *next) +{ + new->next = next; + new->prev = prev; + rcu_assign_pointer(prev->next, new); + next->prev = new; +} + +/** + * list_add_rcu - add a new entry to rcu-protected list + * @new: new entry to be added + * @head: list head to add it after + * + * Insert a new entry after the specified head. + * This is good for implementing stacks. + * + * The caller must take whatever precautions are necessary + * (such as holding appropriate locks) to avoid racing + * with another list-mutation primitive, such as list_add_rcu() + * or list_del_rcu(), running on this same list. + * However, it is perfectly legal to run concurrently with + * the _rcu list-traversal primitives, such as + * list_for_each_entry_rcu(). + */ +static inline void list_add_rcu(struct list_head *new, struct list_head *head) +{ + __list_add_rcu(new, head, head->next); +} + +/** + * list_add_tail_rcu - add a new entry to rcu-protected list + * @new: new entry to be added + * @head: list head to add it before + * + * Insert a new entry before the specified head. + * This is useful for implementing queues. + * + * The caller must take whatever precautions are necessary + * (such as holding appropriate locks) to avoid racing + * with another list-mutation primitive, such as list_add_tail_rcu() + * or list_del_rcu(), running on this same list. + * However, it is perfectly legal to run concurrently with + * the _rcu list-traversal primitives, such as + * list_for_each_entry_rcu(). + */ +static inline void list_add_tail_rcu(struct list_head *new, + struct list_head *head) +{ + __list_add_rcu(new, head->prev, head); +} + +/** + * list_del_rcu - deletes entry from list without re-initialization + * @entry: the element to delete from the list. + * + * Note: list_empty() on entry does not return true after this, + * the entry is in an undefined state. It is useful for RCU based + * lockfree traversal. + * + * In particular, it means that we can not poison the forward + * pointers that may still be used for walking the list. + * + * The caller must take whatever precautions are necessary + * (such as holding appropriate locks) to avoid racing + * with another list-mutation primitive, such as list_del_rcu() + * or list_add_rcu(), running on this same list. + * However, it is perfectly legal to run concurrently with + * the _rcu list-traversal primitives, such as + * list_for_each_entry_rcu(). + * + * Note that the caller is not permitted to immediately free + * the newly deleted entry. Instead, either synchronize_rcu() + * or call_rcu() must be used to defer freeing until an RCU + * grace period has elapsed. 
+ */ +static inline void list_del_rcu(struct list_head *entry) +{ + __list_del(entry->prev, entry->next); + entry->prev = LIST_POISON2; +} + +/** + * list_replace_rcu - replace old entry by new one + * @old : the element to be replaced + * @new : the new element to insert + * + * The @old entry will be replaced with the @new entry atomically. + * Note: @old should not be empty. + */ +static inline void list_replace_rcu(struct list_head *old, + struct list_head *new) +{ + new->next = old->next; + new->prev = old->prev; + rcu_assign_pointer(new->prev->next, new); + new->next->prev = new; + old->prev = LIST_POISON2; +} + +/** + * list_splice_init_rcu - splice an RCU-protected list into an existing list. + * @list: the RCU-protected list to splice + * @head: the place in the list to splice the first list into + * @sync: function to sync: synchronize_rcu(), synchronize_sched(), ... + * + * @head can be RCU-read traversed concurrently with this function. + * + * Note that this function blocks. + * + * Important note: the caller must take whatever action is necessary to + * prevent any other updates to @head. In principle, it is possible + * to modify the list as soon as sync() begins execution. + * If this sort of thing becomes necessary, an alternative version + * based on call_rcu() could be created. But only if -really- + * needed -- there is no shortage of RCU API members. + */ +static inline void list_splice_init_rcu(struct list_head *list, + struct list_head *head, + void (*sync)(void)) +{ + struct list_head *first = list->next; + struct list_head *last = list->prev; + struct list_head *at = head->next; + + if (list_empty(list)) + return; + + /* "first" and "last" tracking list, so initialize it. */ + + INIT_LIST_HEAD(list); + + /* + * At this point, the list body still points to the source list. + * Wait for any readers to finish using the list before splicing + * the list body into the new list. Any new readers will see + * an empty list. + */ + + sync(); + + /* + * Readers are finished with the source list, so perform splice. + * The order is important if the new list is global and accessible + * to concurrent RCU readers. Note that RCU readers are not + * permitted to traverse the prev pointers without excluding + * this function. + */ + + last->next = at; + rcu_assign_pointer(head->next, first); + first->prev = head; + at->prev = last; +} + +/** + * list_for_each_rcu - iterate over an rcu-protected list + * @pos: the &struct list_head to use as a loop cursor. + * @head: the head for your list. + * + * This list-traversal primitive may safely run concurrently with + * the _rcu list-mutation primitives such as list_add_rcu() + * as long as the traversal is guarded by rcu_read_lock(). + */ +#define list_for_each_rcu(pos, head) \ + for (pos = rcu_dereference((head)->next); \ + prefetch(pos->next), pos != (head); \ + pos = rcu_dereference(pos->next)) + +#define __list_for_each_rcu(pos, head) \ + for (pos = rcu_dereference((head)->next); \ + pos != (head); \ + pos = rcu_dereference(pos->next)) + +/** + * list_for_each_entry_rcu - iterate over rcu list of given type + * @pos: the type * to use as a loop cursor. + * @head: the head for your list. + * @member: the name of the list_struct within the struct. + * + * This list-traversal primitive may safely run concurrently with + * the _rcu list-mutation primitives such as list_add_rcu() + * as long as the traversal is guarded by rcu_read_lock().
+ */ +#define list_for_each_entry_rcu(pos, head, member) \ + for (pos = list_entry(rcu_dereference((head)->next), typeof(*pos), member); \ + prefetch(pos->member.next), &pos->member != (head); \ + pos = list_entry(rcu_dereference(pos->member.next), typeof(*pos), member)) + + +/** + * list_for_each_continue_rcu + * @pos: the &struct list_head to use as a loop cursor. + * @head: the head for your list. + * + * Iterate over an rcu-protected list, continuing after current point. + * + * This list-traversal primitive may safely run concurrently with + * the _rcu list-mutation primitives such as list_add_rcu() + * as long as the traversal is guarded by rcu_read_lock(). + */ +#define list_for_each_continue_rcu(pos, head) \ + for ((pos) = rcu_dereference((pos)->next); \ + prefetch((pos)->next), (pos) != (head); \ + (pos) = rcu_dereference((pos)->next)) + +/** + * hlist_del_rcu - deletes entry from hash list without re-initialization + * @n: the element to delete from the hash list. + * + * Note: list_unhashed() on entry does not return true after this, + * the entry is in an undefined state. It is useful for RCU based + * lockfree traversal. + * + * In particular, it means that we can not poison the forward + * pointers that may still be used for walking the hash list. + * + * The caller must take whatever precautions are necessary + * (such as holding appropriate locks) to avoid racing + * with another list-mutation primitive, such as hlist_add_head_rcu() + * or hlist_del_rcu(), running on this same list. + * However, it is perfectly legal to run concurrently with + * the _rcu list-traversal primitives, such as + * hlist_for_each_entry(). + */ +static inline void hlist_del_rcu(struct hlist_node *n) +{ + __hlist_del(n); + n->pprev = LIST_POISON2; +} + +/** + * hlist_replace_rcu - replace old entry by new one + * @old : the element to be replaced + * @new : the new element to insert + * + * The @old entry will be replaced with the @new entry atomically. + */ +static inline void hlist_replace_rcu(struct hlist_node *old, + struct hlist_node *new) +{ + struct hlist_node *next = old->next; + + new->next = next; + new->pprev = old->pprev; + rcu_assign_pointer(*new->pprev, new); + if (next) + new->next->pprev = &new->next; + old->pprev = LIST_POISON2; +} + +/** + * hlist_add_head_rcu + * @n: the element to add to the hash list. + * @h: the list to add to. + * + * Description: + * Adds the specified element to the specified hlist, + * while permitting racing traversals. + * + * The caller must take whatever precautions are necessary + * (such as holding appropriate locks) to avoid racing + * with another list-mutation primitive, such as hlist_add_head_rcu() + * or hlist_del_rcu(), running on this same list. + * However, it is perfectly legal to run concurrently with + * the _rcu list-traversal primitives, such as + * hlist_for_each_entry_rcu(), used to prevent memory-consistency + * problems on Alpha CPUs. Regardless of the type of CPU, the + * list-traversal primitive must be guarded by rcu_read_lock(). + */ +static inline void hlist_add_head_rcu(struct hlist_node *n, + struct hlist_head *h) +{ + struct hlist_node *first = h->first; + + n->next = first; + n->pprev = &h->first; + rcu_assign_pointer(h->first, n); + if (first) + first->pprev = &n->next; +} + +/** + * hlist_add_before_rcu + * @n: the new element to add to the hash list. + * @next: the existing element to add the new element before. 
+ * + * Description: + * Adds the specified element to the specified hlist + * before the specified node while permitting racing traversals. + * + * The caller must take whatever precautions are necessary + * (such as holding appropriate locks) to avoid racing + * with another list-mutation primitive, such as hlist_add_head_rcu() + * or hlist_del_rcu(), running on this same list. + * However, it is perfectly legal to run concurrently with + * the _rcu list-traversal primitives, such as + * hlist_for_each_entry_rcu(), used to prevent memory-consistency + * problems on Alpha CPUs. + */ +static inline void hlist_add_before_rcu(struct hlist_node *n, + struct hlist_node *next) +{ + n->pprev = next->pprev; + n->next = next; + rcu_assign_pointer(*(n->pprev), n); + next->pprev = &n->next; +} + +/** + * hlist_add_after_rcu + * @prev: the existing element to add the new element after. + * @n: the new element to add to the hash list. + * + * Description: + * Adds the specified element to the specified hlist + * after the specified node while permitting racing traversals. + * + * The caller must take whatever precautions are necessary + * (such as holding appropriate locks) to avoid racing + * with another list-mutation primitive, such as hlist_add_head_rcu() + * or hlist_del_rcu(), running on this same list. + * However, it is perfectly legal to run concurrently with + * the _rcu list-traversal primitives, such as + * hlist_for_each_entry_rcu(), used to prevent memory-consistency + * problems on Alpha CPUs. + */ +static inline void hlist_add_after_rcu(struct hlist_node *prev, + struct hlist_node *n) +{ + n->next = prev->next; + n->pprev = &prev->next; + rcu_assign_pointer(prev->next, n); + if (n->next) + n->next->pprev = &n->next; +} + +/** + * hlist_for_each_entry_rcu - iterate over rcu list of given type + * @tpos: the type * to use as a loop cursor. + * @pos: the &struct hlist_node to use as a loop cursor. + * @head: the head for your list. + * @member: the name of the hlist_node within the struct. + * + * This list-traversal primitive may safely run concurrently with + * the _rcu list-mutation primitives such as hlist_add_head_rcu() + * as long as the traversal is guarded by rcu_read_lock(). + */ +#define hlist_for_each_entry_rcu(tpos, pos, head, member) \ + for (pos = rcu_dereference((head)->first); \ + pos && ({ prefetch(pos->next); 1; }) && \ + ({ tpos = hlist_entry(pos, typeof(*tpos), member); 1; }); \ + pos = rcu_dereference(pos->next)) -#endif /* _LINUX_RCULIST_H */ +#endif /* __KERNEL__ */ +#endif diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h index d42dbec06083..e8b4039cfb2f 100644 --- a/include/linux/rcupdate.h +++ b/include/linux/rcupdate.h @@ -40,6 +40,7 @@ #include <linux/cpumask.h> #include <linux/seqlock.h> #include <linux/lockdep.h> +#include <linux/completion.h> /** * struct rcu_head - callback structure for use with RCU @@ -168,6 +169,27 @@ struct rcu_head { (p) = (v); \ }) +/* Infrastructure to implement the synchronize_() primitives. */ + +struct rcu_synchronize { + struct rcu_head head; + struct completion completion; +}; + +extern void wakeme_after_rcu(struct rcu_head *head); + +#define synchronize_rcu_xxx(name, func) \ +void name(void) \ +{ \ + struct rcu_synchronize rcu; \ + \ + init_completion(&rcu.completion); \ + /* Will wake me after RCU finished. */ \ + func(&rcu.head, wakeme_after_rcu); \ + /* Wait for it. 
*/ \ + wait_for_completion(&rcu.completion); \ +} + /** * synchronize_sched - block until all CPUs have exited any non-preemptive * kernel code sequences. @@ -224,8 +246,8 @@ extern void call_rcu_bh(struct rcu_head *head, /* Exported common interfaces */ extern void synchronize_rcu(void); extern void rcu_barrier(void); -extern long rcu_batches_completed(void); -extern long rcu_batches_completed_bh(void); +extern void rcu_barrier_bh(void); +extern void rcu_barrier_sched(void); /* Internal to kernel */ extern void rcu_init(void); diff --git a/include/linux/rcupreempt.h b/include/linux/rcupreempt.h index 8a05c7e20bc4..f04b64eca636 100644 --- a/include/linux/rcupreempt.h +++ b/include/linux/rcupreempt.h @@ -40,10 +40,39 @@ #include <linux/cpumask.h> #include <linux/seqlock.h> -#define rcu_qsctr_inc(cpu) +struct rcu_dyntick_sched { + int dynticks; + int dynticks_snap; + int sched_qs; + int sched_qs_snap; + int sched_dynticks_snap; +}; + +DECLARE_PER_CPU(struct rcu_dyntick_sched, rcu_dyntick_sched); + +static inline void rcu_qsctr_inc(int cpu) +{ + struct rcu_dyntick_sched *rdssp = &per_cpu(rcu_dyntick_sched, cpu); + + rdssp->sched_qs++; +} #define rcu_bh_qsctr_inc(cpu) #define call_rcu_bh(head, rcu) call_rcu(head, rcu) +/** + * call_rcu_sched - Queue RCU callback for invocation after sched grace period. + * @head: structure to be used for queueing the RCU updates. + * @func: actual update function to be invoked after the grace period + * + * The update function will be invoked some time after a full + * synchronize_sched()-style grace period elapses, in other words after + * all currently executing preempt-disabled sections of code (including + * hardirq handlers, NMI handlers, and local_irq_save() blocks) have + * completed. + */ +extern void call_rcu_sched(struct rcu_head *head, + void (*func)(struct rcu_head *head)); + extern void __rcu_read_lock(void) __acquires(RCU); extern void __rcu_read_unlock(void) __releases(RCU); extern int rcu_pending(int cpu); @@ -55,6 +84,7 @@ extern int rcu_needs_cpu(int cpu); extern void __synchronize_sched(void); extern void __rcu_init(void); +extern void rcu_init_sched(void); extern void rcu_check_callbacks(int cpu, int user); extern void rcu_restart_cpu(int cpu); extern long rcu_batches_completed(void); @@ -81,20 +111,20 @@ extern struct rcupreempt_trace *rcupreempt_trace_cpu(int cpu); struct softirq_action; #ifdef CONFIG_NO_HZ -DECLARE_PER_CPU(long, dynticks_progress_counter); +DECLARE_PER_CPU(struct rcu_dyntick_sched, rcu_dyntick_sched); static inline void rcu_enter_nohz(void) { smp_mb(); /* CPUs seeing ++ must see prior RCU read-side crit sects */ - __get_cpu_var(dynticks_progress_counter)++; - WARN_ON(__get_cpu_var(dynticks_progress_counter) & 0x1); + __get_cpu_var(rcu_dyntick_sched).dynticks++; + WARN_ON(__get_cpu_var(rcu_dyntick_sched).dynticks & 0x1); } static inline void rcu_exit_nohz(void) { - __get_cpu_var(dynticks_progress_counter)++; smp_mb(); /* CPUs seeing ++ must see later RCU read-side crit sects */ - WARN_ON(!(__get_cpu_var(dynticks_progress_counter) & 0x1)); + __get_cpu_var(rcu_dyntick_sched).dynticks++; + WARN_ON(!(__get_cpu_var(rcu_dyntick_sched).dynticks & 0x1)); } #else /* CONFIG_NO_HZ */ diff --git a/include/linux/resume-trace.h b/include/linux/resume-trace.h index f3f4f28c6960..c9ba2fdf807d 100644 --- a/include/linux/resume-trace.h +++ b/include/linux/resume-trace.h @@ -8,7 +8,7 @@ extern int pm_trace_enabled; struct device; extern void set_trace_device(struct device *); -extern void generate_resume_trace(void *tracedata, unsigned int 
user); +extern void generate_resume_trace(const void *tracedata, unsigned int user); #define TRACE_DEVICE(dev) do { \ if (pm_trace_enabled) \ diff --git a/include/linux/rfkill.h b/include/linux/rfkill.h index e3ab21d7fc7f..c5f6e54ec6ae 100644 --- a/include/linux/rfkill.h +++ b/include/linux/rfkill.h @@ -34,26 +34,37 @@ * RFKILL_TYPE_BLUETOOTH: switch is on a bluetooth device. * RFKILL_TYPE_UWB: switch is on an ultra wideband device. * RFKILL_TYPE_WIMAX: switch is on a WiMAX device. + * RFKILL_TYPE_WWAN: switch is on a wireless WAN device. */ enum rfkill_type { RFKILL_TYPE_WLAN , RFKILL_TYPE_BLUETOOTH, RFKILL_TYPE_UWB, RFKILL_TYPE_WIMAX, + RFKILL_TYPE_WWAN, RFKILL_TYPE_MAX, }; enum rfkill_state { - RFKILL_STATE_OFF = 0, - RFKILL_STATE_ON = 1, + RFKILL_STATE_SOFT_BLOCKED = 0, /* Radio output blocked */ + RFKILL_STATE_UNBLOCKED = 1, /* Radio output allowed */ + RFKILL_STATE_HARD_BLOCKED = 2, /* Output blocked, non-overrideable */ }; +/* + * These are DEPRECATED, drivers using them should be verified to + * comply with the rfkill usage guidelines in Documentation/rfkill.txt + * and then converted to use the new names for rfkill_state + */ +#define RFKILL_STATE_OFF RFKILL_STATE_SOFT_BLOCKED +#define RFKILL_STATE_ON RFKILL_STATE_UNBLOCKED + /** * struct rfkill - rfkill control structure. * @name: Name of the switch. * @type: Radio type which the button controls, the value stored * here should be a value from enum rfkill_type. - * @state: State of the switch (on/off). + * @state: State of the switch, "UNBLOCKED" means radio can operate. * @user_claim_unsupported: Whether the hardware supports exclusive * RF-kill control by userspace. Set this before registering. * @user_claim: Set when the switch is controlled exclusively by userspace. @@ -61,6 +72,12 @@ enum rfkill_state { * @data: Pointer to the RF button drivers private data which will be * passed along when toggling radio state. * @toggle_radio(): Mandatory handler to control state of the radio. + * only RFKILL_STATE_SOFT_BLOCKED and RFKILL_STATE_UNBLOCKED are + * valid parameters. + * @get_state(): handler to read current radio state from hardware, + * may be called from atomic context, should return 0 on success. + * Either this handler OR judicious use of rfkill_force_state() is + * MANDATORY for any driver capable of RFKILL_STATE_HARD_BLOCKED. * @led_trigger: A LED trigger for this button's LED. * @dev: Device structure integrating the switch into device tree. * @node: Used to place switch into list of all switches known to the @@ -80,6 +97,7 @@ struct rfkill { void *data; int (*toggle_radio)(void *data, enum rfkill_state state); + int (*get_state)(void *data, enum rfkill_state *state); #ifdef CONFIG_RFKILL_LEDS struct led_trigger led_trigger; @@ -95,6 +113,21 @@ void rfkill_free(struct rfkill *rfkill); int rfkill_register(struct rfkill *rfkill); void rfkill_unregister(struct rfkill *rfkill); +int rfkill_force_state(struct rfkill *rfkill, enum rfkill_state state); + +/** + * rfkill_state_complement - return complementary state + * @state: state to return the complement of + * + * Returns RFKILL_STATE_SOFT_BLOCKED if @state is RFKILL_STATE_UNBLOCKED, + * returns RFKILL_STATE_UNBLOCKED otherwise. + */ +static inline enum rfkill_state rfkill_state_complement(enum rfkill_state state) +{ + return (state == RFKILL_STATE_UNBLOCKED) ? + RFKILL_STATE_SOFT_BLOCKED : RFKILL_STATE_UNBLOCKED; +} + /** * rfkill_get_led_name - Get the LED trigger name for the button's LED.
* This function might return a NULL pointer if registering of the @@ -110,4 +143,11 @@ static inline char *rfkill_get_led_name(struct rfkill *rfkill) #endif } +/* rfkill notification chain */ +#define RFKILL_STATE_CHANGED 0x0001 /* state of a normal rfkill + switch has changed */ + +int register_rfkill_notifier(struct notifier_block *nb); +int unregister_rfkill_notifier(struct notifier_block *nb); + #endif /* RFKILL_H */ diff --git a/include/linux/rtnetlink.h b/include/linux/rtnetlink.h index b358c704d102..f4d386c191f5 100644 --- a/include/linux/rtnetlink.h +++ b/include/linux/rtnetlink.h @@ -482,6 +482,7 @@ enum TCA_RATE, TCA_FCNT, TCA_STATS2, + TCA_STAB, __TCA_MAX }; diff --git a/include/linux/scatterlist.h b/include/linux/scatterlist.h index 71fc81360048..e5996984ddd0 100644 --- a/include/linux/scatterlist.h +++ b/include/linux/scatterlist.h @@ -224,4 +224,42 @@ size_t sg_copy_to_buffer(struct scatterlist *sgl, unsigned int nents, */ #define SG_MAX_SINGLE_ALLOC (PAGE_SIZE / sizeof(struct scatterlist)) + +/* + * Mapping sg iterator + * + * Iterates over sg entries mapping page-by-page. On each successful + * iteration, @miter->page points to the mapped page and + * @miter->length bytes of data can be accessed at @miter->addr. As + * long as an iteration is enclosed between start and stop, the user + * is free to choose control structure and when to stop. + * + * @miter->consumed is set to @miter->length on each iteration. It + * can be adjusted if the user can't consume all the bytes in one go. + * Also, a stopped iteration can be resumed by calling next on it. + * This is useful when iteration needs to release all resources and + * continue later (e.g. at the next interrupt). + */ + +#define SG_MITER_ATOMIC (1 << 0) /* use kmap_atomic */ + +struct sg_mapping_iter { + /* the following three fields can be accessed directly */ + struct page *page; /* currently mapped page */ + void *addr; /* pointer to the mapped area */ + size_t length; /* length of the mapped area */ + size_t consumed; /* number of consumed bytes */ + + /* these are internal states, keep away */ + struct scatterlist *__sg; /* current entry */ + unsigned int __nents; /* nr of remaining entries */ + unsigned int __offset; /* offset within sg */ + unsigned int __flags; +}; + +void sg_miter_start(struct sg_mapping_iter *miter, struct scatterlist *sgl, + unsigned int nents, unsigned int flags); +bool sg_miter_next(struct sg_mapping_iter *miter); +void sg_miter_stop(struct sg_mapping_iter *miter); + #endif /* _LINUX_SCATTERLIST_H */ diff --git a/include/linux/sched.h b/include/linux/sched.h index c5d3f847ca8d..dc7e592c473a 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h @@ -134,7 +134,6 @@ extern unsigned long nr_running(void); extern unsigned long nr_uninterruptible(void); extern unsigned long nr_active(void); extern unsigned long nr_iowait(void); -extern unsigned long weighted_cpuload(const int cpu); struct seq_file; struct cfs_rq; @@ -246,6 +245,8 @@ extern asmlinkage void schedule_tail(struct task_struct *prev); extern void init_idle(struct task_struct *idle, int cpu); extern void init_idle_bootup_task(struct task_struct *idle); +extern int runqueue_is_locked(void); + extern cpumask_t nohz_cpu_mask; #if defined(CONFIG_SMP) && defined(CONFIG_NO_HZ) extern int select_nohz_load_balancer(int cpu); @@ -294,10 +295,11 @@ extern void softlockup_tick(void); extern void spawn_softlockup_task(void); extern void touch_softlockup_watchdog(void); extern void touch_all_softlockup_watchdogs(void); -extern unsigned long
softlockup_thresh; +extern unsigned int softlockup_panic; extern unsigned long sysctl_hung_task_check_count; extern unsigned long sysctl_hung_task_timeout_secs; extern unsigned long sysctl_hung_task_warnings; +extern int softlockup_thresh; #else static inline void softlockup_tick(void) { @@ -784,6 +786,8 @@ struct sched_domain { unsigned int balance_interval; /* initialise to 1. units in ms. */ unsigned int nr_balance_failed; /* initialise to 0 */ + u64 last_update; + #ifdef CONFIG_SCHEDSTATS /* load_balance() stats */ unsigned int lb_count[CPU_MAX_IDLE_TYPES]; @@ -821,24 +825,16 @@ extern void partition_sched_domains(int ndoms_new, cpumask_t *doms_new, struct sched_domain_attr *dattr_new); extern int arch_reinit_sched_domains(void); -#endif /* CONFIG_SMP */ +#else /* CONFIG_SMP */ -/* - * A runqueue laden with a single nice 0 task scores a weighted_cpuload of - * SCHED_LOAD_SCALE. This function returns 1 if any cpu is laden with a - * task of nice 0 or enough lower priority tasks to bring up the - * weighted_cpuload - */ -static inline int above_background_load(void) -{ - unsigned long cpu; +struct sched_domain_attr; - for_each_online_cpu(cpu) { - if (weighted_cpuload(cpu) >= SCHED_LOAD_SCALE) - return 1; - } - return 0; +static inline void +partition_sched_domains(int ndoms_new, cpumask_t *doms_new, + struct sched_domain_attr *dattr_new) +{ } +#endif /* !CONFIG_SMP */ struct io_context; /* See blkdev.h */ #define NGROUPS_SMALL 32 @@ -921,8 +917,8 @@ struct sched_class { void (*set_cpus_allowed)(struct task_struct *p, const cpumask_t *newmask); - void (*join_domain)(struct rq *rq); - void (*leave_domain)(struct rq *rq); + void (*rq_online)(struct rq *rq); + void (*rq_offline)(struct rq *rq); void (*switched_from) (struct rq *this_rq, struct task_struct *task, int running); @@ -1039,6 +1035,7 @@ struct task_struct { #endif int prio, static_prio, normal_prio; + unsigned int rt_priority; const struct sched_class *sched_class; struct sched_entity se; struct sched_rt_entity rt; @@ -1075,12 +1072,6 @@ struct task_struct { #endif struct list_head tasks; - /* - * ptrace_list/ptrace_children forms the list of my children - * that were stolen by a ptracer. - */ - struct list_head ptrace_children; - struct list_head ptrace_list; struct mm_struct *mm, *active_mm; @@ -1102,18 +1093,25 @@ struct task_struct { /* * pointers to (original) parent process, youngest child, younger sibling, * older sibling, respectively. (p->father can be replaced with - * p->parent->pid) + * p->real_parent->pid) */ - struct task_struct *real_parent; /* real parent process (when being debugged) */ - struct task_struct *parent; /* parent process */ + struct task_struct *real_parent; /* real parent process */ + struct task_struct *parent; /* recipient of SIGCHLD, wait4() reports */ /* - * children/sibling forms the list of my children plus the - * tasks I'm ptracing. + * children/sibling forms the list of my natural children */ struct list_head children; /* list of my children */ struct list_head sibling; /* linkage in my parent's children list */ struct task_struct *group_leader; /* threadgroup leader */ + /* + * ptraced is the list of tasks this task is using ptrace on. + * This includes both natural children and PTRACE_ATTACH targets. + * p->ptrace_entry is p's link on the p->parent->ptraced list. + */ + struct list_head ptraced; + struct list_head ptrace_entry; + /* PID/PID hash table linkage. 
*/ struct pid_link pids[PIDTYPE_MAX]; struct list_head thread_group; @@ -1122,7 +1120,6 @@ struct task_struct { int __user *set_child_tid; /* CLONE_CHILD_SETTID */ int __user *clear_child_tid; /* CLONE_CHILD_CLEARTID */ - unsigned int rt_priority; cputime_t utime, stime, utimescaled, stimescaled; cputime_t gtime; cputime_t prev_utime, prev_stime; @@ -1141,12 +1138,12 @@ struct task_struct { gid_t gid,egid,sgid,fsgid; struct group_info *group_info; kernel_cap_t cap_effective, cap_inheritable, cap_permitted, cap_bset; - unsigned securebits; struct user_struct *user; + unsigned securebits; #ifdef CONFIG_KEYS + unsigned char jit_keyring; /* default keyring to attach requested keys to */ struct key *request_key_auth; /* assumed request_key authority */ struct key *thread_keyring; /* keyring private to this thread */ - unsigned char jit_keyring; /* default keyring to attach requested keys to */ #endif char comm[TASK_COMM_LEN]; /* executable name excluding path - access with [gs]et_task_comm (which lock @@ -1233,8 +1230,8 @@ struct task_struct { # define MAX_LOCK_DEPTH 48UL u64 curr_chain_key; int lockdep_depth; - struct held_lock held_locks[MAX_LOCK_DEPTH]; unsigned int lockdep_recursion; + struct held_lock held_locks[MAX_LOCK_DEPTH]; #endif /* journalling filesystem info */ @@ -1262,10 +1259,6 @@ struct task_struct { u64 acct_vm_mem1; /* accumulated virtual memory usage */ cputime_t acct_stimexpd;/* stime since last update */ #endif -#ifdef CONFIG_NUMA - struct mempolicy *mempolicy; - short il_next; -#endif #ifdef CONFIG_CPUSETS nodemask_t mems_allowed; int cpuset_mems_generation; @@ -1285,6 +1278,10 @@ struct task_struct { struct list_head pi_state_list; struct futex_pi_state *pi_state_cache; #endif +#ifdef CONFIG_NUMA + struct mempolicy *mempolicy; + short il_next; +#endif atomic_t fs_excl; /* holding fs exclusive resources */ struct rcu_head rcu; @@ -1504,9 +1501,11 @@ static inline void put_task_struct(struct task_struct *t) #define PF_SWAPWRITE 0x00800000 /* Allowed to write to swap */ #define PF_SPREAD_PAGE 0x01000000 /* Spread page cache over cpuset */ #define PF_SPREAD_SLAB 0x02000000 /* Spread some slab caches over cpuset */ +#define PF_THREAD_BOUND 0x04000000 /* Thread bound to specific cpu */ #define PF_MEMPOLICY 0x10000000 /* Non-default NUMA mempolicy */ #define PF_MUTEX_TESTER 0x20000000 /* Thread belongs to the rt mutex tester */ #define PF_FREEZER_SKIP 0x40000000 /* Freezer should not count it as freezeable */ +#define PF_FREEZER_NOSIG 0x80000000 /* Freezer won't send signals to it */ /* * Only the _current_ task can read/write to tsk->flags, but other @@ -1573,13 +1572,28 @@ static inline void sched_clock_idle_sleep_event(void) static inline void sched_clock_idle_wakeup_event(u64 delta_ns) { } -#else + +#ifdef CONFIG_NO_HZ +static inline void sched_clock_tick_stop(int cpu) +{ +} + +static inline void sched_clock_tick_start(int cpu) +{ +} +#endif + +#else /* CONFIG_HAVE_UNSTABLE_SCHED_CLOCK */ extern void sched_clock_init(void); extern u64 sched_clock_cpu(int cpu); extern void sched_clock_tick(void); extern void sched_clock_idle_sleep_event(void); extern void sched_clock_idle_wakeup_event(u64 delta_ns); +#ifdef CONFIG_NO_HZ +extern void sched_clock_tick_stop(int cpu); +extern void sched_clock_tick_start(int cpu); #endif +#endif /* CONFIG_HAVE_UNSTABLE_SCHED_CLOCK */ /* * For kernel-internal use: high-speed (but slightly incorrect) per-cpu @@ -1622,6 +1636,7 @@ extern unsigned int sysctl_sched_child_runs_first; extern unsigned int sysctl_sched_features; extern unsigned int 
sysctl_sched_migration_cost; extern unsigned int sysctl_sched_nr_migrate; +extern unsigned int sysctl_sched_shares_ratelimit; int sched_nr_latency_handler(struct ctl_table *table, int write, struct file *file, void __user *buffer, size_t *length, @@ -1655,6 +1670,8 @@ extern int can_nice(const struct task_struct *p, const int nice); extern int task_curr(const struct task_struct *p); extern int idle_cpu(int cpu); extern int sched_setscheduler(struct task_struct *, int, struct sched_param *); +extern int sched_setscheduler_nocheck(struct task_struct *, int, + struct sched_param *); extern struct task_struct *idle_task(int cpu); extern struct task_struct *curr_task(int cpu); extern void set_curr_task(int cpu, struct task_struct *p); @@ -1870,9 +1887,6 @@ extern void wait_task_inactive(struct task_struct * p); #define wait_task_inactive(p) do { } while (0) #endif -#define remove_parent(p) list_del_init(&(p)->sibling) -#define add_parent(p) list_add_tail(&(p)->sibling,&(p)->parent->children) - #define next_task(p) list_entry(rcu_dereference((p)->tasks.next), struct task_struct, tasks) #define for_each_process(p) \ @@ -2131,6 +2145,18 @@ static inline void arch_pick_mmap_layout(struct mm_struct *mm) } #endif +#ifdef CONFIG_TRACING +extern void +__trace_special(void *__tr, void *__data, + unsigned long arg1, unsigned long arg2, unsigned long arg3); +#else +static inline void +__trace_special(void *__tr, void *__data, + unsigned long arg1, unsigned long arg2, unsigned long arg3) +{ +} +#endif + extern long sched_setaffinity(pid_t pid, const cpumask_t *new_mask); extern long sched_getaffinity(pid_t pid, cpumask_t *mask); @@ -2225,6 +2251,8 @@ static inline void mm_init_owner(struct mm_struct *mm, struct task_struct *p) } #endif /* CONFIG_MM_OWNER */ +#define TASK_STATE_TO_CHAR_STR "RSDTtZX" + #endif /* __KERNEL__ */ #endif diff --git a/include/linux/security.h b/include/linux/security.h index 50737c70e78e..31c8851ec5d0 100644 --- a/include/linux/security.h +++ b/include/linux/security.h @@ -46,7 +46,8 @@ struct audit_krule; */ extern int cap_capable(struct task_struct *tsk, int cap); extern int cap_settime(struct timespec *ts, struct timezone *tz); -extern int cap_ptrace(struct task_struct *parent, struct task_struct *child); +extern int cap_ptrace(struct task_struct *parent, struct task_struct *child, + unsigned int mode); extern int cap_capget(struct task_struct *target, kernel_cap_t *effective, kernel_cap_t *inheritable, kernel_cap_t *permitted); extern int cap_capset_check(struct task_struct *target, kernel_cap_t *effective, kernel_cap_t *inheritable, kernel_cap_t *permitted); extern void cap_capset_set(struct task_struct *target, kernel_cap_t *effective, kernel_cap_t *inheritable, kernel_cap_t *permitted); @@ -79,6 +80,7 @@ struct xfrm_selector; struct xfrm_policy; struct xfrm_state; struct xfrm_user_sec_ctx; +struct seq_file; extern int cap_netlink_send(struct sock *sk, struct sk_buff *skb); extern int cap_netlink_recv(struct sk_buff *skb, int cap); @@ -289,10 +291,6 @@ static inline void security_free_mnt_opts(struct security_mnt_opts *opts) * Update module state after a successful pivot. * @old_path contains the path for the old root. * @new_path contains the path for the new root. 
- * @sb_get_mnt_opts: - * Get the security relevant mount options used for a superblock - * @sb the superblock to get security mount options from - * @opts binary data structure containing all lsm mount data * @sb_set_mnt_opts: * Set the security relevant mount options used for a superblock * @sb the superblock to set security mount options for @@ -1170,6 +1168,7 @@ static inline void security_free_mnt_opts(struct security_mnt_opts *opts) * attributes would be changed by the execve. * @parent contains the task_struct structure for parent process. * @child contains the task_struct structure for child process. + * @mode contains the PTRACE_MODE flags indicating the form of access. * Return 0 if permission is granted. * @capget: * Get the @effective, @inheritable, and @permitted capability sets for @@ -1240,11 +1239,6 @@ static inline void security_free_mnt_opts(struct security_mnt_opts *opts) * @pages contains the number of pages. * Return 0 if permission is granted. * - * @register_security: - * allow module stacking. - * @name contains the name of the security module being stacked. - * @ops contains a pointer to the struct security_operations of the module to stack. - * * @secid_to_secctx: * Convert secid to security context. * @secid contains the security ID. @@ -1295,7 +1289,8 @@ static inline void security_free_mnt_opts(struct security_mnt_opts *opts) struct security_operations { char name[SECURITY_NAME_MAX + 1]; - int (*ptrace) (struct task_struct *parent, struct task_struct *child); + int (*ptrace) (struct task_struct *parent, struct task_struct *child, + unsigned int mode); int (*capget) (struct task_struct *target, kernel_cap_t *effective, kernel_cap_t *inheritable, kernel_cap_t *permitted); @@ -1328,6 +1323,7 @@ struct security_operations { void (*sb_free_security) (struct super_block *sb); int (*sb_copy_data) (char *orig, char *copy); int (*sb_kern_mount) (struct super_block *sb, void *data); + int (*sb_show_options) (struct seq_file *m, struct super_block *sb); int (*sb_statfs) (struct dentry *dentry); int (*sb_mount) (char *dev_name, struct path *path, char *type, unsigned long flags, void *data); @@ -1343,8 +1339,6 @@ struct security_operations { struct path *new_path); void (*sb_post_pivotroot) (struct path *old_path, struct path *new_path); - int (*sb_get_mnt_opts) (const struct super_block *sb, - struct security_mnt_opts *opts); int (*sb_set_mnt_opts) (struct super_block *sb, struct security_mnt_opts *opts); void (*sb_clone_mnt_opts) (const struct super_block *oldsb, @@ -1472,10 +1466,6 @@ struct security_operations { int (*netlink_send) (struct sock *sk, struct sk_buff *skb); int (*netlink_recv) (struct sk_buff *skb, int cap); - /* allow module stacking */ - int (*register_security) (const char *name, - struct security_operations *ops); - void (*d_instantiate) (struct dentry *dentry, struct inode *inode); int (*getprocattr) (struct task_struct *p, char *name, char **value); @@ -1565,7 +1555,6 @@ struct security_operations { extern int security_init(void); extern int security_module_enable(struct security_operations *ops); extern int register_security(struct security_operations *ops); -extern int mod_reg_security(const char *name, struct security_operations *ops); extern struct dentry *securityfs_create_file(const char *name, mode_t mode, struct dentry *parent, void *data, const struct file_operations *fops); @@ -1573,7 +1562,8 @@ extern struct dentry *securityfs_create_dir(const char *name, struct dentry *par extern void securityfs_remove(struct dentry *dentry); /* Security 
operations */ -int security_ptrace(struct task_struct *parent, struct task_struct *child); +int security_ptrace(struct task_struct *parent, struct task_struct *child, + unsigned int mode); int security_capget(struct task_struct *target, kernel_cap_t *effective, kernel_cap_t *inheritable, @@ -1606,6 +1596,7 @@ int security_sb_alloc(struct super_block *sb); void security_sb_free(struct super_block *sb); int security_sb_copy_data(char *orig, char *copy); int security_sb_kern_mount(struct super_block *sb, void *data); +int security_sb_show_options(struct seq_file *m, struct super_block *sb); int security_sb_statfs(struct dentry *dentry); int security_sb_mount(char *dev_name, struct path *path, char *type, unsigned long flags, void *data); @@ -1617,8 +1608,6 @@ void security_sb_post_remount(struct vfsmount *mnt, unsigned long flags, void *d void security_sb_post_addmount(struct vfsmount *mnt, struct path *mountpoint); int security_sb_pivotroot(struct path *old_path, struct path *new_path); void security_sb_post_pivotroot(struct path *old_path, struct path *new_path); -int security_sb_get_mnt_opts(const struct super_block *sb, - struct security_mnt_opts *opts); int security_sb_set_mnt_opts(struct super_block *sb, struct security_mnt_opts *opts); void security_sb_clone_mnt_opts(const struct super_block *oldsb, struct super_block *newsb); @@ -1755,9 +1744,11 @@ static inline int security_init(void) return 0; } -static inline int security_ptrace(struct task_struct *parent, struct task_struct *child) +static inline int security_ptrace(struct task_struct *parent, + struct task_struct *child, + unsigned int mode) { - return cap_ptrace(parent, child); + return cap_ptrace(parent, child, mode); } static inline int security_capget(struct task_struct *target, @@ -1881,6 +1872,12 @@ static inline int security_sb_kern_mount(struct super_block *sb, void *data) return 0; } +static inline int security_sb_show_options(struct seq_file *m, + struct super_block *sb) +{ + return 0; +} + static inline int security_sb_statfs(struct dentry *dentry) { return 0; @@ -1927,12 +1924,6 @@ static inline int security_sb_pivotroot(struct path *old_path, static inline void security_sb_post_pivotroot(struct path *old_path, struct path *new_path) { } -static inline int security_sb_get_mnt_opts(const struct super_block *sb, - struct security_mnt_opts *opts) -{ - security_init_mnt_opts(opts); - return 0; -} static inline int security_sb_set_mnt_opts(struct super_block *sb, struct security_mnt_opts *opts) diff --git a/include/linux/seq_file_net.h b/include/linux/seq_file_net.h index 4ac52542a563..32c89bbe24a2 100644 --- a/include/linux/seq_file_net.h +++ b/include/linux/seq_file_net.h @@ -14,7 +14,10 @@ struct seq_net_private { int seq_open_net(struct inode *, struct file *, const struct seq_operations *, int); +int single_open_net(struct inode *, struct file *file, + int (*show)(struct seq_file *, void *)); int seq_release_net(struct inode *, struct file *); +int single_release_net(struct inode *, struct file *); static inline struct net *seq_file_net(struct seq_file *seq) { #ifdef CONFIG_NET_NS diff --git a/include/linux/serial_core.h b/include/linux/serial_core.h index d8f31de632c5..f3a1c0e45021 100644 --- a/include/linux/serial_core.h +++ b/include/linux/serial_core.h @@ -190,6 +190,7 @@ struct uart_ops { void (*break_ctl)(struct uart_port *, int ctl); int (*startup)(struct uart_port *); void (*shutdown)(struct uart_port *); + void (*flush_buffer)(struct uart_port *); void (*set_termios)(struct uart_port *, struct ktermios *new, 
struct ktermios *old); void (*set_ldisc)(struct uart_port *); @@ -343,13 +344,15 @@ typedef unsigned int __bitwise__ uif_t; * stuff here. */ struct uart_info { - struct tty_struct *tty; + struct tty_port port; struct circ_buf xmit; uif_t flags; /* * Definitions for info->flags. These are _private_ to serial_core, and * are specific to this structure. They may be queried by low level drivers. + * + * FIXME: use the ASY_ definitions */ #define UIF_CHECK_CD ((__force uif_t) (1 << 25)) #define UIF_CTS_FLOW ((__force uif_t) (1 << 26)) @@ -357,11 +360,7 @@ struct uart_info { #define UIF_INITIALIZED ((__force uif_t) (1 << 31)) #define UIF_SUSPENDED ((__force uif_t) (1 << 30)) - int blocked_open; - struct tasklet_struct tlet; - - wait_queue_head_t open_wait; wait_queue_head_t delta_msr_wait; }; @@ -438,8 +437,8 @@ int uart_resume_port(struct uart_driver *reg, struct uart_port *port); #define uart_circ_chars_free(circ) \ (CIRC_SPACE((circ)->head, (circ)->tail, UART_XMIT_SIZE)) -#define uart_tx_stopped(port) \ - ((port)->info->tty->stopped || (port)->info->tty->hw_stopped) +#define uart_tx_stopped(portp) \ + ((portp)->info->port.tty->stopped || (portp)->info->port.tty->hw_stopped) /* * The following are helper functions for the low level drivers. @@ -450,7 +449,7 @@ uart_handle_sysrq_char(struct uart_port *port, unsigned int ch) #ifdef SUPPORT_SYSRQ if (port->sysrq) { if (ch && time_before(jiffies, port->sysrq)) { - handle_sysrq(ch, port->info ? port->info->tty : NULL); + handle_sysrq(ch, port->info ? port->info->port.tty : NULL); port->sysrq = 0; return 1; } @@ -479,7 +478,7 @@ static inline int uart_handle_break(struct uart_port *port) } #endif if (port->flags & UPF_SAK) - do_SAK(info->tty); + do_SAK(info->port.tty); return 0; } @@ -502,9 +501,9 @@ uart_handle_dcd_change(struct uart_port *port, unsigned int status) if (info->flags & UIF_CHECK_CD) { if (status) - wake_up_interruptible(&info->open_wait); - else if (info->tty) - tty_hangup(info->tty); + wake_up_interruptible(&info->port.open_wait); + else if (info->port.tty) + tty_hangup(info->port.tty); } } @@ -517,7 +516,7 @@ static inline void uart_handle_cts_change(struct uart_port *port, unsigned int status) { struct uart_info *info = port->info; - struct tty_struct *tty = info->tty; + struct tty_struct *tty = info->port.tty; port->icount.cts++; @@ -543,7 +542,7 @@ static inline void uart_insert_char(struct uart_port *port, unsigned int status, unsigned int overrun, unsigned int ch, unsigned int flag) { - struct tty_struct *tty = port->info->tty; + struct tty_struct *tty = port->info->port.tty; if ((status & port->ignore_status_mask & ~overrun) == 0) tty_insert_flip_char(tty, ch, flag); diff --git a/include/linux/serio.h b/include/linux/serio.h index 95674d97dabd..e72716cca577 100644 --- a/include/linux/serio.h +++ b/include/linux/serio.h @@ -175,7 +175,7 @@ static inline void serio_unpin_driver(struct serio *serio) #define SERIO_8042_XL 0x06 /* - * Serio types + * Serio protocols */ #define SERIO_UNKNOWN 0x00 #define SERIO_MSC 0x01 @@ -212,5 +212,7 @@ static inline void serio_unpin_driver(struct serio *serio) #define SERIO_TAOSEVM 0x34 #define SERIO_FUJITSU 0x35 #define SERIO_ZHENHUA 0x36 +#define SERIO_INEXIO 0x37 +#define SERIO_TOUCHIT213 0x37 #endif diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h index 299ec4b31412..7ea44f6621f2 100644 --- a/include/linux/skbuff.h +++ b/include/linux/skbuff.h @@ -246,6 +246,7 @@ typedef unsigned char *sk_buff_data_t; * @dma_cookie: a cookie to one of several possible DMA operations * done by skb 
DMA functions * @secmark: security marking + * @vlan_tci: vlan tag control information */ struct sk_buff { @@ -305,9 +306,7 @@ struct sk_buff { #endif int iif; -#ifdef CONFIG_NETDEVICES_MULTIQUEUE __u16 queue_mapping; -#endif #ifdef CONFIG_NET_SCHED __u16 tc_index; /* traffic control index */ #ifdef CONFIG_NET_CLS_ACT @@ -328,6 +327,8 @@ struct sk_buff { __u32 mark; + __u16 vlan_tci; + sk_buff_data_t transport_header; sk_buff_data_t network_header; sk_buff_data_t mac_header; @@ -1671,25 +1672,17 @@ static inline void skb_init_secmark(struct sk_buff *skb) static inline void skb_set_queue_mapping(struct sk_buff *skb, u16 queue_mapping) { -#ifdef CONFIG_NETDEVICES_MULTIQUEUE skb->queue_mapping = queue_mapping; -#endif } static inline u16 skb_get_queue_mapping(struct sk_buff *skb) { -#ifdef CONFIG_NETDEVICES_MULTIQUEUE return skb->queue_mapping; -#else - return 0; -#endif } static inline void skb_copy_queue_mapping(struct sk_buff *to, const struct sk_buff *from) { -#ifdef CONFIG_NETDEVICES_MULTIQUEUE to->queue_mapping = from->queue_mapping; -#endif } static inline int skb_is_gso(const struct sk_buff *skb) @@ -1702,6 +1695,20 @@ static inline int skb_is_gso_v6(const struct sk_buff *skb) return skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6; } +extern void __skb_warn_lro_forwarding(const struct sk_buff *skb); + +static inline bool skb_warn_if_lro(const struct sk_buff *skb) +{ + /* LRO sets gso_size but not gso_type, whereas if GSO is really + * wanted then gso_type will be set. */ + struct skb_shared_info *shinfo = skb_shinfo(skb); + if (shinfo->gso_size != 0 && unlikely(shinfo->gso_type == 0)) { + __skb_warn_lro_forwarding(skb); + return true; + } + return false; +} + static inline void skb_forward_csum(struct sk_buff *skb) { /* Unfortunately we don't support this one. Any brave souls? */ diff --git a/include/linux/smc911x.h b/include/linux/smc911x.h new file mode 100644 index 000000000000..b58f54c24183 --- /dev/null +++ b/include/linux/smc911x.h @@ -0,0 +1,12 @@ +#ifndef __SMC911X_H__ +#define __SMC911X_H__ + +#define SMC911X_USE_16BIT (1 << 0) +#define SMC911X_USE_32BIT (1 << 1) + +struct smc911x_platdata { + unsigned long flags; + unsigned long irq_flags; /* IRQF_... */ +}; + +#endif /* __SMC911X_H__ */ diff --git a/include/linux/smc91x.h b/include/linux/smc91x.h index 8e0556b8781c..3827b922ba1f 100644 --- a/include/linux/smc91x.h +++ b/include/linux/smc91x.h @@ -5,9 +5,19 @@ #define SMC91X_USE_16BIT (1 << 1) #define SMC91X_USE_32BIT (1 << 2) +#define SMC91X_NOWAIT (1 << 3) + +/* two bits for IO_SHIFT, let's hope later designs will keep this sane */ +#define SMC91X_IO_SHIFT_0 (0 << 4) +#define SMC91X_IO_SHIFT_1 (1 << 4) +#define SMC91X_IO_SHIFT_2 (2 << 4) +#define SMC91X_IO_SHIFT_3 (3 << 4) +#define SMC91X_IO_SHIFT(x) (((x) >> 4) & 0x3) + +#define SMC91X_USE_DMA (1 << 6) + struct smc91x_platdata { unsigned long flags; - unsigned long irq_flags; /* IRQF_... 
*/ }; #endif /* __SMC91X_H__ */ diff --git a/include/linux/smp.h b/include/linux/smp.h index 55232ccf9cfd..48262f86c969 100644 --- a/include/linux/smp.h +++ b/include/linux/smp.h @@ -7,9 +7,18 @@ */ #include <linux/errno.h> +#include <linux/list.h> +#include <linux/cpumask.h> extern void cpu_idle(void); +struct call_single_data { + struct list_head list; + void (*func) (void *info); + void *info; + unsigned int flags; +}; + #ifdef CONFIG_SMP #include <linux/preempt.h> @@ -52,15 +61,34 @@ extern void smp_cpus_done(unsigned int max_cpus); /* * Call a function on all other processors */ -int smp_call_function(void(*func)(void *info), void *info, int retry, int wait); - +int smp_call_function(void(*func)(void *info), void *info, int wait); +int smp_call_function_mask(cpumask_t mask, void(*func)(void *info), void *info, + int wait); int smp_call_function_single(int cpuid, void (*func) (void *info), void *info, - int retry, int wait); + int wait); +void __smp_call_function_single(int cpuid, struct call_single_data *data); + +/* + * Generic and arch helpers + */ +#ifdef CONFIG_USE_GENERIC_SMP_HELPERS +void generic_smp_call_function_single_interrupt(void); +void generic_smp_call_function_interrupt(void); +void init_call_single_data(void); +void ipi_call_lock(void); +void ipi_call_unlock(void); +void ipi_call_lock_irq(void); +void ipi_call_unlock_irq(void); +#else +static inline void init_call_single_data(void) +{ +} +#endif /* * Call a function on all processors */ -int on_each_cpu(void (*func) (void *info), void *info, int retry, int wait); +int on_each_cpu(void (*func) (void *info), void *info, int wait); #define MSG_ALL_BUT_SELF 0x8000 /* Assume <32768 CPU's */ #define MSG_ALL 0x8001 @@ -90,9 +118,9 @@ static inline int up_smp_call_function(void (*func)(void *), void *info) { return 0; } -#define smp_call_function(func, info, retry, wait) \ +#define smp_call_function(func, info, wait) \ (up_smp_call_function(func, info)) -#define on_each_cpu(func,info,retry,wait) \ +#define on_each_cpu(func,info,wait) \ ({ \ local_irq_disable(); \ func(info); \ @@ -102,7 +130,7 @@ static inline int up_smp_call_function(void (*func)(void *), void *info) static inline void smp_send_reschedule(int cpu) { } #define num_booting_cpus() 1 #define smp_prepare_boot_cpu() do {} while (0) -#define smp_call_function_single(cpuid, func, info, retry, wait) \ +#define smp_call_function_single(cpuid, func, info, wait) \ ({ \ WARN_ON(cpuid != 0); \ local_irq_disable(); \ @@ -112,7 +140,9 @@ static inline void smp_send_reschedule(int cpu) { } }) #define smp_call_function_mask(mask, func, info, wait) \ (up_smp_call_function(func, info)) - +static inline void init_call_single_data(void) +{ +} #endif /* !SMP */ /* diff --git a/include/linux/smp_lock.h b/include/linux/smp_lock.h index aab3a4cff4e1..813be59bf345 100644 --- a/include/linux/smp_lock.h +++ b/include/linux/smp_lock.h @@ -27,11 +27,24 @@ static inline int reacquire_kernel_lock(struct task_struct *task) extern void __lockfunc lock_kernel(void) __acquires(kernel_lock); extern void __lockfunc unlock_kernel(void) __releases(kernel_lock); +/* + * Various legacy drivers don't really need the BKL in a specific + * function, but they *do* need to know that the BKL became available. + * This function just avoids wrapping a bunch of lock/unlock pairs + * around code which doesn't really need it. 
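The cycle_kernel_lock() helper introduced just below this comment is typically dropped into a legacy driver's open() path. A minimal sketch, assuming a hypothetical legacy character driver (olddrv_*); only the cycle_kernel_lock() call is the point here:

#include <linux/fs.h>
#include <linux/smp_lock.h>

/* Legacy driver that only needs to serialize once against any BKL holder
 * at open time, instead of holding the BKL across the whole function. */
static int olddrv_open(struct inode *inode, struct file *file)
{
        cycle_kernel_lock();    /* wait for the BKL to become available, then drop it */
        return 0;
}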
+ */ +static inline void cycle_kernel_lock(void) +{ + lock_kernel(); + unlock_kernel(); +} + #else #define lock_kernel() do { } while(0) #define unlock_kernel() do { } while(0) #define release_kernel_lock(task) do { } while(0) +#define cycle_kernel_lock() do { } while(0) #define reacquire_kernel_lock(task) 0 #define kernel_locked() 1 diff --git a/include/linux/socket.h b/include/linux/socket.h index bd2b30a74e76..950af631e7fb 100644 --- a/include/linux/socket.h +++ b/include/linux/socket.h @@ -306,10 +306,10 @@ extern int csum_partial_copy_fromiovecend(unsigned char *kdata, int offset, unsigned int len, __wsum *csump); -extern int verify_iovec(struct msghdr *m, struct iovec *iov, char *address, int mode); +extern int verify_iovec(struct msghdr *m, struct iovec *iov, struct sockaddr *address, int mode); extern int memcpy_toiovec(struct iovec *v, unsigned char *kdata, int len); -extern int move_addr_to_user(void *kaddr, int klen, void __user *uaddr, int __user *ulen); -extern int move_addr_to_kernel(void __user *uaddr, int ulen, void *kaddr); +extern int move_addr_to_user(struct sockaddr *kaddr, int klen, void __user *uaddr, int __user *ulen); +extern int move_addr_to_kernel(void __user *uaddr, int ulen, struct sockaddr *kaddr); extern int put_cmsg(struct msghdr*, int level, int type, int len, void *data); #endif diff --git a/include/linux/sonet.h b/include/linux/sonet.h index 753680296e17..67ad11fcf88b 100644 --- a/include/linux/sonet.h +++ b/include/linux/sonet.h @@ -34,7 +34,7 @@ struct sonet_stats { /* clear error insertion */ #define SONET_GETDIAG _IOR('a',ATMIOC_PHYTYP+4,int) /* query error insertion */ -#define SONET_SETFRAMING _IO('a',ATMIOC_PHYTYP+5) +#define SONET_SETFRAMING _IOW('a',ATMIOC_PHYTYP+5,int) /* set framing mode (SONET/SDH) */ #define SONET_GETFRAMING _IOR('a',ATMIOC_PHYTYP+6,int) /* get framing mode */ diff --git a/include/linux/spi/max7301.h b/include/linux/spi/max7301.h new file mode 100644 index 000000000000..6dfd83f19b4b --- /dev/null +++ b/include/linux/spi/max7301.h @@ -0,0 +1,9 @@ +#ifndef LINUX_SPI_MAX7301_H +#define LINUX_SPI_MAX7301_H + +struct max7301_platform_data { + /* number assigned to the first GPIO */ + unsigned base; +}; + +#endif diff --git a/include/linux/spi/mmc_spi.h b/include/linux/spi/mmc_spi.h index d5ca78b93a3b..a3626aedaec9 100644 --- a/include/linux/spi/mmc_spi.h +++ b/include/linux/spi/mmc_spi.h @@ -23,6 +23,15 @@ struct mmc_spi_platform_data { /* sense switch on sd cards */ int (*get_ro)(struct device *); + /* + * If board does not use CD interrupts, driver can optimize polling + * using this function. + */ + int (*get_cd)(struct device *); + + /* Capabilities to pass into mmc core (e.g. MMC_CAP_NEEDS_POLL). 
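To illustrate the optional get_cd() hook above and the caps field declared just below it, a hedged board-support sketch; the GPIO number, helper names, and platform data instance are invented:

#include <linux/gpio.h>
#include <linux/mmc/host.h>
#include <linux/spi/mmc_spi.h>

#define MYBOARD_GPIO_CD 57                      /* hypothetical card-detect GPIO */

static int myboard_mmc_get_cd(struct device *dev)
{
        return gpio_get_value(MYBOARD_GPIO_CD); /* non-zero = card present */
}

static struct mmc_spi_platform_data myboard_mmc_pdata = {
        .get_cd        = myboard_mmc_get_cd,
        .caps          = MMC_CAP_NEEDS_POLL,    /* no card-detect interrupt wired up */
        .detect_delay  = 100,                   /* debounce, in msecs */
};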
*/ + unsigned long caps; + /* how long to debounce card detect, in msecs */ u16 detect_delay; diff --git a/include/linux/spi/spi.h b/include/linux/spi/spi.h index 387e428f1cdf..b9a76c972084 100644 --- a/include/linux/spi/spi.h +++ b/include/linux/spi/spi.h @@ -733,7 +733,7 @@ struct spi_board_info { * controller_data goes to spi_device.controller_data, * irq is copied too */ - char modalias[KOBJ_NAME_LEN]; + char modalias[32]; const void *platform_data; void *controller_data; int irq; diff --git a/include/linux/ssb/ssb.h b/include/linux/ssb/ssb.h index 50dfd0dc4093..4bf8cade9dbc 100644 --- a/include/linux/ssb/ssb.h +++ b/include/linux/ssb/ssb.h @@ -7,6 +7,7 @@ #include <linux/spinlock.h> #include <linux/pci.h> #include <linux/mod_devicetable.h> +#include <linux/dma-mapping.h> #include <linux/ssb/ssb_regs.h> @@ -137,9 +138,6 @@ struct ssb_device { const struct ssb_bus_ops *ops; struct device *dev; - /* Pointer to the device that has to be used for - * any DMA related operation. */ - struct device *dma_dev; struct ssb_bus *bus; struct ssb_device_id id; @@ -399,13 +397,151 @@ static inline void ssb_block_write(struct ssb_device *dev, const void *buffer, #endif /* CONFIG_SSB_BLOCKIO */ +/* The SSB DMA API. Use this API for any DMA operation on the device. + * This API basically is a wrapper that calls the correct DMA API for + * the host device type the SSB device is attached to. */ + /* Translation (routing) bits that need to be ORed to DMA * addresses before they are given to a device. */ extern u32 ssb_dma_translation(struct ssb_device *dev); #define SSB_DMA_TRANSLATION_MASK 0xC0000000 #define SSB_DMA_TRANSLATION_SHIFT 30 -extern int ssb_dma_set_mask(struct ssb_device *ssb_dev, u64 mask); +extern int ssb_dma_set_mask(struct ssb_device *dev, u64 mask); + +extern void * ssb_dma_alloc_consistent(struct ssb_device *dev, size_t size, + dma_addr_t *dma_handle, gfp_t gfp_flags); +extern void ssb_dma_free_consistent(struct ssb_device *dev, size_t size, + void *vaddr, dma_addr_t dma_handle, + gfp_t gfp_flags); + +static inline void __cold __ssb_dma_not_implemented(struct ssb_device *dev) +{ +#ifdef CONFIG_SSB_DEBUG + printk(KERN_ERR "SSB: BUG! 
Calling DMA API for " + "unsupported bustype %d\n", dev->bus->bustype); +#endif /* DEBUG */ +} + +static inline int ssb_dma_mapping_error(struct ssb_device *dev, dma_addr_t addr) +{ + switch (dev->bus->bustype) { + case SSB_BUSTYPE_PCI: + return pci_dma_mapping_error(addr); + case SSB_BUSTYPE_SSB: + return dma_mapping_error(addr); + default: + __ssb_dma_not_implemented(dev); + } + return -ENOSYS; +} + +static inline dma_addr_t ssb_dma_map_single(struct ssb_device *dev, void *p, + size_t size, enum dma_data_direction dir) +{ + switch (dev->bus->bustype) { + case SSB_BUSTYPE_PCI: + return pci_map_single(dev->bus->host_pci, p, size, dir); + case SSB_BUSTYPE_SSB: + return dma_map_single(dev->dev, p, size, dir); + default: + __ssb_dma_not_implemented(dev); + } + return 0; +} + +static inline void ssb_dma_unmap_single(struct ssb_device *dev, dma_addr_t dma_addr, + size_t size, enum dma_data_direction dir) +{ + switch (dev->bus->bustype) { + case SSB_BUSTYPE_PCI: + pci_unmap_single(dev->bus->host_pci, dma_addr, size, dir); + return; + case SSB_BUSTYPE_SSB: + dma_unmap_single(dev->dev, dma_addr, size, dir); + return; + default: + __ssb_dma_not_implemented(dev); + } +} + +static inline void ssb_dma_sync_single_for_cpu(struct ssb_device *dev, + dma_addr_t dma_addr, + size_t size, + enum dma_data_direction dir) +{ + switch (dev->bus->bustype) { + case SSB_BUSTYPE_PCI: + pci_dma_sync_single_for_cpu(dev->bus->host_pci, dma_addr, + size, dir); + return; + case SSB_BUSTYPE_SSB: + dma_sync_single_for_cpu(dev->dev, dma_addr, size, dir); + return; + default: + __ssb_dma_not_implemented(dev); + } +} + +static inline void ssb_dma_sync_single_for_device(struct ssb_device *dev, + dma_addr_t dma_addr, + size_t size, + enum dma_data_direction dir) +{ + switch (dev->bus->bustype) { + case SSB_BUSTYPE_PCI: + pci_dma_sync_single_for_device(dev->bus->host_pci, dma_addr, + size, dir); + return; + case SSB_BUSTYPE_SSB: + dma_sync_single_for_device(dev->dev, dma_addr, size, dir); + return; + default: + __ssb_dma_not_implemented(dev); + } +} + +static inline void ssb_dma_sync_single_range_for_cpu(struct ssb_device *dev, + dma_addr_t dma_addr, + unsigned long offset, + size_t size, + enum dma_data_direction dir) +{ + switch (dev->bus->bustype) { + case SSB_BUSTYPE_PCI: + /* Just sync everything. That's all the PCI API can do. */ + pci_dma_sync_single_for_cpu(dev->bus->host_pci, dma_addr, + offset + size, dir); + return; + case SSB_BUSTYPE_SSB: + dma_sync_single_range_for_cpu(dev->dev, dma_addr, offset, + size, dir); + return; + default: + __ssb_dma_not_implemented(dev); + } +} + +static inline void ssb_dma_sync_single_range_for_device(struct ssb_device *dev, + dma_addr_t dma_addr, + unsigned long offset, + size_t size, + enum dma_data_direction dir) +{ + switch (dev->bus->bustype) { + case SSB_BUSTYPE_PCI: + /* Just sync everything. That's all the PCI API can do. 
*/ + pci_dma_sync_single_for_device(dev->bus->host_pci, dma_addr, + offset + size, dir); + return; + case SSB_BUSTYPE_SSB: + dma_sync_single_range_for_device(dev->dev, dma_addr, offset, + size, dir); + return; + default: + __ssb_dma_not_implemented(dev); + } +} #ifdef CONFIG_SSB_PCIHOST diff --git a/include/linux/stallion.h b/include/linux/stallion.h index 0424d75a5aaa..336af33c6ea4 100644 --- a/include/linux/stallion.h +++ b/include/linux/stallion.h @@ -69,6 +69,7 @@ struct stlrq { */ struct stlport { unsigned long magic; + struct tty_port port; unsigned int portnr; unsigned int panelnr; unsigned int brdnr; @@ -76,12 +77,10 @@ struct stlport { int uartaddr; unsigned int pagenr; unsigned long istate; - int flags; int baud_base; int custom_divisor; int close_delay; int closing_wait; - int refcount; int openwaitcnt; int brklen; unsigned int sigs; @@ -92,9 +91,6 @@ struct stlport { unsigned long clk; unsigned long hwid; void *uartp; - struct tty_struct *tty; - wait_queue_head_t open_wait; - wait_queue_head_t close_wait; comstats_t stats; struct stlrq tx; }; diff --git a/include/linux/sunrpc/auth_gss.h b/include/linux/sunrpc/auth_gss.h index fec6899bf355..d48d4e605f74 100644 --- a/include/linux/sunrpc/auth_gss.h +++ b/include/linux/sunrpc/auth_gss.h @@ -7,8 +7,6 @@ * Andy Adamson <andros@umich.edu> * Bruce Fields <bfields@umich.edu> * Copyright (c) 2000 The Regents of the University of Michigan - * - * $Id$ */ #ifndef _LINUX_SUNRPC_AUTH_GSS_H diff --git a/include/linux/sunrpc/clnt.h b/include/linux/sunrpc/clnt.h index 6fff7f82ef12..e5bfe01ee305 100644 --- a/include/linux/sunrpc/clnt.h +++ b/include/linux/sunrpc/clnt.h @@ -42,7 +42,8 @@ struct rpc_clnt { unsigned int cl_softrtry : 1,/* soft timeouts */ cl_discrtry : 1,/* disconnect before retry */ - cl_autobind : 1;/* use getport() */ + cl_autobind : 1,/* use getport() */ + cl_chatty : 1;/* be verbose */ struct rpc_rtt * cl_rtt; /* RTO estimator data */ const struct rpc_timeout *cl_timeout; /* Timeout strategy */ @@ -114,6 +115,7 @@ struct rpc_create_args { #define RPC_CLNT_CREATE_NONPRIVPORT (1UL << 3) #define RPC_CLNT_CREATE_NOPING (1UL << 4) #define RPC_CLNT_CREATE_DISCRTRY (1UL << 5) +#define RPC_CLNT_CREATE_QUIET (1UL << 6) struct rpc_clnt *rpc_create(struct rpc_create_args *args); struct rpc_clnt *rpc_bind_new_program(struct rpc_clnt *, @@ -123,6 +125,9 @@ void rpc_shutdown_client(struct rpc_clnt *); void rpc_release_client(struct rpc_clnt *); int rpcb_register(u32, u32, int, unsigned short, int *); +int rpcb_v4_register(const u32 program, const u32 version, + const struct sockaddr *address, + const char *netid, int *result); int rpcb_getport_sync(struct sockaddr_in *, u32, u32, int); void rpcb_getport_async(struct rpc_task *); diff --git a/include/linux/sunrpc/gss_api.h b/include/linux/sunrpc/gss_api.h index 459c5fc11d51..03f33330ece2 100644 --- a/include/linux/sunrpc/gss_api.h +++ b/include/linux/sunrpc/gss_api.h @@ -7,8 +7,6 @@ * Andy Adamson <andros@umich.edu> * Bruce Fields <bfields@umich.edu> * Copyright (c) 2000 The Regents of the University of Michigan - * - * $Id$ */ #ifndef _LINUX_SUNRPC_GSS_API_H diff --git a/include/linux/sunrpc/gss_krb5.h b/include/linux/sunrpc/gss_krb5.h index a10f1fb0bf7c..e7bbdba474d5 100644 --- a/include/linux/sunrpc/gss_krb5.h +++ b/include/linux/sunrpc/gss_krb5.h @@ -51,6 +51,9 @@ struct krb5_ctx { extern spinlock_t krb5_seq_lock; +/* The length of the Kerberos GSS token header */ +#define GSS_KRB5_TOK_HDR_LEN (16) + #define KG_TOK_MIC_MSG 0x0101 #define KG_TOK_WRAP_MSG 0x0201 diff --git 
a/include/linux/sunrpc/sched.h b/include/linux/sunrpc/sched.h index d1a5c8c1a0f1..64981a2f1cae 100644 --- a/include/linux/sunrpc/sched.h +++ b/include/linux/sunrpc/sched.h @@ -135,7 +135,6 @@ struct rpc_task_setup { #define RPC_IS_SWAPPER(t) ((t)->tk_flags & RPC_TASK_SWAPPER) #define RPC_DO_ROOTOVERRIDE(t) ((t)->tk_flags & RPC_TASK_ROOTCREDS) #define RPC_ASSASSINATED(t) ((t)->tk_flags & RPC_TASK_KILLED) -#define RPC_DO_CALLBACK(t) ((t)->tk_callback != NULL) #define RPC_IS_SOFT(t) ((t)->tk_flags & RPC_TASK_SOFT) #define RPC_TASK_RUNNING 0 diff --git a/include/linux/sunrpc/svc.h b/include/linux/sunrpc/svc.h index 4b54c5fdcfd9..dc69068d94c7 100644 --- a/include/linux/sunrpc/svc.h +++ b/include/linux/sunrpc/svc.h @@ -22,7 +22,7 @@ /* * This is the RPC server thread function prototype */ -typedef void (*svc_thread_fn)(struct svc_rqst *); +typedef int (*svc_thread_fn)(void *); /* * @@ -80,7 +80,6 @@ struct svc_serv { struct module * sv_module; /* optional module to count when * adding threads */ svc_thread_fn sv_function; /* main function for threads */ - int sv_kill_signal; /* signal to kill threads */ }; /* @@ -388,8 +387,8 @@ struct svc_rqst *svc_prepare_thread(struct svc_serv *serv, struct svc_pool *pool); void svc_exit_thread(struct svc_rqst *); struct svc_serv * svc_create_pooled(struct svc_program *, unsigned int, - void (*shutdown)(struct svc_serv*), - svc_thread_fn, int sig, struct module *); + void (*shutdown)(struct svc_serv*), svc_thread_fn, + struct module *); int svc_set_num_threads(struct svc_serv *, struct svc_pool *, int); void svc_destroy(struct svc_serv *); int svc_process(struct svc_rqst *); diff --git a/include/linux/sunrpc/svc_rdma.h b/include/linux/sunrpc/svc_rdma.h index 05eb4664d0dd..ef2e3a20bf3b 100644 --- a/include/linux/sunrpc/svc_rdma.h +++ b/include/linux/sunrpc/svc_rdma.h @@ -72,7 +72,7 @@ extern atomic_t rdma_stat_sq_prod; */ struct svc_rdma_op_ctxt { struct svc_rdma_op_ctxt *read_hdr; - struct list_head free_list; + int hdr_count; struct xdr_buf arg; struct list_head dto_q; enum ib_wr_opcode wr_op; @@ -86,6 +86,31 @@ struct svc_rdma_op_ctxt { struct page *pages[RPCSVC_MAXPAGES]; }; +/* + * NFS_ requests are mapped on the client side by the chunk lists in + * the RPCRDMA header. During the fetching of the RPC from the client + * and the writing of the reply to the client, the memory in the + * client and the memory in the server must be mapped as contiguous + * vaddr/len for access by the hardware. These data strucures keep + * these mappings. + * + * For an RDMA_WRITE, the 'sge' maps the RPC REPLY. For RDMA_READ, the + * 'sge' in the svc_rdma_req_map maps the server side RPC reply and the + * 'ch' field maps the read-list of the RPCRDMA header to the 'sge' + * mapping of the reply. + */ +struct svc_rdma_chunk_sge { + int start; /* sge no for this chunk */ + int count; /* sge count for this chunk */ +}; +struct svc_rdma_req_map { + unsigned long count; + union { + struct kvec sge[RPCSVC_MAXPAGES]; + struct svc_rdma_chunk_sge ch[RPCSVC_MAXPAGES]; + }; +}; + #define RDMACTXT_F_LAST_CTXT 2 struct svcxprt_rdma { @@ -93,7 +118,6 @@ struct svcxprt_rdma { struct rdma_cm_id *sc_cm_id; /* RDMA connection id */ struct list_head sc_accept_q; /* Conn. 
waiting accept */ int sc_ord; /* RDMA read limit */ - wait_queue_head_t sc_read_wait; int sc_max_sge; int sc_sq_depth; /* Depth of SQ */ @@ -104,12 +128,8 @@ struct svcxprt_rdma { struct ib_pd *sc_pd; + atomic_t sc_dma_used; atomic_t sc_ctxt_used; - struct list_head sc_ctxt_free; - int sc_ctxt_cnt; - int sc_ctxt_bump; - int sc_ctxt_max; - spinlock_t sc_ctxt_lock; struct list_head sc_rq_dto_q; spinlock_t sc_rq_dto_lock; struct ib_qp *sc_qp; @@ -173,6 +193,8 @@ extern int svc_rdma_post_recv(struct svcxprt_rdma *); extern int svc_rdma_create_listen(struct svc_serv *, int, struct sockaddr *); extern struct svc_rdma_op_ctxt *svc_rdma_get_context(struct svcxprt_rdma *); extern void svc_rdma_put_context(struct svc_rdma_op_ctxt *, int); +extern struct svc_rdma_req_map *svc_rdma_get_req_map(void); +extern void svc_rdma_put_req_map(struct svc_rdma_req_map *); extern void svc_sq_reap(struct svcxprt_rdma *); extern void svc_rq_reap(struct svcxprt_rdma *); extern struct svc_xprt_class svc_rdma_class; diff --git a/include/linux/sunrpc/svcauth_gss.h b/include/linux/sunrpc/svcauth_gss.h index 417a1def56db..c9165d9771a8 100644 --- a/include/linux/sunrpc/svcauth_gss.h +++ b/include/linux/sunrpc/svcauth_gss.h @@ -3,9 +3,6 @@ * * Bruce Fields <bfields@umich.edu> * Copyright (c) 2002 The Regents of the Unviersity of Michigan - * - * $Id$ - * */ #ifndef _LINUX_SUNRPC_SVCAUTH_GSS_H diff --git a/include/linux/suspend.h b/include/linux/suspend.h index a6977423baf7..e8e69159af71 100644 --- a/include/linux/suspend.h +++ b/include/linux/suspend.h @@ -86,6 +86,11 @@ typedef int __bitwise suspend_state_t; * that implement @begin(), but platforms implementing @begin() should * also provide a @end() which cleans up transitions aborted before * @enter(). + * + * @recover: Recover the platform from a suspend failure. + * Called by the PM core if the suspending of devices fails. + * This callback is optional and should only be implemented by platforms + * which require special recovery actions in that situation. */ struct platform_suspend_ops { int (*valid)(suspend_state_t state); @@ -94,6 +99,7 @@ struct platform_suspend_ops { int (*enter)(suspend_state_t state); void (*finish)(void); void (*end)(void); + void (*recover)(void); }; #ifdef CONFIG_SUSPEND @@ -149,7 +155,7 @@ extern void mark_free_pages(struct zone *zone); * The methods in this structure allow a platform to carry out special * operations required by it during a hibernation transition. * - * All the methods below must be implemented. + * All the methods below, except for @recover(), must be implemented. * * @begin: Tell the platform driver that we're starting hibernation. * Called right after shrinking memory and before freezing devices. @@ -189,6 +195,11 @@ extern void mark_free_pages(struct zone *zone); * @restore_cleanup: Clean up after a failing image restoration. * Called right after the nonboot CPUs have been enabled and before * thawing devices (runs with IRQs on). + * + * @recover: Recover the platform from a failure to suspend devices. + * Called by the PM core if the suspending of devices during hibernation + * fails. This callback is optional and should only be implemented by + * platforms which require special recovery actions in that situation. 
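A hedged sketch of how a platform might use the optional @recover callback documented above; every myplat_* name is invented, and only .valid and .enter are shown besides the new hook:

static int myplat_pm_enter(suspend_state_t state)
{
        /* platform-specific sleep entry (hypothetical) */
        return 0;
}

static void myplat_pm_recover(void)
{
        /* Undo board-level preparation when suspending devices failed,
         * e.g. release resources claimed by an earlier .begin()/.prepare(). */
}

static struct platform_suspend_ops myplat_suspend_ops = {
        .valid   = suspend_valid_only_mem,
        .enter   = myplat_pm_enter,
        .recover = myplat_pm_recover,
};

The ops table is registered as usual with suspend_set_ops(&myplat_suspend_ops); the core only invokes .recover when the suspending of devices fails.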
*/ struct platform_hibernation_ops { int (*begin)(void); @@ -200,6 +211,7 @@ struct platform_hibernation_ops { void (*leave)(void); int (*pre_restore)(void); void (*restore_cleanup)(void); + void (*recover)(void); }; #ifdef CONFIG_HIBERNATION diff --git a/include/linux/synclink.h b/include/linux/synclink.h index 45f6bc82d317..c844a229acc9 100644 --- a/include/linux/synclink.h +++ b/include/linux/synclink.h @@ -136,6 +136,7 @@ #define MGSL_INTERFACE_RTS_EN 0x10 #define MGSL_INTERFACE_LL 0x20 #define MGSL_INTERFACE_RL 0x40 +#define MGSL_INTERFACE_MSB_FIRST 0x80 typedef struct _MGSL_PARAMS { diff --git a/include/linux/sysdev.h b/include/linux/sysdev.h index f2767bc6b735..f395bb3fa2f2 100644 --- a/include/linux/sysdev.h +++ b/include/linux/sysdev.h @@ -99,8 +99,9 @@ extern void sysdev_unregister(struct sys_device *); struct sysdev_attribute { struct attribute attr; - ssize_t (*show)(struct sys_device *, char *); - ssize_t (*store)(struct sys_device *, const char *, size_t); + ssize_t (*show)(struct sys_device *, struct sysdev_attribute *, char *); + ssize_t (*store)(struct sys_device *, struct sysdev_attribute *, + const char *, size_t); }; @@ -118,4 +119,38 @@ struct sysdev_attribute { extern int sysdev_create_file(struct sys_device *, struct sysdev_attribute *); extern void sysdev_remove_file(struct sys_device *, struct sysdev_attribute *); +struct sysdev_ext_attribute { + struct sysdev_attribute attr; + void *var; +}; + +/* + * Support for simple variable sysdev attributes. + * The pointer to the variable is stored in a sysdev_ext_attribute + */ + +/* Add more types as needed */ + +extern ssize_t sysdev_show_ulong(struct sys_device *, struct sysdev_attribute *, + char *); +extern ssize_t sysdev_store_ulong(struct sys_device *, + struct sysdev_attribute *, const char *, size_t); +extern ssize_t sysdev_show_int(struct sys_device *, struct sysdev_attribute *, + char *); +extern ssize_t sysdev_store_int(struct sys_device *, + struct sysdev_attribute *, const char *, size_t); + +#define _SYSDEV_ULONG_ATTR(_name, _mode, _var) \ + { _SYSDEV_ATTR(_name, _mode, sysdev_show_ulong, sysdev_store_ulong), \ + &(_var) } +#define SYSDEV_ULONG_ATTR(_name, _mode, _var) \ + struct sysdev_ext_attribute attr_##_name = \ + _SYSDEV_ULONG_ATTR(_name, _mode, _var); +#define _SYSDEV_INT_ATTR(_name, _mode, _var) \ + { _SYSDEV_ATTR(_name, _mode, sysdev_show_int, sysdev_store_int), \ + &(_var) } +#define SYSDEV_INT_ATTR(_name, _mode, _var) \ + struct sysdev_ext_attribute attr_##_name = \ + _SYSDEV_INT_ATTR(_name, _mode, _var); + #endif /* _SYSDEV_H_ */ diff --git a/include/linux/sysfs.h b/include/linux/sysfs.h index 7858eac40aa7..37fa24152bd8 100644 --- a/include/linux/sysfs.h +++ b/include/linux/sysfs.h @@ -101,6 +101,9 @@ void sysfs_remove_bin_file(struct kobject *kobj, struct bin_attribute *attr); int __must_check sysfs_create_link(struct kobject *kobj, struct kobject *target, const char *name); +int __must_check sysfs_create_link_nowarn(struct kobject *kobj, + struct kobject *target, + const char *name); void sysfs_remove_link(struct kobject *kobj, const char *name); int __must_check sysfs_create_group(struct kobject *kobj, @@ -180,6 +183,13 @@ static inline int sysfs_create_link(struct kobject *kobj, return 0; } +static inline int sysfs_create_link_nowarn(struct kobject *kobj, + struct kobject *target, + const char *name) +{ + return 0; +} + static inline void sysfs_remove_link(struct kobject *kobj, const char *name) { } diff --git a/include/linux/tcp.h b/include/linux/tcp.h index b31b6b74aa28..2e2557388e36 100644 
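The sysdev_ext_attribute helpers above cut per-driver show/store boilerplate; a minimal sketch of exporting a plain unsigned long (the variable, file name, and registration hook are hypothetical):

static unsigned long mydrv_poll_interval = 500;

/* Declares 'struct sysdev_ext_attribute attr_poll_interval' whose generated
 * show/store methods read and write mydrv_poll_interval directly. */
static SYSDEV_ULONG_ATTR(poll_interval, 0644, mydrv_poll_interval);

static int mydrv_add_files(struct sys_device *sysdev)
{
        return sysdev_create_file(sysdev, &attr_poll_interval.attr);
}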
--- a/include/linux/tcp.h +++ b/include/linux/tcp.h @@ -224,6 +224,12 @@ struct tcp_options_received { u16 mss_clamp; /* Maximal mss, negotiated at connection setup */ }; +/* This is the max number of SACKS that we'll generate and process. It's safe + * to increse this, although since: + * size = TCPOLEN_SACK_BASE_ALIGNED (4) + n * TCPOLEN_SACK_PERBLOCK (8) + * only four options will fit in a standard TCP header */ +#define TCP_NUM_SACKS 4 + struct tcp_request_sock { struct inet_request_sock req; #ifdef CONFIG_TCP_MD5SIG @@ -291,10 +297,9 @@ struct tcp_sock { u32 rcv_ssthresh; /* Current window clamp */ u32 frto_highmark; /* snd_nxt when RTO occurred */ - u8 reordering; /* Packet reordering metric. */ + u16 advmss; /* Advertised MSS */ u8 frto_counter; /* Number of new acks after RTO */ u8 nonagle; /* Disable Nagle algorithm? */ - u8 keepalive_probes; /* num of allowed keep alive probes */ /* RTT measurement */ u32 srtt; /* smoothed round trip time << 3 */ @@ -305,6 +310,10 @@ struct tcp_sock { u32 packets_out; /* Packets which are "in flight" */ u32 retrans_out; /* Retransmitted packets out */ + + u16 urg_data; /* Saved octet of OOB data and control flags */ + u8 urg_mode; /* In urgent mode */ + u8 ecn_flags; /* ECN status bits. */ /* * Options received (usually on last packet, some only on SYN packets). */ @@ -320,13 +329,24 @@ struct tcp_sock { u32 snd_cwnd_used; u32 snd_cwnd_stamp; - struct sk_buff_head out_of_order_queue; /* Out of order segments go here */ - u32 rcv_wnd; /* Current receiver window */ u32 write_seq; /* Tail(+1) of data held in tcp send buffer */ u32 pushed_seq; /* Last pushed seq, required to talk to windows */ + u32 lost_out; /* Lost packets */ + u32 sacked_out; /* SACK'd packets */ + u32 fackets_out; /* FACK'd packets */ + u32 tso_deferred; + u32 bytes_acked; /* Appropriate Byte Counting - RFC3465 */ -/* SACKs data */ + /* from STCP, retrans queue hinting */ + struct sk_buff* lost_skb_hint; + struct sk_buff *scoreboard_skb_hint; + struct sk_buff *retransmit_skb_hint; + struct sk_buff *forward_skb_hint; + + struct sk_buff_head out_of_order_queue; /* Out of order segments go here */ + + /* SACKs data, these 2 need to be together (see tcp_build_and_update_options) */ struct tcp_sack_block duplicate_sack[1]; /* D-SACK block */ struct tcp_sack_block selective_acks[4]; /* The SACKS themselves*/ @@ -337,23 +357,14 @@ struct tcp_sock { * sacked_out > 0) */ - /* from STCP, retrans queue hinting */ - struct sk_buff* lost_skb_hint; - - struct sk_buff *scoreboard_skb_hint; - struct sk_buff *retransmit_skb_hint; - struct sk_buff *forward_skb_hint; - int lost_cnt_hint; int retransmit_cnt_hint; u32 lost_retrans_low; /* Sent seq after any rxmit (lowest) */ - u16 advmss; /* Advertised MSS */ + u8 reordering; /* Packet reordering metric. */ + u8 keepalive_probes; /* num of allowed keep alive probes */ u32 prior_ssthresh; /* ssthresh saved at recovery start */ - u32 lost_out; /* Lost packets */ - u32 sacked_out; /* SACK'd packets */ - u32 fackets_out; /* FACK'd packets */ u32 high_seq; /* snd_nxt at onset of congestion */ u32 retrans_stamp; /* Timestamp of the last retransmit, @@ -361,23 +372,16 @@ struct tcp_sock { * the first SYN. */ u32 undo_marker; /* tracking retrans started here. */ int undo_retrans; /* number of undoable retransmissions. 
*/ + u32 total_retrans; /* Total retransmits for entire connection */ + u32 urg_seq; /* Seq of received urgent pointer */ - u16 urg_data; /* Saved octet of OOB data and control flags */ - u8 urg_mode; /* In urgent mode */ - u8 ecn_flags; /* ECN status bits. */ u32 snd_up; /* Urgent pointer */ - u32 total_retrans; /* Total retransmits for entire connection */ - u32 bytes_acked; /* Appropriate Byte Counting - RFC3465 */ - unsigned int keepalive_time; /* time before keep alive takes place */ unsigned int keepalive_intvl; /* time interval between keep alive probes */ - int linger2; unsigned long last_synq_overflow; - u32 tso_deferred; - /* Receiver side RTT estimation */ struct { u32 rtt; @@ -405,6 +409,8 @@ struct tcp_sock { /* TCP MD5 Signagure Option information */ struct tcp_md5sig_info *md5sig_info; #endif + + int linger2; }; static inline struct tcp_sock *tcp_sk(const struct sock *sk) diff --git a/include/linux/textsearch.h b/include/linux/textsearch.h index 6f371f24160b..d9a85d616385 100644 --- a/include/linux/textsearch.h +++ b/include/linux/textsearch.h @@ -10,10 +10,8 @@ struct ts_config; -/** - * TS_AUTOLOAD - Automatically load textsearch modules when needed - */ -#define TS_AUTOLOAD 1 +#define TS_AUTOLOAD 1 /* Automatically load textsearch modules when needed */ +#define TS_IGNORECASE 2 /* Searches string case insensitively */ /** * struct ts_state - search state @@ -39,7 +37,7 @@ struct ts_state struct ts_ops { const char *name; - struct ts_config * (*init)(const void *, unsigned int, gfp_t); + struct ts_config * (*init)(const void *, unsigned int, gfp_t, int); unsigned int (*find)(struct ts_config *, struct ts_state *); void (*destroy)(struct ts_config *); @@ -52,12 +50,14 @@ struct ts_ops /** * struct ts_config - search configuration * @ops: operations of chosen algorithm + * @flags: flags * @get_next_block: callback to fetch the next block to search in * @finish: callback to finalize a search */ struct ts_config { struct ts_ops *ops; + int flags; /** * get_next_block - fetch next block of data @@ -162,11 +162,10 @@ static inline struct ts_config *alloc_ts_config(size_t payload, { struct ts_config *conf; - conf = kmalloc(TS_PRIV_ALIGN(sizeof(*conf)) + payload, gfp_mask); + conf = kzalloc(TS_PRIV_ALIGN(sizeof(*conf)) + payload, gfp_mask); if (conf == NULL) return ERR_PTR(-ENOMEM); - memset(conf, 0, TS_PRIV_ALIGN(sizeof(*conf)) + payload); return conf; } diff --git a/include/linux/tipc_config.h b/include/linux/tipc_config.h index b0c916d1f375..2bc6fa4adeb5 100644 --- a/include/linux/tipc_config.h +++ b/include/linux/tipc_config.h @@ -2,7 +2,7 @@ * include/linux/tipc_config.h: Include file for TIPC configuration interface * * Copyright (c) 2003-2006, Ericsson AB - * Copyright (c) 2005, Wind River Systems + * Copyright (c) 2005-2007, Wind River Systems * All rights reserved. * * Redistribution and use in source and binary forms, with or without @@ -136,6 +136,14 @@ #define TIPC_CMD_SET_NETID 0x800B /* tx unsigned, rx none */ /* + * Reserved commands: + * May not be issued by any process. + * Used internally by TIPC. 
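Returning to the textsearch change above: TS_IGNORECASE is simply ORed into the flags passed at prepare time and stored in ts_config.flags for the algorithm to honour. A hedged sketch, assuming the chosen algorithm supports the flag and with an illustrative pattern:

#include <linux/err.h>
#include <linux/textsearch.h>

static unsigned int find_keyword(const void *data, unsigned int len)
{
        struct ts_config *conf;
        struct ts_state state;
        unsigned int pos;

        conf = textsearch_prepare("kmp", "token", 5, GFP_KERNEL,
                                  TS_AUTOLOAD | TS_IGNORECASE);
        if (IS_ERR(conf))
                return UINT_MAX;

        pos = textsearch_find_continuous(conf, &state, data, len);
        textsearch_destroy(conf);
        return pos;             /* UINT_MAX means "no match" */
}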
+ */ + +#define TIPC_CMD_NOT_NET_ADMIN 0xC001 /* tx none, rx none */ + +/* * TLV types defined for TIPC */ diff --git a/include/linux/topology.h b/include/linux/topology.h index 24f3d2282e11..2158fc0d5a56 100644 --- a/include/linux/topology.h +++ b/include/linux/topology.h @@ -179,4 +179,17 @@ void arch_update_cpu_topology(void); #endif #endif /* CONFIG_NUMA */ +#ifndef topology_physical_package_id +#define topology_physical_package_id(cpu) ((void)(cpu), -1) +#endif +#ifndef topology_core_id +#define topology_core_id(cpu) ((void)(cpu), 0) +#endif +#ifndef topology_thread_siblings +#define topology_thread_siblings(cpu) cpumask_of_cpu(cpu) +#endif +#ifndef topology_core_siblings +#define topology_core_siblings(cpu) cpumask_of_cpu(cpu) +#endif + #endif /* _LINUX_TOPOLOGY_H */ diff --git a/include/linux/tty.h b/include/linux/tty.h index 324a3b231d40..e3579cb086e0 100644 --- a/include/linux/tty.h +++ b/include/linux/tty.h @@ -71,7 +71,8 @@ struct tty_bufhead { struct tty_buffer *head; /* Queue head */ struct tty_buffer *tail; /* Active buffer */ struct tty_buffer *free; /* Free queue head */ - int memory_used; /* Buffer space used excluding free queue */ + int memory_used; /* Buffer space used excluding + free queue */ }; /* * When a break, frame error, or parity error happens, these codes are @@ -101,71 +102,96 @@ struct tty_bufhead { #define LNEXT_CHAR(tty) ((tty)->termios->c_cc[VLNEXT]) #define EOL2_CHAR(tty) ((tty)->termios->c_cc[VEOL2]) -#define _I_FLAG(tty,f) ((tty)->termios->c_iflag & (f)) -#define _O_FLAG(tty,f) ((tty)->termios->c_oflag & (f)) -#define _C_FLAG(tty,f) ((tty)->termios->c_cflag & (f)) -#define _L_FLAG(tty,f) ((tty)->termios->c_lflag & (f)) - -#define I_IGNBRK(tty) _I_FLAG((tty),IGNBRK) -#define I_BRKINT(tty) _I_FLAG((tty),BRKINT) -#define I_IGNPAR(tty) _I_FLAG((tty),IGNPAR) -#define I_PARMRK(tty) _I_FLAG((tty),PARMRK) -#define I_INPCK(tty) _I_FLAG((tty),INPCK) -#define I_ISTRIP(tty) _I_FLAG((tty),ISTRIP) -#define I_INLCR(tty) _I_FLAG((tty),INLCR) -#define I_IGNCR(tty) _I_FLAG((tty),IGNCR) -#define I_ICRNL(tty) _I_FLAG((tty),ICRNL) -#define I_IUCLC(tty) _I_FLAG((tty),IUCLC) -#define I_IXON(tty) _I_FLAG((tty),IXON) -#define I_IXANY(tty) _I_FLAG((tty),IXANY) -#define I_IXOFF(tty) _I_FLAG((tty),IXOFF) -#define I_IMAXBEL(tty) _I_FLAG((tty),IMAXBEL) -#define I_IUTF8(tty) _I_FLAG((tty),IUTF8) - -#define O_OPOST(tty) _O_FLAG((tty),OPOST) -#define O_OLCUC(tty) _O_FLAG((tty),OLCUC) -#define O_ONLCR(tty) _O_FLAG((tty),ONLCR) -#define O_OCRNL(tty) _O_FLAG((tty),OCRNL) -#define O_ONOCR(tty) _O_FLAG((tty),ONOCR) -#define O_ONLRET(tty) _O_FLAG((tty),ONLRET) -#define O_OFILL(tty) _O_FLAG((tty),OFILL) -#define O_OFDEL(tty) _O_FLAG((tty),OFDEL) -#define O_NLDLY(tty) _O_FLAG((tty),NLDLY) -#define O_CRDLY(tty) _O_FLAG((tty),CRDLY) -#define O_TABDLY(tty) _O_FLAG((tty),TABDLY) -#define O_BSDLY(tty) _O_FLAG((tty),BSDLY) -#define O_VTDLY(tty) _O_FLAG((tty),VTDLY) -#define O_FFDLY(tty) _O_FLAG((tty),FFDLY) - -#define C_BAUD(tty) _C_FLAG((tty),CBAUD) -#define C_CSIZE(tty) _C_FLAG((tty),CSIZE) -#define C_CSTOPB(tty) _C_FLAG((tty),CSTOPB) -#define C_CREAD(tty) _C_FLAG((tty),CREAD) -#define C_PARENB(tty) _C_FLAG((tty),PARENB) -#define C_PARODD(tty) _C_FLAG((tty),PARODD) -#define C_HUPCL(tty) _C_FLAG((tty),HUPCL) -#define C_CLOCAL(tty) _C_FLAG((tty),CLOCAL) -#define C_CIBAUD(tty) _C_FLAG((tty),CIBAUD) -#define C_CRTSCTS(tty) _C_FLAG((tty),CRTSCTS) - -#define L_ISIG(tty) _L_FLAG((tty),ISIG) -#define L_ICANON(tty) _L_FLAG((tty),ICANON) -#define L_XCASE(tty) _L_FLAG((tty),XCASE) -#define L_ECHO(tty) 
_L_FLAG((tty),ECHO) -#define L_ECHOE(tty) _L_FLAG((tty),ECHOE) -#define L_ECHOK(tty) _L_FLAG((tty),ECHOK) -#define L_ECHONL(tty) _L_FLAG((tty),ECHONL) -#define L_NOFLSH(tty) _L_FLAG((tty),NOFLSH) -#define L_TOSTOP(tty) _L_FLAG((tty),TOSTOP) -#define L_ECHOCTL(tty) _L_FLAG((tty),ECHOCTL) -#define L_ECHOPRT(tty) _L_FLAG((tty),ECHOPRT) -#define L_ECHOKE(tty) _L_FLAG((tty),ECHOKE) -#define L_FLUSHO(tty) _L_FLAG((tty),FLUSHO) -#define L_PENDIN(tty) _L_FLAG((tty),PENDIN) -#define L_IEXTEN(tty) _L_FLAG((tty),IEXTEN) +#define _I_FLAG(tty, f) ((tty)->termios->c_iflag & (f)) +#define _O_FLAG(tty, f) ((tty)->termios->c_oflag & (f)) +#define _C_FLAG(tty, f) ((tty)->termios->c_cflag & (f)) +#define _L_FLAG(tty, f) ((tty)->termios->c_lflag & (f)) + +#define I_IGNBRK(tty) _I_FLAG((tty), IGNBRK) +#define I_BRKINT(tty) _I_FLAG((tty), BRKINT) +#define I_IGNPAR(tty) _I_FLAG((tty), IGNPAR) +#define I_PARMRK(tty) _I_FLAG((tty), PARMRK) +#define I_INPCK(tty) _I_FLAG((tty), INPCK) +#define I_ISTRIP(tty) _I_FLAG((tty), ISTRIP) +#define I_INLCR(tty) _I_FLAG((tty), INLCR) +#define I_IGNCR(tty) _I_FLAG((tty), IGNCR) +#define I_ICRNL(tty) _I_FLAG((tty), ICRNL) +#define I_IUCLC(tty) _I_FLAG((tty), IUCLC) +#define I_IXON(tty) _I_FLAG((tty), IXON) +#define I_IXANY(tty) _I_FLAG((tty), IXANY) +#define I_IXOFF(tty) _I_FLAG((tty), IXOFF) +#define I_IMAXBEL(tty) _I_FLAG((tty), IMAXBEL) +#define I_IUTF8(tty) _I_FLAG((tty), IUTF8) + +#define O_OPOST(tty) _O_FLAG((tty), OPOST) +#define O_OLCUC(tty) _O_FLAG((tty), OLCUC) +#define O_ONLCR(tty) _O_FLAG((tty), ONLCR) +#define O_OCRNL(tty) _O_FLAG((tty), OCRNL) +#define O_ONOCR(tty) _O_FLAG((tty), ONOCR) +#define O_ONLRET(tty) _O_FLAG((tty), ONLRET) +#define O_OFILL(tty) _O_FLAG((tty), OFILL) +#define O_OFDEL(tty) _O_FLAG((tty), OFDEL) +#define O_NLDLY(tty) _O_FLAG((tty), NLDLY) +#define O_CRDLY(tty) _O_FLAG((tty), CRDLY) +#define O_TABDLY(tty) _O_FLAG((tty), TABDLY) +#define O_BSDLY(tty) _O_FLAG((tty), BSDLY) +#define O_VTDLY(tty) _O_FLAG((tty), VTDLY) +#define O_FFDLY(tty) _O_FLAG((tty), FFDLY) + +#define C_BAUD(tty) _C_FLAG((tty), CBAUD) +#define C_CSIZE(tty) _C_FLAG((tty), CSIZE) +#define C_CSTOPB(tty) _C_FLAG((tty), CSTOPB) +#define C_CREAD(tty) _C_FLAG((tty), CREAD) +#define C_PARENB(tty) _C_FLAG((tty), PARENB) +#define C_PARODD(tty) _C_FLAG((tty), PARODD) +#define C_HUPCL(tty) _C_FLAG((tty), HUPCL) +#define C_CLOCAL(tty) _C_FLAG((tty), CLOCAL) +#define C_CIBAUD(tty) _C_FLAG((tty), CIBAUD) +#define C_CRTSCTS(tty) _C_FLAG((tty), CRTSCTS) + +#define L_ISIG(tty) _L_FLAG((tty), ISIG) +#define L_ICANON(tty) _L_FLAG((tty), ICANON) +#define L_XCASE(tty) _L_FLAG((tty), XCASE) +#define L_ECHO(tty) _L_FLAG((tty), ECHO) +#define L_ECHOE(tty) _L_FLAG((tty), ECHOE) +#define L_ECHOK(tty) _L_FLAG((tty), ECHOK) +#define L_ECHONL(tty) _L_FLAG((tty), ECHONL) +#define L_NOFLSH(tty) _L_FLAG((tty), NOFLSH) +#define L_TOSTOP(tty) _L_FLAG((tty), TOSTOP) +#define L_ECHOCTL(tty) _L_FLAG((tty), ECHOCTL) +#define L_ECHOPRT(tty) _L_FLAG((tty), ECHOPRT) +#define L_ECHOKE(tty) _L_FLAG((tty), ECHOKE) +#define L_FLUSHO(tty) _L_FLAG((tty), FLUSHO) +#define L_PENDIN(tty) _L_FLAG((tty), PENDIN) +#define L_IEXTEN(tty) _L_FLAG((tty), IEXTEN) struct device; struct signal_struct; + +/* + * Port level information. Each device keeps its own port level information + * so provide a common structure for those ports wanting to use common support + * routines. + * + * The tty port has a different lifetime to the tty so must be kept apart. 
+ * In addition be careful as tty -> port mappings are valid for the life + * of the tty object but in many cases port -> tty mappings are valid only + * until a hangup so don't use the wrong path. + */ + +struct tty_port { + struct tty_struct *tty; /* Back pointer */ + int blocked_open; /* Waiting to open */ + int count; /* Usage count */ + wait_queue_head_t open_wait; /* Open waiters */ + wait_queue_head_t close_wait; /* Close waiters */ + unsigned long flags; /* TTY flags ASY_*/ + struct mutex mutex; /* Locking */ + unsigned char *xmit_buf; /* Optional buffer */ + int close_delay; /* Close port delay */ + int closing_wait; /* Delay for output */ +}; + /* * Where all of the state associated with a tty is kept while the tty * is open. Since the termios state should be kept even if the tty @@ -185,6 +211,7 @@ struct tty_struct { struct tty_driver *driver; const struct tty_operations *ops; int index; + /* The ldisc objects are protected by tty_ldisc_lock at the moment */ struct tty_ldisc ldisc; struct mutex termios_mutex; spinlock_t ctrl_lock; @@ -213,7 +240,7 @@ struct tty_struct { struct list_head tty_files; #define N_TTY_BUF_SIZE 4096 - + /* * The following is data for the N_TTY line discipline. For * historical reasons, this is included in the tty structure. @@ -241,6 +268,7 @@ struct tty_struct { spinlock_t read_lock; /* If the tty has a pending do_SAK, queue it here - akpm */ struct work_struct SAK_work; + struct tty_port *port; }; /* tty magic number */ @@ -248,14 +276,14 @@ struct tty_struct { /* * These bits are used in the flags field of the tty structure. - * + * * So that interrupts won't be able to mess up the queues, * copy_to_cooked must be atomic with respect to itself, as must * tty->write. Thus, you must use the inline functions set_bit() and * clear_bit() to make things atomic. 
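A brief sketch of a serial driver picking up the new struct tty_port defined above; the mydrv_* names and the extra field are hypothetical, and tty_port_init() is the helper declared further down in this header:

struct mydrv_port {
        struct tty_port port;           /* common open/close bookkeeping */
        void __iomem    *regs;          /* driver-private hardware state */
};

static void mydrv_init_port(struct mydrv_port *mp)
{
        tty_port_init(&mp->port);       /* waitqueues, mutex and counters */
        mp->port.close_delay  = HZ / 2;
        mp->port.closing_wait = 30 * HZ;
}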
*/ #define TTY_THROTTLED 0 /* Call unthrottle() at threshold min */ -#define TTY_IO_ERROR 1 /* Canse an I/O error (may be no ldisc too) */ +#define TTY_IO_ERROR 1 /* Cause an I/O error (may be no ldisc too) */ #define TTY_OTHER_CLOSED 2 /* Other side (if any) has closed */ #define TTY_EXCLUSIVE 3 /* Exclusive open mode */ #define TTY_DEBUG 4 /* Debugging */ @@ -285,12 +313,10 @@ extern int vcs_init(void); extern int tty_paranoia_check(struct tty_struct *tty, struct inode *inode, const char *routine); extern char *tty_name(struct tty_struct *tty, char *buf); -extern void tty_wait_until_sent(struct tty_struct * tty, long timeout); -extern int tty_check_change(struct tty_struct * tty); -extern void stop_tty(struct tty_struct * tty); -extern void start_tty(struct tty_struct * tty); -extern int tty_register_ldisc(int disc, struct tty_ldisc *new_ldisc); -extern int tty_unregister_ldisc(int disc); +extern void tty_wait_until_sent(struct tty_struct *tty, long timeout); +extern int tty_check_change(struct tty_struct *tty); +extern void stop_tty(struct tty_struct *tty); +extern void start_tty(struct tty_struct *tty); extern int tty_register_driver(struct tty_driver *driver); extern int tty_unregister_driver(struct tty_driver *driver); extern struct device *tty_register_device(struct tty_driver *driver, @@ -310,10 +336,10 @@ extern int is_current_pgrp_orphaned(void); extern struct pid *tty_get_pgrp(struct tty_struct *tty); extern int is_ignored(int sig); extern int tty_signal(int sig, struct tty_struct *tty); -extern void tty_hangup(struct tty_struct * tty); -extern void tty_vhangup(struct tty_struct * tty); +extern void tty_hangup(struct tty_struct *tty); +extern void tty_vhangup(struct tty_struct *tty); extern void tty_unhangup(struct file *filp); -extern int tty_hung_up_p(struct file * filp); +extern int tty_hung_up_p(struct file *filp); extern void do_SAK(struct tty_struct *tty); extern void __do_SAK(struct tty_struct *tty); extern void disassociate_ctty(int priv); @@ -322,17 +348,17 @@ extern void tty_flip_buffer_push(struct tty_struct *tty); extern speed_t tty_get_baud_rate(struct tty_struct *tty); extern speed_t tty_termios_baud_rate(struct ktermios *termios); extern speed_t tty_termios_input_baud_rate(struct ktermios *termios); -extern void tty_termios_encode_baud_rate(struct ktermios *termios, speed_t ibaud, speed_t obaud); -extern void tty_encode_baud_rate(struct tty_struct *tty, speed_t ibaud, speed_t obaud); +extern void tty_termios_encode_baud_rate(struct ktermios *termios, + speed_t ibaud, speed_t obaud); +extern void tty_encode_baud_rate(struct tty_struct *tty, + speed_t ibaud, speed_t obaud); extern void tty_termios_copy_hw(struct ktermios *new, struct ktermios *old); extern int tty_termios_hw_change(struct ktermios *a, struct ktermios *b); extern struct tty_ldisc *tty_ldisc_ref(struct tty_struct *); extern void tty_ldisc_deref(struct tty_ldisc *); extern struct tty_ldisc *tty_ldisc_ref_wait(struct tty_struct *); - -extern struct tty_ldisc *tty_ldisc_get(int); -extern void tty_ldisc_put(int); +extern const struct file_operations tty_ldiscs_proc_fops; extern void tty_wakeup(struct tty_struct *tty); extern void tty_ldisc_flush(struct tty_struct *tty); @@ -351,10 +377,23 @@ extern void tty_write_unlock(struct tty_struct *tty); extern int tty_write_lock(struct tty_struct *tty, int ndelay); #define tty_is_writelocked(tty) (mutex_is_locked(&tty->atomic_write_lock)) +extern void tty_port_init(struct tty_port *port); +extern int tty_port_alloc_xmit_buf(struct tty_port *port); +extern void 
tty_port_free_xmit_buf(struct tty_port *port); + +extern int tty_register_ldisc(int disc, struct tty_ldisc_ops *new_ldisc); +extern int tty_unregister_ldisc(int disc); +extern int tty_set_ldisc(struct tty_struct *tty, int ldisc); +extern int tty_ldisc_setup(struct tty_struct *tty, struct tty_struct *o_tty); +extern void tty_ldisc_release(struct tty_struct *tty, struct tty_struct *o_tty); +extern void tty_ldisc_init(struct tty_struct *tty); +extern void tty_ldisc_begin(void); +/* This last one is just for the tty layer internals and shouldn't be used elsewhere */ +extern void tty_ldisc_enable(struct tty_struct *tty); /* n_tty.c */ -extern struct tty_ldisc tty_ldisc_N_TTY; +extern struct tty_ldisc_ops tty_ldisc_N_TTY; /* tty_audit.c */ #ifdef CONFIG_AUDIT @@ -363,7 +402,8 @@ extern void tty_audit_add_data(struct tty_struct *tty, unsigned char *data, extern void tty_audit_exit(void); extern void tty_audit_fork(struct signal_struct *sig); extern void tty_audit_push(struct tty_struct *tty); -extern void tty_audit_push_task(struct task_struct *tsk, uid_t loginuid, u32 sessionid); +extern void tty_audit_push_task(struct task_struct *tsk, + uid_t loginuid, u32 sessionid); #else static inline void tty_audit_add_data(struct tty_struct *tty, unsigned char *data, size_t size) @@ -378,19 +418,20 @@ static inline void tty_audit_fork(struct signal_struct *sig) static inline void tty_audit_push(struct tty_struct *tty) { } -static inline void tty_audit_push_task(struct task_struct *tsk, uid_t loginuid, u32 sessionid) +static inline void tty_audit_push_task(struct task_struct *tsk, + uid_t loginuid, u32 sessionid) { } #endif /* tty_ioctl.c */ -extern int n_tty_ioctl(struct tty_struct * tty, struct file * file, +extern int n_tty_ioctl(struct tty_struct *tty, struct file *file, unsigned int cmd, unsigned long arg); /* serial.c */ extern void serial_console_init(void); - + /* pcxx.c */ extern int pcxe_open(struct tty_struct *tty, struct file *filp); @@ -401,7 +442,7 @@ extern void console_print(const char *); /* vt.c */ -extern int vt_ioctl(struct tty_struct *tty, struct file * file, +extern int vt_ioctl(struct tty_struct *tty, struct file *file, unsigned int cmd, unsigned long arg); #endif /* __KERNEL__ */ diff --git a/include/linux/tty_driver.h b/include/linux/tty_driver.h index d2a003586761..e1065ac0d922 100644 --- a/include/linux/tty_driver.h +++ b/include/linux/tty_driver.h @@ -135,7 +135,7 @@ * * Optional: * - * void (*break_ctl)(struct tty_stuct *tty, int state); + * int (*break_ctl)(struct tty_stuct *tty, int state); * * This optional routine requests the tty driver to turn on or * off BREAK status on the RS-232 port. If state is -1, @@ -146,6 +146,10 @@ * handle the following ioctls: TCSBRK, TCSBRKP, TIOCSBRK, * TIOCCBRK. * + * If the driver sets TTY_DRIVER_HARDWARE_BREAK then the interface + * will also be called with actual times and the hardware is expected + * to do the delay work itself. 0 and -1 are still used for on/off. + * * Optional: Required for TCSBRK/BRKP/etc handling. 
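For the break_ctl() prototype change documented above, a hedged sketch of a driver method under the new int-returning form; the hardware helpers are invented:

static int mydrv_break_ctl(struct tty_struct *tty, int state)
{
        struct mydrv_port *mp = tty->driver_data;

        if (state == -1)
                mydrv_hw_break_on(mp);  /* assert BREAK (hypothetical helper) */
        else if (state == 0)
                mydrv_hw_break_off(mp);
        else
                return -EINVAL;         /* timed values only arrive when
                                           TTY_DRIVER_HARDWARE_BREAK is set */
        return 0;
}

A driver whose hardware can time the break itself would instead set TTY_DRIVER_HARDWARE_BREAK and act on the requested duration directly.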
* * void (*wait_until_sent)(struct tty_struct *tty, int timeout); @@ -192,7 +196,7 @@ struct tty_operations { void (*stop)(struct tty_struct *tty); void (*start)(struct tty_struct *tty); void (*hangup)(struct tty_struct *tty); - void (*break_ctl)(struct tty_struct *tty, int state); + int (*break_ctl)(struct tty_struct *tty, int state); void (*flush_buffer)(struct tty_struct *tty); void (*set_ldisc)(struct tty_struct *tty); void (*wait_until_sent)(struct tty_struct *tty, int timeout); @@ -285,12 +289,18 @@ extern struct tty_driver *tty_find_polling_driver(char *name, int *line); * TTY_DRIVER_DEVPTS_MEM -- don't use the standard arrays, instead * use dynamic memory keyed through the devpts filesystem. This * is only applicable to the pty driver. + * + * TTY_DRIVER_HARDWARE_BREAK -- hardware handles break signals. Pass + * the requested timeout to the caller instead of using a simple + * on/off interface. + * */ #define TTY_DRIVER_INSTALLED 0x0001 #define TTY_DRIVER_RESET_TERMIOS 0x0002 #define TTY_DRIVER_REAL_RAW 0x0004 #define TTY_DRIVER_DYNAMIC_DEV 0x0008 #define TTY_DRIVER_DEVPTS_MEM 0x0010 +#define TTY_DRIVER_HARDWARE_BREAK 0x0020 /* tty driver types */ #define TTY_DRIVER_TYPE_SYSTEM 0x0001 diff --git a/include/linux/tty_ldisc.h b/include/linux/tty_ldisc.h index 6226504d9108..40f38d896777 100644 --- a/include/linux/tty_ldisc.h +++ b/include/linux/tty_ldisc.h @@ -104,7 +104,7 @@ #include <linux/fs.h> #include <linux/wait.h> -struct tty_ldisc { +struct tty_ldisc_ops { int magic; char *name; int num; @@ -142,6 +142,11 @@ struct tty_ldisc { int refcount; }; +struct tty_ldisc { + struct tty_ldisc_ops *ops; + int refcount; +}; + #define TTY_LDISC_MAGIC 0x5403 #define LDISC_FLAG_DEFINED 0x00000001 diff --git a/include/linux/udp.h b/include/linux/udp.h index 581ca2c14c52..0cf5c4c0ec81 100644 --- a/include/linux/udp.h +++ b/include/linux/udp.h @@ -38,6 +38,7 @@ struct udphdr { #ifdef __KERNEL__ #include <net/inet_sock.h> #include <linux/skbuff.h> +#include <net/netns/hash.h> static inline struct udphdr *udp_hdr(const struct sk_buff *skb) { @@ -46,6 +47,11 @@ static inline struct udphdr *udp_hdr(const struct sk_buff *skb) #define UDP_HTABLE_SIZE 128 +static inline int udp_hashfn(struct net *net, const unsigned num) +{ + return (num + net_hash_mix(net)) & (UDP_HTABLE_SIZE - 1); +} + struct udp_sock { /* inet_sock has to be the first member */ struct inet_sock inet; diff --git a/include/linux/uio_driver.h b/include/linux/uio_driver.h index 973386d439da..cdf338d94b7f 100644 --- a/include/linux/uio_driver.h +++ b/include/linux/uio_driver.h @@ -36,7 +36,7 @@ struct uio_mem { struct uio_map *map; }; -#define MAX_UIO_MAPS 5 +#define MAX_UIO_MAPS 5 struct uio_device; @@ -53,6 +53,7 @@ struct uio_device; * @mmap: mmap operation for this uio device * @open: open operation for this uio device * @release: release operation for this uio device + * @irqcontrol: disable/enable irqs when 0/1 is written to /dev/uioX */ struct uio_info { struct uio_device *uio_dev; @@ -66,6 +67,7 @@ struct uio_info { int (*mmap)(struct uio_info *info, struct vm_area_struct *vma); int (*open)(struct uio_info *info, struct inode *inode); int (*release)(struct uio_info *info, struct inode *inode); + int (*irqcontrol)(struct uio_info *info, s32 irq_on); }; extern int __must_check @@ -80,11 +82,11 @@ static inline int __must_check extern void uio_unregister_device(struct uio_info *info); extern void uio_event_notify(struct uio_info *info); -/* defines for uio_device->irq */ +/* defines for uio_info->irq */ #define UIO_IRQ_CUSTOM -1 
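The irqcontrol() hook noted above lets userspace gate the interrupt by writing 0 or 1 to /dev/uioX; a hypothetical sketch in which the register offset and priv layout are invented:

#define MYDEV_IRQ_ENABLE_REG 0x10       /* hypothetical MMIO register offset */

struct mydev_priv {
        void __iomem *base;             /* hypothetical MMIO base */
};

static int mydev_irqcontrol(struct uio_info *info, s32 irq_on)
{
        struct mydev_priv *priv = info->priv;

        /* mask or unmask the device interrupt as requested by userspace */
        writel(irq_on ? 1 : 0, priv->base + MYDEV_IRQ_ENABLE_REG);
        return 0;
}

/* wired up at probe time, next to info->irq and info->handler:
 *      info->irqcontrol = mydev_irqcontrol;
 */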
#define UIO_IRQ_NONE -2 -/* defines for uio_device->memtype */ +/* defines for uio_mem->memtype */ #define UIO_MEM_NONE 0 #define UIO_MEM_PHYS 1 #define UIO_MEM_LOGICAL 2 diff --git a/include/linux/usb.h b/include/linux/usb.h index c08689ea9b4b..5811c5da69f9 100644 --- a/include/linux/usb.h +++ b/include/linux/usb.h @@ -160,6 +160,7 @@ struct usb_interface { unsigned is_active:1; /* the interface is not suspended */ unsigned sysfs_files_created:1; /* the sysfs attributes exist */ unsigned needs_remote_wakeup:1; /* driver requires remote wakeup */ + unsigned needs_binding:1; /* needs delayed unbind/rebind */ struct device dev; /* interface specific device info */ struct device *usb_dev; @@ -293,7 +294,7 @@ struct usb_devmap { struct usb_bus { struct device *controller; /* host/master side hardware */ int busnum; /* Bus number (in order of reg) */ - char *bus_name; /* stable id (PCI slot_name etc) */ + const char *bus_name; /* stable id (PCI slot_name etc) */ u8 uses_dma; /* Does the host controller use DMA? */ u8 otg_port; /* 0, or number of OTG/HNP port */ unsigned is_b_host:1; /* true during some HNP roleswitches */ @@ -497,8 +498,6 @@ extern int usb_lock_device_for_reset(struct usb_device *udev, /* USB port reset for device reinitialization */ extern int usb_reset_device(struct usb_device *dev); -extern int usb_reset_composite_device(struct usb_device *dev, - struct usb_interface *iface); extern struct usb_device *usb_find_device(u16 vendor_id, u16 product_id); @@ -958,9 +957,9 @@ struct usbdrv_wrap { * @resume: Called when the device is being resumed by the system. * @reset_resume: Called when the suspended device has been reset instead * of being resumed. - * @pre_reset: Called by usb_reset_composite_device() when the device + * @pre_reset: Called by usb_reset_device() when the device * is about to be reset. - * @post_reset: Called by usb_reset_composite_device() after the device + * @post_reset: Called by usb_reset_device() after the device * has been reset * @id_table: USB drivers use ID table to support hotplugging. * Export this with MODULE_DEVICE_TABLE(usb,...). This must be set @@ -972,6 +971,8 @@ struct usbdrv_wrap { * added to this driver by preventing the sysfs file from being created. * @supports_autosuspend: if set to 0, the USB core will not allow autosuspend * for interfaces bound to this driver. + * @soft_unbind: if set to 1, the USB core will not kill URBs and disable + * endpoints before calling the driver's disconnect method. * * USB interface drivers must provide a name, probe() and disconnect() * methods, and an id_table. Other driver fields are optional. @@ -1012,6 +1013,7 @@ struct usb_driver { struct usbdrv_wrap drvwrap; unsigned int no_dynamic_id:1; unsigned int supports_autosuspend:1; + unsigned int soft_unbind:1; }; #define to_usb_driver(d) container_of(d, struct usb_driver, drvwrap.driver) diff --git a/include/linux/usb/composite.h b/include/linux/usb/composite.h new file mode 100644 index 000000000000..747c3a49cdc9 --- /dev/null +++ b/include/linux/usb/composite.h @@ -0,0 +1,338 @@ +/* + * composite.h -- framework for usb gadgets which are composite devices + * + * Copyright (C) 2006-2008 David Brownell + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. 
+ * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#ifndef __LINUX_USB_COMPOSITE_H +#define __LINUX_USB_COMPOSITE_H + +/* + * This framework is an optional layer on top of the USB Gadget interface, + * making it easier to build (a) Composite devices, supporting multiple + * functions within any single configuration, and (b) Multi-configuration + * devices, also supporting multiple functions but without necessarily + * having more than one function per configuration. + * + * Example: a device with a single configuration supporting both network + * link and mass storage functions is a composite device. Those functions + * might alternatively be packaged in individual configurations, but in + * the composite model the host can use both functions at the same time. + */ + +#include <linux/usb/ch9.h> +#include <linux/usb/gadget.h> + + +struct usb_configuration; + +/** + * struct usb_function - describes one function of a configuration + * @name: For diagnostics, identifies the function. + * @strings: tables of strings, keyed by identifiers assigned during bind() + * and by language IDs provided in control requests + * @descriptors: Table of full (or low) speed descriptors, using interface and + * string identifiers assigned during @bind(). If this pointer is null, + * the function will not be available at full speed (or at low speed). + * @hs_descriptors: Table of high speed descriptors, using interface and + * string identifiers assigned during @bind(). If this pointer is null, + * the function will not be available at high speed. + * @config: assigned when @usb_add_function() is called; this is the + * configuration with which this function is associated. + * @bind: Before the gadget can register, all of its functions bind() to the + * available resources including string and interface identifiers used + * in interface or class descriptors; endpoints; I/O buffers; and so on. + * @unbind: Reverses @bind; called as a side effect of unregistering the + * driver which added this function. + * @set_alt: (REQUIRED) Reconfigures altsettings; function drivers may + * initialize usb_ep.driver_data at this time (when it is used). + * Note that setting an interface to its current altsetting resets + * interface state, and that all interfaces have a disabled state. + * @get_alt: Returns the active altsetting. If this is not provided, + * then only altsetting zero is supported. + * @disable: (REQUIRED) Indicates the function should be disabled. Reasons + * include host resetting or reconfiguring the gadget, and disconnection. + * @setup: Used for interface-specific control requests. + * @suspend: Notifies functions when the host stops sending USB traffic. + * @resume: Notifies functions when the host restarts USB traffic. + * + * A single USB function uses one or more interfaces, and should in most + * cases support operation at both full and high speeds. Each function is + * associated by @usb_add_function() with one configuration; that function + * causes @bind() to be called so resources can be allocated as part of + * setting up a gadget driver.
Those resources include endpoints, which + * should be allocated using @usb_ep_autoconfig(). + * + * To support dual speed operation, a function driver provides descriptors + * for both high and full speed operation. Except in rare cases that don't + * involve bulk endpoints, each speed needs different endpoint descriptors. + * + * Function drivers choose their own strategies for managing instance data. + * The simplest strategy just declares it "static", which means the function + * can only be activated once. If the function needs to be exposed in more + * than one configuration at a given speed, it needs to support multiple + * usb_function structures (one for each configuration). + * + * A more complex strategy might encapsulate a @usb_function structure inside + * a driver-specific instance structure to allow multiple activations. An + * example of multiple activations might be a CDC ACM function that supports + * two or more distinct instances within the same configuration, providing + * several independent logical data links to a USB host. + */ +struct usb_function { + const char *name; + struct usb_gadget_strings **strings; + struct usb_descriptor_header **descriptors; + struct usb_descriptor_header **hs_descriptors; + + struct usb_configuration *config; + + /* REVISIT: bind() functions can be marked __init, which + * makes trouble for section mismatch analysis. See if + * we can't restructure things to avoid mismatching. + * Related: unbind() may kfree() but bind() won't... + */ + + /* configuration management: bind/unbind */ + int (*bind)(struct usb_configuration *, + struct usb_function *); + void (*unbind)(struct usb_configuration *, + struct usb_function *); + + /* runtime state management */ + int (*set_alt)(struct usb_function *, + unsigned interface, unsigned alt); + int (*get_alt)(struct usb_function *, + unsigned interface); + void (*disable)(struct usb_function *); + int (*setup)(struct usb_function *, + const struct usb_ctrlrequest *); + void (*suspend)(struct usb_function *); + void (*resume)(struct usb_function *); + + /* internals */ + struct list_head list; +}; + +int usb_add_function(struct usb_configuration *, struct usb_function *); + +int usb_interface_id(struct usb_configuration *, struct usb_function *); + +/** + * ep_choose - select descriptor endpoint at current device speed + * @g: gadget, connected and running at some speed + * @hs: descriptor to use for high speed operation + * @fs: descriptor to use for full or low speed operation + */ +static inline struct usb_endpoint_descriptor * +ep_choose(struct usb_gadget *g, struct usb_endpoint_descriptor *hs, + struct usb_endpoint_descriptor *fs) +{ + if (gadget_is_dualspeed(g) && g->speed == USB_SPEED_HIGH) + return hs; + return fs; +} + +#define MAX_CONFIG_INTERFACES 16 /* arbitrary; max 255 */ + +/** + * struct usb_configuration - represents one gadget configuration + * @label: For diagnostics, describes the configuration. + * @strings: Tables of strings, keyed by identifiers assigned during @bind() + * and by language IDs provided in control requests. + * @descriptors: Table of descriptors preceding all function descriptors. + * Examples include OTG and vendor-specific descriptors. + * @bind: Called from @usb_add_config() to allocate resources unique to this + * configuration and to call @usb_add_function() for each function used. + * @unbind: Reverses @bind; called as a side effect of unregistering the + * driver which added this configuration.
+ * @setup: Used to delegate control requests that aren't handled by standard + * device infrastructure or directed at a specific interface. + * @bConfigurationValue: Copied into configuration descriptor. + * @iConfiguration: Copied into configuration descriptor. + * @bmAttributes: Copied into configuration descriptor. + * @bMaxPower: Copied into configuration descriptor. + * @cdev: assigned by @usb_add_config() before calling @bind(); this is + * the device associated with this configuration. + * + * Configurations are building blocks for gadget drivers structured around + * function drivers. Simple USB gadgets require only one function and one + * configuration, and handle dual-speed hardware by always providing the same + * functionality. Slightly more complex gadgets may have more than one + * single-function configuration at a given speed; or have configurations + * that only work at one speed. + * + * Composite devices are, by definition, ones with configurations which + * include more than one function. + * + * The lifecycle of a usb_configuration includes allocation, initialization + * of the fields described above, and calling @usb_add_config() to set up + * internal data and bind it to a specific device. The configuration's + * @bind() method is then used to initialize all the functions and then + * call @usb_add_function() for them. + * + * Those functions would normally be independent of each other, but that's + * not mandatory. CDC WMC devices are an example where functions often + * depend on other functions, with some functions subsidiary to others. + * Such interdependency may be managed in any way, so long as all of the + * descriptors complete by the time the composite driver returns from + * its bind() routine. + */ +struct usb_configuration { + const char *label; + struct usb_gadget_strings **strings; + const struct usb_descriptor_header **descriptors; + + /* REVISIT: bind() functions can be marked __init, which + * makes trouble for section mismatch analysis. See if + * we can't restructure things to avoid mismatching... + */ + + /* configuration management: bind/unbind */ + int (*bind)(struct usb_configuration *); + void (*unbind)(struct usb_configuration *); + int (*setup)(struct usb_configuration *, + const struct usb_ctrlrequest *); + + /* fields in the config descriptor */ + u8 bConfigurationValue; + u8 iConfiguration; + u8 bmAttributes; + u8 bMaxPower; + + struct usb_composite_dev *cdev; + + /* internals */ + struct list_head list; + struct list_head functions; + u8 next_interface_id; + unsigned highspeed:1; + unsigned fullspeed:1; + struct usb_function *interface[MAX_CONFIG_INTERFACES]; +}; + +int usb_add_config(struct usb_composite_dev *, + struct usb_configuration *); + +/** + * struct usb_composite_driver - groups configurations into a gadget + * @name: For diagnostics, identifies the driver. + * @dev: Template descriptor for the device, including default device + * identifiers. + * @strings: tables of strings, keyed by identifiers assigned during bind() + * and language IDs provided in control requests + * @bind: (REQUIRED) Used to allocate resources that are shared across the + * whole device, such as string IDs, and add its configurations using + * @usb_add_config(). This may fail by returning a negative errno + * value; it should return zero on successful initialization. + * @unbind: Reverses @bind(); called as a side effect of unregistering + * this driver. + * + * Devices default to reporting self powered operation.
Devices which rely + * on bus powered operation should report this in their @bind() method. + * + * Before returning from @bind, various fields in the template descriptor + * may be overridden. These include the idVendor/idProduct/bcdDevice values + * normally to bind the appropriate host side driver, and the three strings + * (iManufacturer, iProduct, iSerialNumber) normally used to provide user + * meaningful device identifiers. (The strings will not be defined unless + * they are defined in @dev and @strings.) The correct ep0 maxpacket size + * is also reported, as defined by the underlying controller driver. + */ +struct usb_composite_driver { + const char *name; + const struct usb_device_descriptor *dev; + struct usb_gadget_strings **strings; + + /* REVISIT: bind() functions can be marked __init, which + * makes trouble for section mismatch analysis. See if + * we can't restructure things to avoid mismatching... + */ + + int (*bind)(struct usb_composite_dev *); + int (*unbind)(struct usb_composite_dev *); +}; + +extern int usb_composite_register(struct usb_composite_driver *); +extern void usb_composite_unregister(struct usb_composite_driver *); + + +/** + * struct usb_composite_device - represents one composite usb gadget + * @gadget: read-only, abstracts the gadget's usb peripheral controller + * @req: used for control responses; buffer is pre-allocated + * @bufsiz: size of buffer pre-allocated in @req + * @config: the currently active configuration + * + * One of these devices is allocated and initialized before the + * associated device driver's bind() is called. + * + * OPEN ISSUE: it appears that some WUSB devices will need to be + * built by combining a normal (wired) gadget with a wireless one. + * This revision of the gadget framework should probably try to make + * sure doing that won't hurt too much. + * + * One notion for how to handle Wireless USB devices involves: + * (a) a second gadget here, discovery mechanism TBD, but likely + * needing separate "register/unregister WUSB gadget" calls; + * (b) updates to usb_gadget to include flags "is it wireless", + * "is it wired", plus (presumably in a wrapper structure) + * bandgroup and PHY info; + * (c) presumably a wireless_ep wrapping a usb_ep, and reporting + * wireless-specific parameters like maxburst and maxsequence; + * (d) configurations that are specific to wireless links; + * (e) function drivers that understand wireless configs and will + * support wireless for (additional) function instances; + * (f) a function to support association setup (like CBAF), not + * necessarily requiring a wireless adapter; + * (g) composite device setup that can create one or more wireless + * configs, including appropriate association setup support; + * (h) more, TBD. + */ +struct usb_composite_dev { + struct usb_gadget *gadget; + struct usb_request *req; + unsigned bufsiz; + + struct usb_configuration *config; + + /* internals */ + struct usb_device_descriptor desc; + struct list_head configs; + struct usb_composite_driver *driver; + u8 next_string_id; + + spinlock_t lock; + + /* REVISIT use and existence of lock ... */ +}; + +extern int usb_string_id(struct usb_composite_dev *c); + +/* messaging utils */ +#define DBG(d, fmt, args...) \ + dev_dbg(&(d)->gadget->dev , fmt , ## args) +#define VDBG(d, fmt, args...) \ + dev_vdbg(&(d)->gadget->dev , fmt , ## args) +#define ERROR(d, fmt, args...) \ + dev_err(&(d)->gadget->dev , fmt , ## args) +#define WARN(d, fmt, args...) 
\ + dev_warn(&(d)->gadget->dev , fmt , ## args) +#define INFO(d, fmt, args...) \ + dev_info(&(d)->gadget->dev , fmt , ## args) + +#endif /* __LINUX_USB_COMPOSITE_H */ diff --git a/include/linux/usb/gadget.h b/include/linux/usb/gadget.h index cf468fbdbf8e..0460a746480c 100644 --- a/include/linux/usb/gadget.h +++ b/include/linux/usb/gadget.h @@ -33,7 +33,8 @@ struct usb_ep; * @short_not_ok: When reading data, makes short packets be * treated as errors (queue stops advancing till cleanup). * @complete: Function called when request completes, so this request and - * its buffer may be re-used. + * its buffer may be re-used. The function will always be called with + * interrupts disabled, and it must not sleep. * Reads terminate with a short packet, or when the buffer fills, * whichever comes first. When writes terminate, some data bytes * will usually still be in flight (often in a hardware fifo). @@ -271,7 +272,10 @@ static inline void usb_ep_free_request(struct usb_ep *ep, * (Note that some USB device controllers disallow protocol stall responses * in some cases.) When control responses are deferred (the response is * written after the setup callback returns), then usb_ep_set_halt() may be - * used on ep0 to trigger protocol stalls. + * used on ep0 to trigger protocol stalls. Depending on the controller, + * it may not be possible to trigger a status-stage protocol stall when the + * data stage is over, that is, from within the response's completion + * routine. * * For periodic endpoints, like interrupt or isochronous ones, the usb host * arranges to poll once per interval, and the gadget driver usually will @@ -858,6 +862,25 @@ int usb_descriptor_fillbuf(void *, unsigned, int usb_gadget_config_buf(const struct usb_config_descriptor *config, void *buf, unsigned buflen, const struct usb_descriptor_header **desc); +/* copy a NULL-terminated vector of descriptors */ +struct usb_descriptor_header **usb_copy_descriptors( + struct usb_descriptor_header **); + +/* return copy of endpoint descriptor given original descriptor set */ +struct usb_endpoint_descriptor *usb_find_endpoint( + struct usb_descriptor_header **src, + struct usb_descriptor_header **copy, + struct usb_endpoint_descriptor *match); + +/** + * usb_free_descriptors - free descriptors returned by usb_copy_descriptors() + * @v: vector of descriptors + */ +static inline void usb_free_descriptors(struct usb_descriptor_header **v) +{ + kfree(v); +} + /*-------------------------------------------------------------------------*/ /* utility wrapping a simple endpoint selection policy */ diff --git a/include/linux/usb/irda.h b/include/linux/usb/irda.h new file mode 100644 index 000000000000..e345ceaf72d6 --- /dev/null +++ b/include/linux/usb/irda.h @@ -0,0 +1,151 @@ +/* + * USB IrDA Bridge Device Definition + */ + +#ifndef __LINUX_USB_IRDA_H +#define __LINUX_USB_IRDA_H + +/* This device should use Application-specific class */ + +#define USB_SUBCLASS_IRDA 0x02 + +/*-------------------------------------------------------------------------*/ + +/* Class-Specific requests (bRequest field) */ + +#define USB_REQ_CS_IRDA_RECEIVING 1 +#define USB_REQ_CS_IRDA_CHECK_MEDIA_BUSY 3 +#define USB_REQ_CS_IRDA_RATE_SNIFF 4 +#define USB_REQ_CS_IRDA_UNICAST_LIST 5 +#define USB_REQ_CS_IRDA_GET_CLASS_DESC 6 + +/*-------------------------------------------------------------------------*/ + +/* Class-Specific descriptor */ + +#define USB_DT_CS_IRDA 0x21 + +/*-------------------------------------------------------------------------*/ + +/* Data sizes */ + +#define 
USB_IRDA_DS_2048 (1 << 5) +#define USB_IRDA_DS_1024 (1 << 4) +#define USB_IRDA_DS_512 (1 << 3) +#define USB_IRDA_DS_256 (1 << 2) +#define USB_IRDA_DS_128 (1 << 1) +#define USB_IRDA_DS_64 (1 << 0) + +/* Window sizes */ + +#define USB_IRDA_WS_7 (1 << 6) +#define USB_IRDA_WS_6 (1 << 5) +#define USB_IRDA_WS_5 (1 << 4) +#define USB_IRDA_WS_4 (1 << 3) +#define USB_IRDA_WS_3 (1 << 2) +#define USB_IRDA_WS_2 (1 << 1) +#define USB_IRDA_WS_1 (1 << 0) + +/* Min turnaround times in usecs */ + +#define USB_IRDA_MTT_0 (1 << 7) +#define USB_IRDA_MTT_10 (1 << 6) +#define USB_IRDA_MTT_50 (1 << 5) +#define USB_IRDA_MTT_100 (1 << 4) +#define USB_IRDA_MTT_500 (1 << 3) +#define USB_IRDA_MTT_1000 (1 << 2) +#define USB_IRDA_MTT_5000 (1 << 1) +#define USB_IRDA_MTT_10000 (1 << 0) + +/* Baud rates */ + +#define USB_IRDA_BR_4000000 (1 << 8) +#define USB_IRDA_BR_1152000 (1 << 7) +#define USB_IRDA_BR_576000 (1 << 6) +#define USB_IRDA_BR_115200 (1 << 5) +#define USB_IRDA_BR_57600 (1 << 4) +#define USB_IRDA_BR_38400 (1 << 3) +#define USB_IRDA_BR_19200 (1 << 2) +#define USB_IRDA_BR_9600 (1 << 1) +#define USB_IRDA_BR_2400 (1 << 0) + +/* Additional BOFs */ + +#define USB_IRDA_AB_0 (1 << 7) +#define USB_IRDA_AB_1 (1 << 6) +#define USB_IRDA_AB_2 (1 << 5) +#define USB_IRDA_AB_3 (1 << 4) +#define USB_IRDA_AB_6 (1 << 3) +#define USB_IRDA_AB_12 (1 << 2) +#define USB_IRDA_AB_24 (1 << 1) +#define USB_IRDA_AB_48 (1 << 0) + +/* IRDA Rate Sniff */ + +#define USB_IRDA_RATE_SNIFF 1 + +/*-------------------------------------------------------------------------*/ + +struct usb_irda_cs_descriptor { + __u8 bLength; + __u8 bDescriptorType; + + __le16 bcdSpecRevision; + __u8 bmDataSize; + __u8 bmWindowSize; + __u8 bmMinTurnaroundTime; + __le16 wBaudRate; + __u8 bmAdditionalBOFs; + __u8 bIrdaRateSniff; + __u8 bMaxUnicastList; +} __attribute__ ((packed)); + +/*-------------------------------------------------------------------------*/ + +/* Data Format */ + +#define USB_IRDA_STATUS_MEDIA_BUSY (1 << 7) + +/* The following is a 4-bit value used for both + * inbound and outbound headers: + * + * 0 - speed ignored + * 1 - 2400 bps + * 2 - 9600 bps + * 3 - 19200 bps + * 4 - 38400 bps + * 5 - 57600 bps + * 6 - 115200 bps + * 7 - 576000 bps + * 8 - 1.152 Mbps + * 9 - 5 mbps + * 10..15 - Reserved + */ +#define USB_IRDA_STATUS_LINK_SPEED 0x0f + +/* The following is a 4-bit value used only for + * outbound header: + * + * 0 - No change (BOF ignored) + * 1 - 48 BOFs + * 2 - 24 BOFs + * 3 - 12 BOFs + * 4 - 6 BOFs + * 5 - 3 BOFs + * 6 - 2 BOFs + * 7 - 1 BOFs + * 8 - 0 BOFs + * 9..15 - Reserved + */ +#define USB_IRDA_EXTRA_BOFS 0xf0 + +struct usb_irda_inbound_header { + __u8 bmStatus; +}; + +struct usb_irda_outbound_header { + __u8 bmChange; +}; + +#endif /* __LINUX_USB_IRDA_H */ + diff --git a/include/linux/usb/rndis_host.h b/include/linux/usb/rndis_host.h index 29d6458ecb8d..0a6e6d4b929a 100644 --- a/include/linux/usb/rndis_host.h +++ b/include/linux/usb/rndis_host.h @@ -260,7 +260,8 @@ struct rndis_keepalive_c { /* IN (optionally OUT) */ extern void rndis_status(struct usbnet *dev, struct urb *urb); -extern int rndis_command(struct usbnet *dev, struct rndis_msg_hdr *buf); +extern int +rndis_command(struct usbnet *dev, struct rndis_msg_hdr *buf, int buflen); extern int generic_rndis_bind(struct usbnet *dev, struct usb_interface *intf, int flags); extern void rndis_unbind(struct usbnet *dev, struct usb_interface *intf); diff --git a/include/linux/usb/serial.h b/include/linux/usb/serial.h index 8f891cbaf9ab..09a3e6a7518f 100644 --- 
a/include/linux/usb/serial.h +++ b/include/linux/usb/serial.h @@ -62,7 +62,7 @@ */ struct usb_serial_port { struct usb_serial *serial; - struct tty_struct *tty; + struct tty_port port; spinlock_t lock; struct mutex mutex; unsigned char number; @@ -89,7 +89,6 @@ struct usb_serial_port { wait_queue_head_t write_wait; struct work_struct work; - int open_count; char throttled; char throttle_req; char console; @@ -217,22 +216,27 @@ struct usb_serial_driver { int (*resume)(struct usb_serial *serial); /* serial function calls */ - int (*open)(struct usb_serial_port *port, struct file *filp); - void (*close)(struct usb_serial_port *port, struct file *filp); - int (*write)(struct usb_serial_port *port, const unsigned char *buf, - int count); - int (*write_room)(struct usb_serial_port *port); - int (*ioctl)(struct usb_serial_port *port, struct file *file, + /* Called by console with tty = NULL and by tty */ + int (*open)(struct tty_struct *tty, + struct usb_serial_port *port, struct file *filp); + void (*close)(struct tty_struct *tty, + struct usb_serial_port *port, struct file *filp); + int (*write)(struct tty_struct *tty, struct usb_serial_port *port, + const unsigned char *buf, int count); + /* Called only by the tty layer */ + int (*write_room)(struct tty_struct *tty); + int (*ioctl)(struct tty_struct *tty, struct file *file, unsigned int cmd, unsigned long arg); - void (*set_termios)(struct usb_serial_port *port, struct ktermios *old); - void (*break_ctl)(struct usb_serial_port *port, int break_state); - int (*chars_in_buffer)(struct usb_serial_port *port); - void (*throttle)(struct usb_serial_port *port); - void (*unthrottle)(struct usb_serial_port *port); - int (*tiocmget)(struct usb_serial_port *port, struct file *file); - int (*tiocmset)(struct usb_serial_port *port, struct file *file, + void (*set_termios)(struct tty_struct *tty, + struct usb_serial_port *port, struct ktermios *old); + void (*break_ctl)(struct tty_struct *tty, int break_state); + int (*chars_in_buffer)(struct tty_struct *tty); + void (*throttle)(struct tty_struct *tty); + void (*unthrottle)(struct tty_struct *tty); + int (*tiocmget)(struct tty_struct *tty, struct file *file); + int (*tiocmset)(struct tty_struct *tty, struct file *file, unsigned int set, unsigned int clear); - + /* USB events */ void (*read_int_callback)(struct urb *urb); void (*write_int_callback)(struct urb *urb); void (*read_bulk_callback)(struct urb *urb); @@ -270,19 +274,19 @@ static inline void usb_serial_console_disconnect(struct usb_serial *serial) {} /* Functions needed by other parts of the usbserial core */ extern struct usb_serial *usb_serial_get_by_index(unsigned int minor); extern void usb_serial_put(struct usb_serial *serial); -extern int usb_serial_generic_open(struct usb_serial_port *port, - struct file *filp); -extern int usb_serial_generic_write(struct usb_serial_port *port, - const unsigned char *buf, int count); -extern void usb_serial_generic_close(struct usb_serial_port *port, - struct file *filp); +extern int usb_serial_generic_open(struct tty_struct *tty, + struct usb_serial_port *port, struct file *filp); +extern int usb_serial_generic_write(struct tty_struct *tty, + struct usb_serial_port *port, const unsigned char *buf, int count); +extern void usb_serial_generic_close(struct tty_struct *tty, + struct usb_serial_port *port, struct file *filp); extern int usb_serial_generic_resume(struct usb_serial *serial); -extern int usb_serial_generic_write_room(struct usb_serial_port *port); -extern int 
usb_serial_generic_chars_in_buffer(struct usb_serial_port *port); +extern int usb_serial_generic_write_room(struct tty_struct *tty); +extern int usb_serial_generic_chars_in_buffer(struct tty_struct *tty); extern void usb_serial_generic_read_bulk_callback(struct urb *urb); extern void usb_serial_generic_write_bulk_callback(struct urb *urb); -extern void usb_serial_generic_throttle(struct usb_serial_port *port); -extern void usb_serial_generic_unthrottle(struct usb_serial_port *port); +extern void usb_serial_generic_throttle(struct tty_struct *tty); +extern void usb_serial_generic_unthrottle(struct tty_struct *tty); extern void usb_serial_generic_shutdown(struct usb_serial *serial); extern int usb_serial_generic_register(int debug); extern void usb_serial_generic_deregister(void); diff --git a/include/linux/usbdevice_fs.h b/include/linux/usbdevice_fs.h index 3118ede2c67b..0044d9b4cb85 100644 --- a/include/linux/usbdevice_fs.h +++ b/include/linux/usbdevice_fs.h @@ -22,8 +22,6 @@ * * History: * 0.1 04.01.2000 Created - * - * $Id: usbdevice_fs.h,v 1.1 2000/01/06 18:40:41 tom Exp $ */ /*****************************************************************************/ diff --git a/include/linux/videodev2.h b/include/linux/videodev2.h index 4a535ea1e123..2e66a95e8d32 100644 --- a/include/linux/videodev2.h +++ b/include/linux/videodev2.h @@ -246,6 +246,7 @@ struct v4l2_capability #define V4L2_CAP_SLICED_VBI_OUTPUT 0x00000080 /* Is a sliced VBI output device */ #define V4L2_CAP_RDS_CAPTURE 0x00000100 /* RDS data capture */ #define V4L2_CAP_VIDEO_OUTPUT_OVERLAY 0x00000200 /* Can do video output overlay */ +#define V4L2_CAP_HW_FREQ_SEEK 0x00000400 /* Can do hardware frequency seek */ #define V4L2_CAP_TUNER 0x00010000 /* has a tuner */ #define V4L2_CAP_AUDIO 0x00020000 /* has audio support */ @@ -309,6 +310,7 @@ struct v4l2_pix_format /* see http://www.siliconimaging.com/RGB%20Bayer.htm */ #define V4L2_PIX_FMT_SBGGR8 v4l2_fourcc('B','A','8','1') /* 8 BGBG.. GRGR.. */ +#define V4L2_PIX_FMT_SGBRG8 v4l2_fourcc('G','B','R','G') /* 8 GBGB.. RGRG.. */ #define V4L2_PIX_FMT_SBGGR16 v4l2_fourcc('B','Y','R','2') /* 16 BGBG.. GRGR.. 
*/ /* compressed formats */ @@ -323,6 +325,9 @@ struct v4l2_pix_format #define V4L2_PIX_FMT_PWC1 v4l2_fourcc('P','W','C','1') /* pwc older webcam */ #define V4L2_PIX_FMT_PWC2 v4l2_fourcc('P','W','C','2') /* pwc newer webcam */ #define V4L2_PIX_FMT_ET61X251 v4l2_fourcc('E','6','2','5') /* ET61X251 compression */ +#define V4L2_PIX_FMT_SPCA501 v4l2_fourcc('S','5','0','1') /* YUYV per line */ +#define V4L2_PIX_FMT_SPCA561 v4l2_fourcc('S','5','6','1') /* compressed GBRG bayer */ +#define V4L2_PIX_FMT_PAC207 v4l2_fourcc('P','2','0','7') /* compressed BGGR bayer */ /* * F O R M A T E N U M E R A T I O N @@ -1156,6 +1161,14 @@ struct v4l2_frequency __u32 reserved[8]; }; +struct v4l2_hw_freq_seek { + __u32 tuner; + enum v4l2_tuner_type type; + __u32 seek_upward; + __u32 wrap_around; + __u32 reserved[8]; +}; + /* * A U D I O */ @@ -1441,6 +1454,7 @@ struct v4l2_chip_ident { #define VIDIOC_G_CHIP_IDENT _IOWR ('V', 81, struct v4l2_chip_ident) #endif +#define VIDIOC_S_HW_FREQ_SEEK _IOW ('V', 82, struct v4l2_hw_freq_seek) #ifdef __OLD_VIDIOC_ /* for compatibility, will go away some day */ diff --git a/include/linux/wanrouter.h b/include/linux/wanrouter.h index 3add87465b1f..e0aa39612eba 100644 --- a/include/linux/wanrouter.h +++ b/include/linux/wanrouter.h @@ -522,7 +522,7 @@ extern int wanrouter_proc_init(void); extern void wanrouter_proc_cleanup(void); extern int wanrouter_proc_add(struct wan_device *wandev); extern int wanrouter_proc_delete(struct wan_device *wandev); -extern int wanrouter_ioctl( struct inode *inode, struct file *file, unsigned int cmd, unsigned long arg); +extern long wanrouter_ioctl(struct file *file, unsigned int cmd, unsigned long arg); /* Public Data */ /* list of registered devices */ diff --git a/include/linux/wireless.h b/include/linux/wireless.h index 0a9b5b41ed67..d7958f9b52cb 100644 --- a/include/linux/wireless.h +++ b/include/linux/wireless.h @@ -611,6 +611,7 @@ #define IW_ENCODE_ALG_WEP 1 #define IW_ENCODE_ALG_TKIP 2 #define IW_ENCODE_ALG_CCMP 3 +#define IW_ENCODE_ALG_PMK 4 /* struct iw_encode_ext ->ext_flags */ #define IW_ENCODE_EXT_TX_SEQ_VALID 0x00000001 #define IW_ENCODE_EXT_RX_SEQ_VALID 0x00000002 @@ -630,6 +631,7 @@ #define IW_ENC_CAPA_WPA2 0x00000002 #define IW_ENC_CAPA_CIPHER_TKIP 0x00000004 #define IW_ENC_CAPA_CIPHER_CCMP 0x00000008 +#define IW_ENC_CAPA_4WAY_HANDSHAKE 0x00000010 /* Event capability macros - in (struct iw_range *)->event_capa * Because we have more than 32 possible events, we use an array of @@ -675,6 +677,19 @@ struct iw_point __u16 flags; /* Optional params */ }; +#ifdef __KERNEL__ +#ifdef CONFIG_COMPAT + +#include <linux/compat.h> + +struct compat_iw_point { + compat_caddr_t pointer; + __u16 length; + __u16 flags; +}; +#endif +#endif + /* * A frequency * For numbers lower than 10^9, we encode the number in 'm' and @@ -1098,6 +1113,21 @@ struct iw_event #define IW_EV_POINT_LEN (IW_EV_LCP_LEN + sizeof(struct iw_point) - \ IW_EV_POINT_OFF) +#ifdef __KERNEL__ +#ifdef CONFIG_COMPAT +struct __compat_iw_event { + __u16 len; /* Real length of this stuff */ + __u16 cmd; /* Wireless IOCTL */ + compat_caddr_t pointer; +}; +#define IW_EV_COMPAT_LCP_LEN offsetof(struct __compat_iw_event, pointer) +#define IW_EV_COMPAT_POINT_OFF offsetof(struct compat_iw_point, length) +#define IW_EV_COMPAT_POINT_LEN \ + (IW_EV_COMPAT_LCP_LEN + sizeof(struct compat_iw_point) - \ + IW_EV_COMPAT_POINT_OFF) +#endif +#endif + /* Size of the Event prefix when packed in stream */ #define IW_EV_LCP_PK_LEN (4) /* Size of the various events when packed in stream */ diff --git 
a/include/linux/writeback.h b/include/linux/writeback.h index f462439cc288..12b15c561a1f 100644 --- a/include/linux/writeback.h +++ b/include/linux/writeback.h @@ -63,6 +63,7 @@ struct writeback_control { unsigned for_writepages:1; /* This is a writepages() call */ unsigned range_cyclic:1; /* range_start is cyclic */ unsigned more_io:1; /* more io to be dispatched */ + unsigned range_cont:1; }; /* @@ -105,6 +106,8 @@ extern int vm_highmem_is_dirtyable; extern int block_dump; extern int laptop_mode; +extern unsigned long determine_dirtyable_memory(void); + extern int dirty_ratio_handler(struct ctl_table *table, int write, struct file *filp, void __user *buffer, size_t *lenp, loff_t *ppos);
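Editor's note: the sketch below is not part of the patch; it only illustrates, under stated assumptions, the rough shape of a gadget driver built on the new include/linux/usb/composite.h interface added above. All my_* identifiers and the vendor/product IDs are hypothetical, stub handlers stand in for real function logic (a real bind() would allocate descriptors, string IDs and endpoints), and error handling is omitted.

#include <linux/module.h>
#include <linux/usb/ch9.h>
#include <linux/usb/composite.h>

static struct usb_device_descriptor my_device_desc = {
	.bLength		= USB_DT_DEVICE_SIZE,
	.bDescriptorType	= USB_DT_DEVICE,
	.bcdUSB			= __constant_cpu_to_le16(0x0200),
	.bDeviceClass		= USB_CLASS_PER_INTERFACE,
	.idVendor		= __constant_cpu_to_le16(0x0525),	/* hypothetical IDs */
	.idProduct		= __constant_cpu_to_le16(0xffff),
};

/* a do-nothing function: set_alt() and disable() are the required hooks */
static int my_func_set_alt(struct usb_function *f, unsigned intf, unsigned alt)
{
	return 0;
}

static void my_func_disable(struct usb_function *f)
{
}

static struct usb_function my_function = {
	.name		= "my function",
	.set_alt	= my_func_set_alt,
	.disable	= my_func_disable,
};

/* the configuration's bind() adds its functions */
static int my_config_bind(struct usb_configuration *c)
{
	return usb_add_function(c, &my_function);
}

static struct usb_configuration my_config = {
	.label			= "my config",
	.bind			= my_config_bind,
	.bConfigurationValue	= 1,
	.bmAttributes		= USB_CONFIG_ATT_ONE,
	.bMaxPower		= 1,		/* 2 mA */
};

/* the composite driver's bind() adds its configurations */
static int my_composite_bind(struct usb_composite_dev *cdev)
{
	return usb_add_config(cdev, &my_config);
}

static struct usb_composite_driver my_composite = {
	.name	= "my_gadget",
	.dev	= &my_device_desc,
	.bind	= my_composite_bind,
};

static int __init my_init(void)
{
	return usb_composite_register(&my_composite);
}
module_init(my_init);

static void __exit my_exit(void)
{
	usb_composite_unregister(&my_composite);
}
module_exit(my_exit);

MODULE_LICENSE("GPL");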