996 files changed, 34691 insertions, 15187 deletions
@@ -1127,8 +1127,10 @@ S: Carnegie, Pennsylvania 15106-4304 S: USA N: Philip Gladstone -E: philip@raptor.com +E: philip@gladstonefamily.net D: Kernel / timekeeping stuff +S: Carlisle, MA 01741 +S: USA N: Jan-Benedict Glaw E: jbglaw@lug-owl.de @@ -3741,10 +3743,11 @@ D: Mylex DAC960 PCI RAID driver D: Miscellaneous kernel fixes N: Alessandro Zummo -E: azummo@ita.flashnet.it -W: http://freepage.logicom.it/azummo/ +E: a.zummo@towertech.it D: CMI8330 support is sb_card.c D: ISAPnP fixes in sb_card.c +D: ZyXEL omni.net lcd plus driver +D: RTC subsystem S: Italy N: Marc Zyngier diff --git a/Documentation/DMA-mapping.txt b/Documentation/DMA-mapping.txt index 684557474c15..ee4bb73683cd 100644 --- a/Documentation/DMA-mapping.txt +++ b/Documentation/DMA-mapping.txt @@ -199,6 +199,8 @@ address during PCI bus mastering you might do something like: "mydev: 24-bit DMA addressing not available.\n"); goto ignore_this_device; } +[Better use DMA_24BIT_MASK instead of 0x00ffffff. +See linux/include/dma-mapping.h for reference.] When pci_set_dma_mask() is successful, and returns zero, the PCI layer saves away this mask you have provided. The PCI layer will use this diff --git a/Documentation/RCU/whatisRCU.txt b/Documentation/RCU/whatisRCU.txt index b4ea51ad3610..07cb93b82ba9 100644 --- a/Documentation/RCU/whatisRCU.txt +++ b/Documentation/RCU/whatisRCU.txt @@ -605,7 +605,7 @@ are the same as those shown in the preceding section, so they are omitted. { int cpu; - for_each_cpu(cpu) + for_each_possible_cpu(cpu) run_on(cpu); } diff --git a/Documentation/block/biodoc.txt b/Documentation/block/biodoc.txt index 8e63831971d5..f989a9e839b4 100644 --- a/Documentation/block/biodoc.txt +++ b/Documentation/block/biodoc.txt @@ -132,8 +132,18 @@ Some new queue property settings: limit. No highmem default. blk_queue_max_sectors(q, max_sectors) - Maximum size request you can handle in units of 512 byte - sectors. 255 default. + Sets two variables that limit the size of the request. + + - The request queue's max_sectors, which is a soft size in + in units of 512 byte sectors, and could be dynamically varied + by the core kernel. + + - The request queue's max_hw_sectors, which is a hard limit + and reflects the maximum size request a driver can handle + in units of 512 byte sectors. + + The default for both max_sectors and max_hw_sectors is + 255. The upper limit of max_sectors is 1024. blk_queue_max_phys_segments(q, max_segments) Maximum physical segments you can handle in a request. 128 diff --git a/Documentation/cachetlb.txt b/Documentation/cachetlb.txt index 4ae418889b88..53245c429f7d 100644 --- a/Documentation/cachetlb.txt +++ b/Documentation/cachetlb.txt @@ -362,6 +362,27 @@ maps this page at its virtual address. likely that you will need to flush the instruction cache for copy_to_user_page(). + void flush_anon_page(struct page *page, unsigned long vmaddr) + When the kernel needs to access the contents of an anonymous + page, it calls this function (currently only + get_user_pages()). Note: flush_dcache_page() deliberately + doesn't work for an anonymous page. The default + implementation is a nop (and should remain so for all coherent + architectures). For incoherent architectures, it should flush + the cache of the page at vmaddr in the current user process. + + void flush_kernel_dcache_page(struct page *page) + When the kernel needs to modify a user page is has obtained + with kmap, it calls this function after all modifications are + complete (but before kunmapping it) to bring the underlying + page up to date. 
It is assumed here that the user has no + incoherent cached copies (i.e. the original page was obtained + from a mechanism like get_user_pages()). The default + implementation is a nop and should remain so on all coherent + architectures. On incoherent architectures, this should flush + the kernel cache for page (using page_address(page)). + + void flush_icache_range(unsigned long start, unsigned long end) When the kernel stores into addresses that it will execute out of (eg when loading modules), this function is called. diff --git a/Documentation/cpu-hotplug.txt b/Documentation/cpu-hotplug.txt index 57a09f99ecb0..1bcf69996c9d 100644 --- a/Documentation/cpu-hotplug.txt +++ b/Documentation/cpu-hotplug.txt @@ -97,13 +97,13 @@ at which time hotplug is disabled. You really dont need to manipulate any of the system cpu maps. They should be read-only for most use. When setting up per-cpu resources almost always use -cpu_possible_map/for_each_cpu() to iterate. +cpu_possible_map/for_each_possible_cpu() to iterate. Never use anything other than cpumask_t to represent bitmap of CPUs. #include <linux/cpumask.h> -for_each_cpu - Iterate over cpu_possible_map +for_each_possible_cpu - Iterate over cpu_possible_map for_each_online_cpu - Iterate over cpu_online_map for_each_present_cpu - Iterate over cpu_present_map for_each_cpu_mask(x,mask) - Iterate over some random collection of cpu mask. diff --git a/Documentation/cputopology.txt b/Documentation/cputopology.txt index ff280e2e1613..2b28e9ec4e3a 100644 --- a/Documentation/cputopology.txt +++ b/Documentation/cputopology.txt @@ -1,5 +1,5 @@ -Export cpu topology info by sysfs. Items (attributes) are similar +Export cpu topology info via sysfs. Items (attributes) are similar to /proc/cpuinfo. 1) /sys/devices/system/cpu/cpuX/topology/physical_package_id: @@ -12,7 +12,7 @@ represent the thread siblings to cpu X in the same core; represent the thread siblings to cpu X in the same physical package; To implement it in an architecture-neutral way, a new source file, -driver/base/topology.c, is to export the 5 attributes. +drivers/base/topology.c, is to export the 4 attributes. If one architecture wants to support this feature, it just needs to implement 4 defines, typically in file include/asm-XXX/topology.h. diff --git a/Documentation/drivers/edac/edac.txt b/Documentation/drivers/edac/edac.txt index d37191fe5681..70d96a62e5e1 100644 --- a/Documentation/drivers/edac/edac.txt +++ b/Documentation/drivers/edac/edac.txt @@ -21,7 +21,7 @@ within the computer system. In the initial release, memory Correctable Errors Detecting CE events, then harvesting those events and reporting them, CAN be a predictor of future UE events. With CE events, the system can -continue to operate, but with less safety. Preventive maintainence and +continue to operate, but with less safety. Preventive maintenance and proactive part replacement of memory DIMMs exhibiting CEs can reduce the likelihood of the dreaded UE events and system 'panics'. @@ -29,13 +29,13 @@ the likelihood of the dreaded UE events and system 'panics'. In addition, PCI Bus Parity and SERR Errors are scanned for on PCI devices in order to determine if errors are occurring on data transfers. The presence of PCI Parity errors must be examined with a grain of salt. -There are several addin adapters that do NOT follow the PCI specification +There are several add-in adapters that do NOT follow the PCI specification with regards to Parity generation and reporting. 
The specification says the vendor should tie the parity status bits to 0 if they do not intend to generate parity. Some vendors do not do this, and thus the parity bit can "float" giving false positives. -The PCI Parity EDAC device has the ability to "skip" known flakey +The PCI Parity EDAC device has the ability to "skip" known flaky cards during the parity scan. These are set by the parity "blacklist" interface in the sysfs for PCI Parity. (See the PCI section in the sysfs section below.) There is also a parity "whitelist" which is used as @@ -101,7 +101,7 @@ Memory Controller (mc) Model First a background on the memory controller's model abstracted in EDAC. Each mc device controls a set of DIMM memory modules. These modules are -layed out in a Chip-Select Row (csrowX) and Channel table (chX). There can +laid out in a Chip-Select Row (csrowX) and Channel table (chX). There can be multiple csrows and two channels. Memory controllers allow for several csrows, with 8 csrows being a typical value. @@ -131,7 +131,7 @@ for memory DIMMs: DIMM_B1 Labels for these slots are usually silk screened on the motherboard. Slots -labeled 'A' are channel 0 in this example. Slots labled 'B' +labeled 'A' are channel 0 in this example. Slots labeled 'B' are channel 1. Notice that there are two csrows possible on a physical DIMM. These csrows are allocated their csrow assignment based on the slot into which the memory DIMM is placed. Thus, when 1 DIMM @@ -140,7 +140,7 @@ is placed in each Channel, the csrows cross both DIMMs. Memory DIMMs come single or dual "ranked". A rank is a populated csrow. Thus, 2 single ranked DIMMs, placed in slots DIMM_A0 and DIMM_B0 above will have 1 csrow, csrow0. csrow1 will be empty. On the other hand, -when 2 dual ranked DIMMs are similiaryly placed, then both csrow0 and +when 2 dual ranked DIMMs are similarly placed, then both csrow0 and csrow1 will be populated. The pattern repeats itself for csrow2 and csrow3. @@ -246,7 +246,7 @@ Module Version read-only attribute file: 'mc_version' - The EDAC CORE modules's version and compile date are shown here to + The EDAC CORE module's version and compile date are shown here to indicate what EDAC is running. @@ -423,7 +423,7 @@ Total memory managed by this csrow attribute file: 'size_mb' This attribute file displays, in count of megabytes, of memory - that this csrow contatins. + that this csrow contains. Memory Type attribute file: @@ -557,7 +557,7 @@ On Header Type 00 devices the primary status is looked at for any parity error regardless of whether Parity is enabled on the device. (The spec indicates parity is generated in some cases). On Header Type 01 bridges, the secondary status register is also -looked at to see if parity ocurred on the bus on the other side of +looked at to see if parity occurred on the bus on the other side of the bridge. @@ -588,7 +588,7 @@ Panic on PCI PARITY Error: 'panic_on_pci_parity' - This control files enables or disables panic'ing when a parity + This control files enables or disables panicking when a parity error has been detected. @@ -616,12 +616,12 @@ PCI Device Whitelist: This control file allows for an explicit list of PCI devices to be scanned for parity errors. Only devices found on this list will - be examined. The list is a line of hexadecimel VENDOR and DEVICE + be examined. The list is a line of hexadecimal VENDOR and DEVICE ID tuples: 1022:7450,1434:16a6 - One or more can be inserted, seperated by a comma. + One or more can be inserted, separated by a comma. 
To write the above list doing the following as one command line: @@ -639,11 +639,11 @@ PCI Device Blacklist: This control file allows for a list of PCI devices to be skipped for scanning. - The list is a line of hexadecimel VENDOR and DEVICE ID tuples: + The list is a line of hexadecimal VENDOR and DEVICE ID tuples: 1022:7450,1434:16a6 - One or more can be inserted, seperated by a comma. + One or more can be inserted, separated by a comma. To write the above list doing the following as one command line: @@ -651,14 +651,14 @@ PCI Device Blacklist: > /sys/devices/system/edac/pci/pci_parity_blacklist - To display what the whitelist current contatins, + To display what the whitelist currently contains, simply 'cat' the same file. ======================================================================= PCI Vendor and Devices IDs can be obtained with the lspci command. Using the -n option lspci will display the vendor and device IDs. The system -adminstrator will have to determine which devices should be scanned or +administrator will have to determine which devices should be scanned or skipped. @@ -669,5 +669,5 @@ Turn OFF a whitelist by an empty echo command: echo > /sys/devices/system/edac/pci/pci_parity_whitelist -and any previous blacklist will be utililzed. +and any previous blacklist will be utilized. diff --git a/Documentation/filesystems/00-INDEX b/Documentation/filesystems/00-INDEX index 74052d22d868..66fdc0744fe0 100644 --- a/Documentation/filesystems/00-INDEX +++ b/Documentation/filesystems/00-INDEX @@ -1,27 +1,47 @@ 00-INDEX - this file (info on some of the filesystems supported by linux). +Exporting + - explanation of how to make filesystems exportable. Locking - info on locking rules as they pertain to Linux VFS. adfs.txt - info and mount options for the Acorn Advanced Disc Filing System. +afs.txt + - info and examples for the distributed AFS (Andrew File System) fs. affs.txt - info and mount options for the Amiga Fast File System. +automount-support.txt + - information about filesystem automount support. +befs.txt + - information about the BeOS filesystem for Linux. bfs.txt - info for the SCO UnixWare Boot Filesystem (BFS). cifs.txt - - description of the CIFS filesystem + - description of the CIFS filesystem. coda.txt - description of the CODA filesystem. configfs/ - directory containing configfs documentation and example code. cramfs.txt - - info on the cram filesystem for small storage (ROMs etc) + - info on the cram filesystem for small storage (ROMs etc). +dentry-locking.txt + - info on the RCU-based dcache locking model. devfs/ - directory containing devfs documentation. +directory-locking + - info about the locking scheme used for directory operations. dlmfs.txt - info on the userspace interface to the OCFS2 DLM. ext2.txt - info, mount options and specifications for the Ext2 filesystem. +ext3.txt + - info, mount options and specifications for the Ext3 filesystem. +files.txt + - info on file management in the Linux kernel. +fuse.txt + - info on the Filesystem in User SpacE including mount options. +hfs.txt + - info on the Macintosh HFS Filesystem for Linux. hpfs.txt - info and mount options for the OS/2 HPFS. isofs.txt @@ -32,23 +52,43 @@ ncpfs.txt - info on Novell Netware(tm) filesystem using NCP protocol. ntfs.txt - info and mount options for the NTFS filesystem (Windows NT). -proc.txt - - info on Linux's /proc filesystem. ocfs2.txt - info and mount options for the OCFS2 clustered filesystem. +porting + - various information on filesystem porting. 
+proc.txt + - info on Linux's /proc filesystem. +ramfs-rootfs-initramfs.txt + - info on the 'in memory' filesystems ramfs, rootfs and initramfs. +reiser4.txt + - info on the Reiser4 filesystem based on dancing tree algorithms. +relayfs.txt + - info on relayfs, for efficient streaming from kernel to user space. romfs.txt - - Description of the ROMFS filesystem. + - description of the ROMFS filesystem. smbfs.txt - - info on using filesystems with the SMB protocol (Windows 3.11 and NT) + - info on using filesystems with the SMB protocol (Win 3.11 and NT). +spufs.txt + - info and mount options for the SPU filesystem used on Cell. +sysfs-pci.txt + - info on accessing PCI device resources through sysfs. +sysfs.txt + - info on sysfs, a ram-based filesystem for exporting kernel objects. sysv-fs.txt - info on the SystemV/V7/Xenix/Coherent filesystem. +tmpfs.txt + - info on tmpfs, a filesystem that holds all files in virtual memory. udf.txt - info and mount options for the UDF filesystem. ufs.txt - info on the ufs filesystem. +v9fs.txt + - v9fs is a Unix implementation of the Plan 9 9p remote fs protocol. vfat.txt - info on using the VFAT filesystem used in Windows NT and Windows 95 vfs.txt - - Overview of the Virtual File System + - overview of the Virtual File System xfs.txt - info and mount options for the XFS filesystem. +xip.txt + - info on execute-in-place for file mappings. diff --git a/Documentation/ioctl-number.txt b/Documentation/ioctl-number.txt index aa7ba00ec082..171a44ebd939 100644 --- a/Documentation/ioctl-number.txt +++ b/Documentation/ioctl-number.txt @@ -78,8 +78,6 @@ Code Seq# Include File Comments '#' 00-3F IEEE 1394 Subsystem Block for the entire subsystem '1' 00-1F <linux/timepps.h> PPS kit from Ulrich Windl <ftp://ftp.de.kernel.org/pub/linux/daemons/ntp/PPS/> -'6' 00-10 <asm-i386/processor.h> Intel IA32 microcode update driver - <mailto:tigran@veritas.com> '8' all SNP8023 advanced NIC card <mailto:mcr@solidum.com> 'A' 00-1F linux/apm_bios.h diff --git a/Documentation/m68k/README.buddha b/Documentation/m68k/README.buddha index bf802ffc98ad..ef484a719bb9 100644 --- a/Documentation/m68k/README.buddha +++ b/Documentation/m68k/README.buddha @@ -29,7 +29,7 @@ address is written to $4a, then the whole Byte is written to $48, while it doesn't matter how often you're writing to $4a as long as $48 is not touched. After $48 has been written, the whole card disappears from $e8 and is mapped to the new -address just written. Make shure $4a is written before $48, +address just written. Make sure $4a is written before $48, otherwise your chance is only 1:16 to find the board :-). The local memory-map is even active when mapped to $e8: diff --git a/Documentation/networking/ifenslave.c b/Documentation/networking/ifenslave.c index 545447ac503a..a12059886755 100644 --- a/Documentation/networking/ifenslave.c +++ b/Documentation/networking/ifenslave.c @@ -87,7 +87,7 @@ * would fail and generate an error message in the system log. * - For opt_c: slave should not be set to the master's setting * while it is running. It was already set during enslave. To - * simplify things, it is now handeled separately. + * simplify things, it is now handled separately. 
* * - 2003/12/01 - Shmulik Hen <shmulik.hen at intel dot com> * - Code cleanup and style changes diff --git a/Documentation/networking/vortex.txt b/Documentation/networking/vortex.txt index 3759acf95b29..6091e5f6794f 100644 --- a/Documentation/networking/vortex.txt +++ b/Documentation/networking/vortex.txt @@ -24,36 +24,44 @@ Since kernel 2.3.99-pre6, this driver incorporates the support for the This driver supports the following hardware: - 3c590 Vortex 10Mbps - 3c592 EISA 10mbps Demon/Vortex - 3c597 EISA Fast Demon/Vortex - 3c595 Vortex 100baseTx - 3c595 Vortex 100baseT4 - 3c595 Vortex 100base-MII - 3Com Vortex - 3c900 Boomerang 10baseT - 3c900 Boomerang 10Mbps Combo - 3c900 Cyclone 10Mbps TPO - 3c900B Cyclone 10Mbps T - 3c900 Cyclone 10Mbps Combo - 3c900 Cyclone 10Mbps TPC - 3c900B-FL Cyclone 10base-FL - 3c905 Boomerang 100baseTx - 3c905 Boomerang 100baseT4 - 3c905B Cyclone 100baseTx - 3c905B Cyclone 10/100/BNC - 3c905B-FX Cyclone 100baseFx - 3c905C Tornado - 3c980 Cyclone - 3cSOHO100-TX Hurricane - 3c555 Laptop Hurricane - 3c575 Boomerang CardBus - 3CCFE575 Cyclone CardBus - 3CCFE575CT Cyclone CardBus - 3CCFE656 Cyclone CardBus - 3CCFEM656 Cyclone CardBus - 3c450 Cyclone/unknown - + 3c590 Vortex 10Mbps + 3c592 EISA 10Mbps Demon/Vortex + 3c597 EISA Fast Demon/Vortex + 3c595 Vortex 100baseTx + 3c595 Vortex 100baseT4 + 3c595 Vortex 100base-MII + 3c900 Boomerang 10baseT + 3c900 Boomerang 10Mbps Combo + 3c900 Cyclone 10Mbps TPO + 3c900 Cyclone 10Mbps Combo + 3c900 Cyclone 10Mbps TPC + 3c900B-FL Cyclone 10base-FL + 3c905 Boomerang 100baseTx + 3c905 Boomerang 100baseT4 + 3c905B Cyclone 100baseTx + 3c905B Cyclone 10/100/BNC + 3c905B-FX Cyclone 100baseFx + 3c905C Tornado + 3c920B-EMB-WNM (ATI Radeon 9100 IGP) + 3c980 Cyclone + 3c980C Python-T + 3cSOHO100-TX Hurricane + 3c555 Laptop Hurricane + 3c556 Laptop Tornado + 3c556B Laptop Hurricane + 3c575 [Megahertz] 10/100 LAN CardBus + 3c575 Boomerang CardBus + 3CCFE575BT Cyclone CardBus + 3CCFE575CT Tornado CardBus + 3CCFE656 Cyclone CardBus + 3CCFEM656B Cyclone+Winmodem CardBus + 3CXFEM656C Tornado+Winmodem CardBus + 3c450 HomePNA Tornado + 3c920 Tornado + 3c982 Hydra Dual Port A + 3c982 Hydra Dual Port B + 3c905B-T4 + 3c920B-EMB-WNM Tornado Module parameters ================= @@ -293,11 +301,6 @@ Donald's wake-on-LAN page: http://www.scyld.com/wakeonlan.html -3Com's documentation for many NICs, including the ones supported by -this driver is available at - - http://support.3com.com/partners/developer/developer_form.html - 3Com's DOS-based application for setting up the NICs EEPROMs: ftp://ftp.3com.com/pub/nic/3c90x/3c90xx2.exe @@ -312,10 +315,10 @@ Autonegotiation notes --------------------- The driver uses a one-minute heartbeat for adapting to changes in - the external LAN environment. This means that when, for example, a - machine is unplugged from a hubbed 10baseT LAN plugged into a - switched 100baseT LAN, the throughput will be quite dreadful for up - to sixty seconds. Be patient. + the external LAN environment if link is up and 5 seconds if link is down. + This means that when, for example, a machine is unplugged from a hubbed + 10baseT LAN plugged into a switched 100baseT LAN, the throughput + will be quite dreadful for up to sixty seconds. Be patient. 
Cisco interoperability note from Walter Wong <wcw+@CMU.EDU>: diff --git a/Documentation/pnp.txt b/Documentation/pnp.txt index af0f6eabfa1c..9529c9c9fd59 100644 --- a/Documentation/pnp.txt +++ b/Documentation/pnp.txt @@ -115,6 +115,9 @@ pnp_unregister_protocol pnp_register_driver - adds a PnP driver to the Plug and Play Layer - this includes driver model integration +- returns zero for success or a negative error number for failure; count + calls to the .add() method if you need to know how many devices bind to + the driver pnp_unregister_driver - removes a PnP driver from the Plug and Play Layer diff --git a/Documentation/robust-futex-ABI.txt b/Documentation/robust-futex-ABI.txt new file mode 100644 index 000000000000..8529a17ffaa1 --- /dev/null +++ b/Documentation/robust-futex-ABI.txt @@ -0,0 +1,182 @@ +Started by Paul Jackson <pj@sgi.com> + +The robust futex ABI +-------------------- + +Robust_futexes provide a mechanism that is used in addition to normal +futexes, for kernel assist of cleanup of held locks on task exit. + +The interesting data as to what futexes a thread is holding is kept on a +linked list in user space, where it can be updated efficiently as locks +are taken and dropped, without kernel intervention. The only additional +kernel intervention required for robust_futexes above and beyond what is +required for futexes is: + + 1) a one time call, per thread, to tell the kernel where its list of + held robust_futexes begins, and + 2) internal kernel code at exit, to handle any listed locks held + by the exiting thread. + +The existing normal futexes already provide a "Fast Userspace Locking" +mechanism, which handles uncontested locking without needing a system +call, and handles contested locking by maintaining a list of waiting +threads in the kernel. Options on the sys_futex(2) system call support +waiting on a particular futex, and waking up the next waiter on a +particular futex. + +For robust_futexes to work, the user code (typically in a library such +as glibc linked with the application) has to manage and place the +necessary list elements exactly as the kernel expects them. If it fails +to do so, then improperly listed locks will not be cleaned up on exit, +probably causing deadlock or other such failure of the other threads +waiting on the same locks. + +A thread that anticipates possibly using robust_futexes should first +issue the system call: + + asmlinkage long + sys_set_robust_list(struct robust_list_head __user *head, size_t len); + +The pointer 'head' points to a structure in the threads address space +consisting of three words. Each word is 32 bits on 32 bit arch's, or 64 +bits on 64 bit arch's, and local byte order. Each thread should have +its own thread private 'head'. + +If a thread is running in 32 bit compatibility mode on a 64 native arch +kernel, then it can actually have two such structures - one using 32 bit +words for 32 bit compatibility mode, and one using 64 bit words for 64 +bit native mode. The kernel, if it is a 64 bit kernel supporting 32 bit +compatibility mode, will attempt to process both lists on each task +exit, if the corresponding sys_set_robust_list() call has been made to +setup that list. + + The first word in the memory structure at 'head' contains a + pointer to a single linked list of 'lock entries', one per lock, + as described below. If the list is empty, the pointer will point + to itself, 'head'. The last 'lock entry' points back to the 'head'. 
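To make the registration step concrete before the remaining two words of the
structure are described below, here is a minimal user-space sketch of a thread
setting up and registering an initially empty list. The structure and field
names are illustrative stand-ins for the equivalent definitions in
<linux/futex.h>, and error handling is omitted.

    #include <stddef.h>
    #include <unistd.h>
    #include <sys/syscall.h>     /* SYS_set_robust_list (2.6.17+ headers) */

    struct lock_entry {                  /* one per held lock */
            struct lock_entry *next;     /* singly linked, last entry -> head */
    };

    struct robust_head {
            struct lock_entry *list;     /* word 1: first 'lock entry'   */
            long offset;                 /* word 2: entry -> 'lock word' */
            struct lock_entry *pending;  /* word 3: 'list_op_pending'    */
    };

    static __thread struct robust_head my_head;

    static void register_robust_list(void)
    {
            /* empty list: the head pointer points back at 'head' itself */
            my_head.list = (struct lock_entry *)&my_head;
            my_head.offset = 0;          /* set by the locking library */
            my_head.pending = NULL;
            syscall(SYS_set_robust_list, &my_head, sizeof(my_head));
    }

With 'offset' and 'list_op_pending' filled in as described next, the kernel has
everything it needs to walk this list when the thread exits.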
+ + The second word, called 'offset', specifies the offset from the + address of the associated 'lock entry', plus or minus, of what will + be called the 'lock word', from that 'lock entry'. The 'lock word' + is always a 32 bit word, unlike the other words above. The 'lock + word' holds 3 flag bits in the upper 3 bits, and the thread id (TID) + of the thread holding the lock in the bottom 29 bits. See further + below for a description of the flag bits. + + The third word, called 'list_op_pending', contains transient copy of + the address of the 'lock entry', during list insertion and removal, + and is needed to correctly resolve races should a thread exit while + in the middle of a locking or unlocking operation. + +Each 'lock entry' on the single linked list starting at 'head' consists +of just a single word, pointing to the next 'lock entry', or back to +'head' if there are no more entries. In addition, nearby to each 'lock +entry', at an offset from the 'lock entry' specified by the 'offset' +word, is one 'lock word'. + +The 'lock word' is always 32 bits, and is intended to be the same 32 bit +lock variable used by the futex mechanism, in conjunction with +robust_futexes. The kernel will only be able to wakeup the next thread +waiting for a lock on a threads exit if that next thread used the futex +mechanism to register the address of that 'lock word' with the kernel. + +For each futex lock currently held by a thread, if it wants this +robust_futex support for exit cleanup of that lock, it should have one +'lock entry' on this list, with its associated 'lock word' at the +specified 'offset'. Should a thread die while holding any such locks, +the kernel will walk this list, mark any such locks with a bit +indicating their holder died, and wakeup the next thread waiting for +that lock using the futex mechanism. + +When a thread has invoked the above system call to indicate it +anticipates using robust_futexes, the kernel stores the passed in 'head' +pointer for that task. The task may retrieve that value later on by +using the system call: + + asmlinkage long + sys_get_robust_list(int pid, struct robust_list_head __user **head_ptr, + size_t __user *len_ptr); + +It is anticipated that threads will use robust_futexes embedded in +larger, user level locking structures, one per lock. The kernel +robust_futex mechanism doesn't care what else is in that structure, so +long as the 'offset' to the 'lock word' is the same for all +robust_futexes used by that thread. The thread should link those locks +it currently holds using the 'lock entry' pointers. It may also have +other links between the locks, such as the reverse side of a double +linked list, but that doesn't matter to the kernel. + +By keeping its locks linked this way, on a list starting with a 'head' +pointer known to the kernel, the kernel can provide to a thread the +essential service available for robust_futexes, which is to help clean +up locks held at the time of (a perhaps unexpectedly) exit. + +Actual locking and unlocking, during normal operations, is handled +entirely by user level code in the contending threads, and by the +existing futex mechanism to wait for, and wakeup, locks. The kernels +only essential involvement in robust_futexes is to remember where the +list 'head' is, and to walk the list on thread exit, handling locks +still held by the departing thread, as described below. + +There may exist thousands of futex lock structures in a threads shared +memory, on various data structures, at a given point in time. 
Only those +lock structures for locks currently held by that thread should be on +that thread's robust_futex linked lock list a given time. + +A given futex lock structure in a user shared memory region may be held +at different times by any of the threads with access to that region. The +thread currently holding such a lock, if any, is marked with the threads +TID in the lower 29 bits of the 'lock word'. + +When adding or removing a lock from its list of held locks, in order for +the kernel to correctly handle lock cleanup regardless of when the task +exits (perhaps it gets an unexpected signal 9 in the middle of +manipulating this list), the user code must observe the following +protocol on 'lock entry' insertion and removal: + +On insertion: + 1) set the 'list_op_pending' word to the address of the 'lock word' + to be inserted, + 2) acquire the futex lock, + 3) add the lock entry, with its thread id (TID) in the bottom 29 bits + of the 'lock word', to the linked list starting at 'head', and + 4) clear the 'list_op_pending' word. + +On removal: + 1) set the 'list_op_pending' word to the address of the 'lock word' + to be removed, + 2) remove the lock entry for this lock from the 'head' list, + 2) release the futex lock, and + 2) clear the 'lock_op_pending' word. + +On exit, the kernel will consider the address stored in +'list_op_pending' and the address of each 'lock word' found by walking +the list starting at 'head'. For each such address, if the bottom 29 +bits of the 'lock word' at offset 'offset' from that address equals the +exiting threads TID, then the kernel will do two things: + + 1) if bit 31 (0x80000000) is set in that word, then attempt a futex + wakeup on that address, which will waken the next thread that has + used to the futex mechanism to wait on that address, and + 2) atomically set bit 30 (0x40000000) in the 'lock word'. + +In the above, bit 31 was set by futex waiters on that lock to indicate +they were waiting, and bit 30 is set by the kernel to indicate that the +lock owner died holding the lock. + +The kernel exit code will silently stop scanning the list further if at +any point: + + 1) the 'head' pointer or an subsequent linked list pointer + is not a valid address of a user space word + 2) the calculated location of the 'lock word' (address plus + 'offset') is not the valud address of a 32 bit user space + word + 3) if the list contains more than 1 million (subject to + future kernel configuration changes) elements. + +When the kernel sees a list entry whose 'lock word' doesn't have the +current threads TID in the lower 29 bits, it does nothing with that +entry, and goes on to the next entry. + +Bit 29 (0x20000000) of the 'lock word' is reserved for future use. diff --git a/Documentation/robust-futexes.txt b/Documentation/robust-futexes.txt new file mode 100644 index 000000000000..df82d75245a0 --- /dev/null +++ b/Documentation/robust-futexes.txt @@ -0,0 +1,218 @@ +Started by: Ingo Molnar <mingo@redhat.com> + +Background +---------- + +what are robust futexes? To answer that, we first need to understand +what futexes are: normal futexes are special types of locks that in the +noncontended case can be acquired/released from userspace without having +to enter the kernel. + +A futex is in essence a user-space address, e.g. a 32-bit lock variable +field. 
If userspace notices contention (the lock is already owned and +someone else wants to grab it too) then the lock is marked with a value +that says "there's a waiter pending", and the sys_futex(FUTEX_WAIT) +syscall is used to wait for the other guy to release it. The kernel +creates a 'futex queue' internally, so that it can later on match up the +waiter with the waker - without them having to know about each other. +When the owner thread releases the futex, it notices (via the variable +value) that there were waiter(s) pending, and does the +sys_futex(FUTEX_WAKE) syscall to wake them up. Once all waiters have +taken and released the lock, the futex is again back to 'uncontended' +state, and there's no in-kernel state associated with it. The kernel +completely forgets that there ever was a futex at that address. This +method makes futexes very lightweight and scalable. + +"Robustness" is about dealing with crashes while holding a lock: if a +process exits prematurely while holding a pthread_mutex_t lock that is +also shared with some other process (e.g. yum segfaults while holding a +pthread_mutex_t, or yum is kill -9-ed), then waiters for that lock need +to be notified that the last owner of the lock exited in some irregular +way. + +To solve such types of problems, "robust mutex" userspace APIs were +created: pthread_mutex_lock() returns an error value if the owner exits +prematurely - and the new owner can decide whether the data protected by +the lock can be recovered safely. + +There is a big conceptual problem with futex based mutexes though: it is +the kernel that destroys the owner task (e.g. due to a SEGFAULT), but +the kernel cannot help with the cleanup: if there is no 'futex queue' +(and in most cases there is none, futexes being fast lightweight locks) +then the kernel has no information to clean up after the held lock! +Userspace has no chance to clean up after the lock either - userspace is +the one that crashes, so it has no opportunity to clean up. Catch-22. + +In practice, when e.g. yum is kill -9-ed (or segfaults), a system reboot +is needed to release that futex based lock. This is one of the leading +bugreports against yum. + +To solve this problem, the traditional approach was to extend the vma +(virtual memory area descriptor) concept to have a notion of 'pending +robust futexes attached to this area'. This approach requires 3 new +syscall variants to sys_futex(): FUTEX_REGISTER, FUTEX_DEREGISTER and +FUTEX_RECOVER. At do_exit() time, all vmas are searched to see whether +they have a robust_head set. This approach has two fundamental problems +left: + + - it has quite complex locking and race scenarios. The vma-based + approach had been pending for years, but they are still not completely + reliable. + + - they have to scan _every_ vma at sys_exit() time, per thread! + +The second disadvantage is a real killer: pthread_exit() takes around 1 +microsecond on Linux, but with thousands (or tens of thousands) of vmas +every pthread_exit() takes a millisecond or more, also totally +destroying the CPU's L1 and L2 caches! + +This is very much noticeable even for normal process sys_exit_group() +calls: the kernel has to do the vma scanning unconditionally! (this is +because the kernel has no knowledge about how many robust futexes there +are to be cleaned up, because a robust futex might have been registered +in another task, and the futex variable might have been simply mmap()-ed +into this process's address space). 
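As an aside, the basic futex mechanics described at the start of this section
can be sketched in a few lines of user-space code. This is a deliberately
simplified illustration of the classic 0/1/2 lock-word scheme, not the glibc
implementation; robust mutexes instead keep the owner TID in the lock word, as
described in robust-futex-ABI.txt.

    #include <linux/futex.h>     /* FUTEX_WAIT, FUTEX_WAKE */
    #include <sys/syscall.h>
    #include <unistd.h>

    /* lock word: 0 = free, 1 = locked, 2 = locked with waiters */

    static void lock(int *f)
    {
            int c = __sync_val_compare_and_swap(f, 0, 1);

            while (c != 0) {
                    /* mark the lock contended, then sleep until woken */
                    if (c == 2 || __sync_val_compare_and_swap(f, 1, 2) != 0)
                            syscall(SYS_futex, f, FUTEX_WAIT, 2, NULL, NULL, 0);
                    c = __sync_val_compare_and_swap(f, 0, 2);
            }
    }

    static void unlock(int *f)
    {
            /* old value 1 means nobody was waiting */
            if (__sync_fetch_and_sub(f, 1) != 1) {
                    *f = 0;
                    syscall(SYS_futex, f, FUTEX_WAKE, 1, NULL, NULL, 0);
            }
    }

In the uncontended case both paths complete with a single atomic operation and
never enter the kernel, which is the property the discussion above relies on.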
+ +This huge overhead forced the creation of CONFIG_FUTEX_ROBUST so that +normal kernels can turn it off, but worse than that: the overhead makes +robust futexes impractical for any type of generic Linux distribution. + +So something had to be done. + +New approach to robust futexes +------------------------------ + +At the heart of this new approach there is a per-thread private list of +robust locks that userspace is holding (maintained by glibc) - which +userspace list is registered with the kernel via a new syscall [this +registration happens at most once per thread lifetime]. At do_exit() +time, the kernel checks this user-space list: are there any robust futex +locks to be cleaned up? + +In the common case, at do_exit() time, there is no list registered, so +the cost of robust futexes is just a simple current->robust_list != NULL +comparison. If the thread has registered a list, then normally the list +is empty. If the thread/process crashed or terminated in some incorrect +way then the list might be non-empty: in this case the kernel carefully +walks the list [not trusting it], and marks all locks that are owned by +this thread with the FUTEX_OWNER_DEAD bit, and wakes up one waiter (if +any). + +The list is guaranteed to be private and per-thread at do_exit() time, +so it can be accessed by the kernel in a lockless way. + +There is one race possible though: since adding to and removing from the +list is done after the futex is acquired by glibc, there is a few +instructions window for the thread (or process) to die there, leaving +the futex hung. To protect against this possibility, userspace (glibc) +also maintains a simple per-thread 'list_op_pending' field, to allow the +kernel to clean up if the thread dies after acquiring the lock, but just +before it could have added itself to the list. Glibc sets this +list_op_pending field before it tries to acquire the futex, and clears +it after the list-add (or list-remove) has finished. + +That's all that is needed - all the rest of robust-futex cleanup is done +in userspace [just like with the previous patches]. + +Ulrich Drepper has implemented the necessary glibc support for this new +mechanism, which fully enables robust mutexes. + +Key differences of this userspace-list based approach, compared to the +vma based method: + + - it's much, much faster: at thread exit time, there's no need to loop + over every vma (!), which the VM-based method has to do. Only a very + simple 'is the list empty' op is done. + + - no VM changes are needed - 'struct address_space' is left alone. + + - no registration of individual locks is needed: robust mutexes dont + need any extra per-lock syscalls. Robust mutexes thus become a very + lightweight primitive - so they dont force the application designer + to do a hard choice between performance and robustness - robust + mutexes are just as fast. + + - no per-lock kernel allocation happens. + + - no resource limits are needed. + + - no kernel-space recovery call (FUTEX_RECOVER) is needed. + + - the implementation and the locking is "obvious", and there are no + interactions with the VM. + +Performance +----------- + +I have benchmarked the time needed for the kernel to process a list of 1 +million (!) 
held locks, using the new method [on a 2GHz CPU]: + + - with FUTEX_WAIT set [contended mutex]: 130 msecs + - without FUTEX_WAIT set [uncontended mutex]: 30 msecs + +I have also measured an approach where glibc does the lock notification +[which it currently does for !pshared robust mutexes], and that took 256 +msecs - clearly slower, due to the 1 million FUTEX_WAKE syscalls +userspace had to do. + +(1 million held locks are unheard of - we expect at most a handful of +locks to be held at a time. Nevertheless it's nice to know that this +approach scales nicely.) + +Implementation details +---------------------- + +The patch adds two new syscalls: one to register the userspace list, and +one to query the registered list pointer: + + asmlinkage long + sys_set_robust_list(struct robust_list_head __user *head, + size_t len); + + asmlinkage long + sys_get_robust_list(int pid, struct robust_list_head __user **head_ptr, + size_t __user *len_ptr); + +List registration is very fast: the pointer is simply stored in +current->robust_list. [Note that in the future, if robust futexes become +widespread, we could extend sys_clone() to register a robust-list head +for new threads, without the need of another syscall.] + +So there is virtually zero overhead for tasks not using robust futexes, +and even for robust futex users, there is only one extra syscall per +thread lifetime, and the cleanup operation, if it happens, is fast and +straightforward. The kernel doesnt have any internal distinction between +robust and normal futexes. + +If a futex is found to be held at exit time, the kernel sets the +following bit of the futex word: + + #define FUTEX_OWNER_DIED 0x40000000 + +and wakes up the next futex waiter (if any). User-space does the rest of +the cleanup. + +Otherwise, robust futexes are acquired by glibc by putting the TID into +the futex field atomically. Waiters set the FUTEX_WAITERS bit: + + #define FUTEX_WAITERS 0x80000000 + +and the remaining bits are for the TID. + +Testing, architecture support +----------------------------- + +i've tested the new syscalls on x86 and x86_64, and have made sure the +parsing of the userspace list is robust [ ;-) ] even if the list is +deliberately corrupted. + +i386 and x86_64 syscalls are wired up at the moment, and Ulrich has +tested the new glibc code (on x86_64 and i386), and it works for his +robust-mutex testcases. + +All other architectures should build just fine too - but they wont have +the new syscalls yet. + +Architectures need to implement the new futex_atomic_cmpxchg_inatomic() +inline function before writing up the syscalls (that function returns +-ENOSYS right now). diff --git a/Documentation/rpc-cache.txt b/Documentation/rpc-cache.txt index 2b5d4434fa5a..5f757c8cf979 100644 --- a/Documentation/rpc-cache.txt +++ b/Documentation/rpc-cache.txt @@ -1,4 +1,4 @@ -This document gives a brief introduction to the caching + This document gives a brief introduction to the caching mechanisms in the sunrpc layer that is used, in particular, for NFS authentication. @@ -25,25 +25,17 @@ The common code handles such things as: - supporting 'NEGATIVE' as well as positive entries - allowing an EXPIRED time on cache items, and removing items after they expire, and are no longe in-use. 
- - Future code extensions are expect to handle - making requests to user-space to fill in cache entries - allowing user-space to directly set entries in the cache - delaying RPC requests that depend on as-yet incomplete cache entries, and replaying those requests when the cache entry is complete. - - maintaining last-access times on cache entries - - clean out old entries when the caches become full - -The code for performing a cache lookup is also common, but in the form -of a template. i.e. a #define. -Each cache defines a lookup function by using the DefineCacheLookup -macro, or the simpler DefineSimpleCacheLookup macro + - clean out old entries as they expire. Creating a Cache ---------------- -1/ A cache needs a datum to cache. This is in the form of a +1/ A cache needs a datum to store. This is in the form of a structure definition that must contain a struct cache_head as an element, usually the first. @@ -51,35 +43,69 @@ Creating a Cache Each cache element is reference counted and contains expiry and update times for use in cache management. 2/ A cache needs a "cache_detail" structure that - describes the cache. This stores the hash table, and some - parameters for cache management. -3/ A cache needs a lookup function. This is created using - the DefineCacheLookup macro. This lookup function is used both - to find entries and to update entries. The normal mode for - updating an entry is to replace the old entry with a new - entry. However it is possible to allow update-in-place - for those caches where it makes sense (no atomicity issues - or indirect reference counting issue) -4/ A cache needs to be registered using cache_register(). This - includes in on a list of caches that will be regularly - cleaned to discard old data. For this to work, some - thread must periodically call cache_clean - + describes the cache. This stores the hash table, some + parameters for cache management, and some operations detailing how + to work with particular cache items. + The operations requires are: + struct cache_head *alloc(void) + This simply allocates appropriate memory and returns + a pointer to the cache_detail embedded within the + structure + void cache_put(struct kref *) + This is called when the last reference to an item is + is dropped. The pointer passed is to the 'ref' field + in the cache_head. cache_put should release any + references create by 'cache_init' and, if CACHE_VALID + is set, any references created by cache_update. + It should then release the memory allocated by + 'alloc'. + int match(struct cache_head *orig, struct cache_head *new) + test if the keys in the two structures match. Return + 1 if they do, 0 if they don't. + void init(struct cache_head *orig, struct cache_head *new) + Set the 'key' fields in 'new' from 'orig'. This may + include taking references to shared objects. + void update(struct cache_head *orig, struct cache_head *new) + Set the 'content' fileds in 'new' from 'orig'. + int cache_show(struct seq_file *m, struct cache_detail *cd, + struct cache_head *h) + Optional. Used to provide a /proc file that lists the + contents of a cache. This should show one item, + usually on just one line. + int cache_request(struct cache_detail *cd, struct cache_head *h, + char **bpp, int *blen) + Format a request to be send to user-space for an item + to be instantiated. *bpp is a buffer of size *blen. + bpp should be moved forward over the encoded message, + and *blen should be reduced to show how much free + space remains. 
Return 0 on success or <0 if not + enough room or other problem. + int cache_parse(struct cache_detail *cd, char *buf, int len) + A message from user space has arrived to fill out a + cache entry. It is in 'buf' of length 'len'. + cache_parse should parse this, find the item in the + cache with sunrpc_cache_lookup, and update the item + with sunrpc_cache_update. + + +3/ A cache needs to be registered using cache_register(). This + includes it on a list of caches that will be regularly + cleaned to discard old data. + Using a cache ------------- -To find a value in a cache, call the lookup function passing it a the -datum which contains key, and possibly content, and a flag saying -whether to update the cache with new data from the datum. Depending -on how the cache lookup function was defined, it may take an extra -argument to identify the particular cache in question. +To find a value in a cache, call sunrpc_cache_lookup passing a pointer +to the cache_head in a sample item with the 'key' fields filled in. +This will be passed to ->match to identify the target entry. If no +entry is found, a new entry will be create, added to the cache, and +marked as not containing valid data. -Except in cases of kmalloc failure, the lookup function -will return a new datum which will store the key and -may contain valid content, or may not. -This datum is typically passed to cache_check which determines the -validity of the datum and may later initiate an upcall to fill -in the data. +The item returned is typically passed to cache_check which will check +if the data is valid, and may initiate an up-call to get fresh data. +cache_check will return -ENOENT in the entry is negative or if an up +call is needed but not possible, -EAGAIN if an upcall is pending, +or 0 if the data is valid; cache_check can be passed a "struct cache_req *". This structure is typically embedded in the actual request and can be used to create a @@ -90,6 +116,13 @@ item does become valid, the deferred copy of the request will be revisited (->revisit). It is expected that this method will reschedule the request for processing. +The value returned by sunrpc_cache_lookup can also be passed to +sunrpc_cache_update to set the content for the item. A second item is +passed which should hold the content. If the item found by _lookup +has valid data, then it is discarded and a new item is created. This +saves any user of an item from worrying about content changing while +it is being inspected. If the item found by _lookup does not contain +valid data, then the content is copied across and CACHE_VALID is set. Populating a cache ------------------ @@ -114,8 +147,8 @@ should be create or updated to have the given content, and the expiry time should be set on that item. Reading from a channel is a bit more interesting. When a cache -lookup fail, or when it suceeds but finds an entry that may soon -expiry, a request is lodged for that cache item to be updated by +lookup fails, or when it succeeds but finds an entry that may soon +expire, a request is lodged for that cache item to be updated by user-space. These requests appear in the channel file. Successive reads will return successive requests. @@ -130,7 +163,7 @@ Thus a user-space helper is likely to: write a response loop. -If it dies and needs to be restarted, any requests that have not be +If it dies and needs to be restarted, any requests that have not been answered will still appear in the file and will be read by the new instance of the helper. 
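A minimal sketch of such a helper loop is shown below. The channel path and
the response fields are illustrative only; each cache defines its own record
format, and a real helper would also use select/poll and proper error handling.

    #include <stdio.h>
    #include <string.h>
    #include <unistd.h>
    #include <fcntl.h>

    int main(void)
    {
            char req[4096], reply[4096];
            int fd = open("/proc/net/rpc/examplecache/channel", O_RDWR);

            if (fd < 0)
                    return 1;

            for (;;) {
                    /* each read returns at most one request record */
                    ssize_t n = read(fd, req, sizeof(req) - 1);
                    if (n <= 0)
                            break;  /* a real helper would poll and retry */
                    req[n] = '\0';

                    /* ... parse the key, consult local databases/policy ... */

                    /* echo the key back with an expiry time and the content */
                    int len = snprintf(reply, sizeof(reply),
                                       "%.*s 2147483647 somecontent\n",
                                       (int)strcspn(req, "\n"), req);
                    if (write(fd, reply, len) < 0)
                            perror("cache response rejected");
            }
            return 0;
    }

On the kernel side, cache_check() keeps returning -EAGAIN while such an up-call
is outstanding, which is what ties this loop to the request deferral mechanism
described earlier.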
@@ -142,10 +175,9 @@ Each cache should also define a "cache_request" method which takes a cache item and encodes a request into the buffer provided. - Note: If a cache has no active readers on the channel, and has had not active readers for more than 60 seconds, further requests will not be -added to the channel but instead all looks that do not find a valid +added to the channel but instead all lookups that do not find a valid entry will fail. This is partly for backward compatibility: The previous nfs exports table was deemed to be authoritative and a failed lookup meant a definite 'no'. @@ -154,18 +186,17 @@ request/response format ----------------------- While each cache is free to use it's own format for requests -and responses over channel, the following is recommended are +and responses over channel, the following is recommended as appropriate and support routines are available to help: Each request or response record should be printable ASCII with precisely one newline character which should be at the end. Fields within the record should be separated by spaces, normally one. If spaces, newlines, or nul characters are needed in a field they -much be quotes. two mechanisms are available: +much be quoted. two mechanisms are available: 1/ If a field begins '\x' then it must contain an even number of hex digits, and pairs of these digits provide the bytes in the field. 2/ otherwise a \ in the field must be followed by 3 octal digits which give the code for a byte. Other characters are treated - as them selves. At the very least, space, newlines nul, and + as them selves. At the very least, space, newline, nul, and '\' must be quoted in this way. - diff --git a/Documentation/sound/alsa/DocBook/writing-an-alsa-driver.tmpl b/Documentation/sound/alsa/DocBook/writing-an-alsa-driver.tmpl index 6dc9d9f622ca..6feef9e82b63 100644 --- a/Documentation/sound/alsa/DocBook/writing-an-alsa-driver.tmpl +++ b/Documentation/sound/alsa/DocBook/writing-an-alsa-driver.tmpl @@ -2836,7 +2836,7 @@ struct _snd_pcm_runtime { <para> Note that this callback became non-atomic since the recent version. - You can use schedule-related fucntions safely in this callback now. + You can use schedule-related functions safely in this callback now. </para> <para> diff --git a/MAINTAINERS b/MAINTAINERS index 4e8fbbc5566d..e5b051f0e27e 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -882,13 +882,34 @@ W: http://ebtables.sourceforge.net/ S: Maintained EDAC-CORE -P: Doug Thompson -M: norsk5@xmission.com, dthompson@linuxnetworx.com -P: Dave Peterson -M: dsp@llnl.gov, dave_peterson@pobox.com -L: bluesmoke-devel@lists.sourceforge.net -W: bluesmoke.sourceforge.net -S: Maintained +P: Doug Thompson +M: norsk5@xmission.com, dthompson@linuxnetworx.com +P: Dave Peterson +M: dsp@llnl.gov, dave_peterson@pobox.com +L: bluesmoke-devel@lists.sourceforge.net +W: bluesmoke.sourceforge.net +S: Maintained + +EDAC-E752X +P: Dave Peterson +M: dsp@llnl.gov, dave_peterson@pobox.com +L: bluesmoke-devel@lists.sourceforge.net +W: bluesmoke.sourceforge.net +S: Maintained + +EDAC-E7XXX +P: Dave Peterson +M: dsp@llnl.gov, dave_peterson@pobox.com +L: bluesmoke-devel@lists.sourceforge.net +W: bluesmoke.sourceforge.net +S: Maintained + +EDAC-R82600 +P: Tim Small +M: tim@buttersideup.com +L: bluesmoke-devel@lists.sourceforge.net +W: bluesmoke.sourceforge.net +S: Maintained EEPRO100 NETWORK DRIVER P: Andrey V. 
Savochkin @@ -1039,6 +1060,15 @@ M: khc@pm.waw.pl W: http://www.kernel.org/pub/linux/utils/net/hdlc/ S: Maintained +GIGASET ISDN DRIVERS +P: Hansjoerg Lipp +M: hjlipp@web.de +P: Tilman Schmidt +M: tilman@imap.cc +L: gigaset307x-common@lists.sourceforge.net +W: http://gigaset307x.sourceforge.net/ +S: Maintained + HARDWARE MONITORING P: Jean Delvare M: khali@linux-fr.org @@ -2203,6 +2233,12 @@ M: p_gortmaker@yahoo.com L: linux-kernel@vger.kernel.org S: Maintained +REAL TIME CLOCK (RTC) SUBSYSTEM +P: Alessandro Zummo +M: a.zummo@towertech.it +L: linux-kernel@vger.kernel.org +S: Maintained + REISERFS FILE SYSTEM P: Hans Reiser M: reiserfs-dev@namesys.com diff --git a/arch/alpha/Kconfig b/arch/alpha/Kconfig index eedf41bf7057..9bef61b30367 100644 --- a/arch/alpha/Kconfig +++ b/arch/alpha/Kconfig @@ -25,6 +25,10 @@ config RWSEM_XCHGADD_ALGORITHM bool default y +config GENERIC_FIND_NEXT_BIT + bool + default y + config GENERIC_CALIBRATE_DELAY bool default y @@ -447,6 +451,10 @@ config ALPHA_IRONGATE depends on ALPHA_NAUTILUS default y +config GENERIC_HWEIGHT + bool + default y if !ALPHA_EV6 && !ALPHA_EV67 + config ALPHA_AVANTI bool depends on ALPHA_XL || ALPHA_AVANTI_CH diff --git a/arch/alpha/kernel/osf_sys.c b/arch/alpha/kernel/osf_sys.c index 7fb14f42a125..31afe3d91ac6 100644 --- a/arch/alpha/kernel/osf_sys.c +++ b/arch/alpha/kernel/osf_sys.c @@ -821,7 +821,6 @@ osf_setsysinfo(unsigned long op, void __user *buffer, unsigned long nbytes, affects all sorts of things, like timeval and itimerval. */ extern struct timezone sys_tz; -extern int do_adjtimex(struct timex *); struct timeval32 { diff --git a/arch/alpha/kernel/setup.c b/arch/alpha/kernel/setup.c index b4e5f8ff2b25..dd8769670596 100644 --- a/arch/alpha/kernel/setup.c +++ b/arch/alpha/kernel/setup.c @@ -34,6 +34,7 @@ #include <linux/root_dev.h> #include <linux/initrd.h> #include <linux/eisa.h> +#include <linux/pfn.h> #ifdef CONFIG_MAGIC_SYSRQ #include <linux/sysrq.h> #include <linux/reboot.h> @@ -42,7 +43,7 @@ #include <asm/setup.h> #include <asm/io.h> -extern struct notifier_block *panic_notifier_list; +extern struct atomic_notifier_head panic_notifier_list; static int alpha_panic_event(struct notifier_block *, unsigned long, void *); static struct notifier_block alpha_panic_block = { alpha_panic_event, @@ -241,9 +242,6 @@ reserve_std_resources(void) request_resource(io, standard_io_resources+i); } -#define PFN_UP(x) (((x) + PAGE_SIZE-1) >> PAGE_SHIFT) -#define PFN_DOWN(x) ((x) >> PAGE_SHIFT) -#define PFN_PHYS(x) ((x) << PAGE_SHIFT) #define PFN_MAX PFN_DOWN(0x80000000) #define for_each_mem_cluster(memdesc, cluster, i) \ for ((cluster) = (memdesc)->cluster, (i) = 0; \ @@ -472,11 +470,6 @@ page_is_ram(unsigned long pfn) return 0; } -#undef PFN_UP -#undef PFN_DOWN -#undef PFN_PHYS -#undef PFN_MAX - void __init setup_arch(char **cmdline_p) { @@ -507,7 +500,8 @@ setup_arch(char **cmdline_p) } /* Register a call for panic conditions. */ - notifier_chain_register(&panic_notifier_list, &alpha_panic_block); + atomic_notifier_chain_register(&panic_notifier_list, + &alpha_panic_block); #ifdef CONFIG_ALPHA_GENERIC /* Assume that we've booted from SRM if we haven't booted from MILO. 
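The setup.c hunk above reflects the conversion of panic_notifier_list to the
atomic notifier API. A small, illustrative sketch of how a module might
register on that chain with the new calls follows; the callback and module
names are placeholders.

    #include <linux/module.h>
    #include <linux/init.h>
    #include <linux/notifier.h>

    /* declared by the core kernel; shown here as in setup.c above */
    extern struct atomic_notifier_head panic_notifier_list;

    static int my_panic_event(struct notifier_block *nb,
                              unsigned long event, void *ptr)
    {
            /* quiesce hardware, dump diagnostic state, etc. */
            return NOTIFY_DONE;
    }

    static struct notifier_block my_panic_block = {
            .notifier_call = my_panic_event,
    };

    static int __init my_panic_init(void)
    {
            atomic_notifier_chain_register(&panic_notifier_list,
                                           &my_panic_block);
            return 0;
    }

    static void __exit my_panic_exit(void)
    {
            atomic_notifier_chain_unregister(&panic_notifier_list,
                                             &my_panic_block);
    }

    module_init(my_panic_init);
    module_exit(my_panic_exit);
    MODULE_LICENSE("GPL");

For existing callers the only change is the atomic_ prefix and the head type,
as the setup.c diff shows.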
diff --git a/arch/alpha/kernel/time.c b/arch/alpha/kernel/time.c index 6b2921be1909..3859749810b4 100644 --- a/arch/alpha/kernel/time.c +++ b/arch/alpha/kernel/time.c @@ -314,10 +314,11 @@ time_init(void) if (!est_cycle_freq) est_cycle_freq = validate_cc_value(calibrate_cc_with_pit()); - cc1 = rpcc_after_update_in_progress(); + cc1 = rpcc(); /* Calibrate CPU clock -- attempt #2. */ if (!est_cycle_freq) { + cc1 = rpcc_after_update_in_progress(); cc2 = rpcc_after_update_in_progress(); est_cycle_freq = validate_cc_value(cc2 - cc1); cc1 = cc2; diff --git a/arch/alpha/lib/ev6-memchr.S b/arch/alpha/lib/ev6-memchr.S index a8e843dbcc23..1a5f71b9d8b1 100644 --- a/arch/alpha/lib/ev6-memchr.S +++ b/arch/alpha/lib/ev6-memchr.S @@ -84,7 +84,7 @@ $last_quad: beq $2, $not_found # U : U L U L $found_it: -#if defined(__alpha_fix__) && defined(__alpha_cix__) +#ifdef CONFIG_ALPHA_EV67 /* * Since we are guaranteed to have set one of the bits, we don't * have to worry about coming back with a 0x40 out of cttz... diff --git a/arch/alpha/lib/fpreg.c b/arch/alpha/lib/fpreg.c index 97c4d9d7a4d5..05017ba34c3c 100644 --- a/arch/alpha/lib/fpreg.c +++ b/arch/alpha/lib/fpreg.c @@ -4,7 +4,7 @@ * (C) Copyright 1998 Linus Torvalds */ -#if defined(__alpha_cix__) || defined(__alpha_fix__) +#if defined(CONFIG_ALPHA_EV6) || defined(CONFIG_ALPHA_EV67) #define STT(reg,val) asm volatile ("ftoit $f"#reg",%0" : "=r"(val)); #else #define STT(reg,val) asm volatile ("stt $f"#reg",%0" : "=m"(val)); @@ -53,7 +53,7 @@ alpha_read_fp_reg (unsigned long reg) return val; } -#if defined(__alpha_cix__) || defined(__alpha_fix__) +#if defined(CONFIG_ALPHA_EV6) || defined(CONFIG_ALPHA_EV67) #define LDT(reg,val) asm volatile ("itoft %0,$f"#reg : : "r"(val)); #else #define LDT(reg,val) asm volatile ("ldt $f"#reg",%0" : : "m"(val)); @@ -98,7 +98,7 @@ alpha_write_fp_reg (unsigned long reg, unsigned long val) } } -#if defined(__alpha_cix__) || defined(__alpha_fix__) +#if defined(CONFIG_ALPHA_EV6) || defined(CONFIG_ALPHA_EV67) #define STS(reg,val) asm volatile ("ftois $f"#reg",%0" : "=r"(val)); #else #define STS(reg,val) asm volatile ("sts $f"#reg",%0" : "=m"(val)); @@ -147,7 +147,7 @@ alpha_read_fp_reg_s (unsigned long reg) return val; } -#if defined(__alpha_cix__) || defined(__alpha_fix__) +#if defined(CONFIG_ALPHA_EV6) || defined(CONFIG_ALPHA_EV67) #define LDS(reg,val) asm volatile ("itofs %0,$f"#reg : : "r"(val)); #else #define LDS(reg,val) asm volatile ("lds $f"#reg",%0" : : "m"(val)); diff --git a/arch/alpha/mm/numa.c b/arch/alpha/mm/numa.c index 6d5251254f68..bf6b65c81bef 100644 --- a/arch/alpha/mm/numa.c +++ b/arch/alpha/mm/numa.c @@ -13,6 +13,7 @@ #include <linux/bootmem.h> #include <linux/swap.h> #include <linux/initrd.h> +#include <linux/pfn.h> #include <asm/hwrpb.h> #include <asm/pgalloc.h> @@ -27,9 +28,6 @@ bootmem_data_t node_bdata[MAX_NUMNODES]; #define DBGDCONT(args...) 
#endif -#define PFN_UP(x) (((x) + PAGE_SIZE-1) >> PAGE_SHIFT) -#define PFN_DOWN(x) ((x) >> PAGE_SHIFT) -#define PFN_PHYS(x) ((x) << PAGE_SHIFT) #define for_each_mem_cluster(memdesc, cluster, i) \ for ((cluster) = (memdesc)->cluster, (i) = 0; \ (i) < (memdesc)->numclusters; (i)++, (cluster)++) diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig index 0dd24ebdf6ac..9731b3f826ab 100644 --- a/arch/arm/Kconfig +++ b/arch/arm/Kconfig @@ -8,6 +8,7 @@ mainmenu "Linux Kernel Configuration" config ARM bool default y + select RTC_LIB help The ARM series is a line of low-power-consumption RISC chip designs licensed by ARM Ltd and targeted at embedded applications and @@ -53,6 +54,10 @@ config RWSEM_GENERIC_SPINLOCK config RWSEM_XCHGADD_ALGORITHM bool +config GENERIC_HWEIGHT + bool + default y + config GENERIC_CALIBRATE_DELAY bool default y @@ -835,6 +840,8 @@ source "drivers/usb/Kconfig" source "drivers/mmc/Kconfig" +source "drivers/rtc/Kconfig" + endmenu source "fs/Kconfig" diff --git a/arch/arm/common/rtctime.c b/arch/arm/common/rtctime.c index e851d86c212c..35c9a64ac14c 100644 --- a/arch/arm/common/rtctime.c +++ b/arch/arm/common/rtctime.c @@ -20,6 +20,7 @@ #include <linux/capability.h> #include <linux/device.h> #include <linux/mutex.h> +#include <linux/rtc.h> #include <asm/rtc.h> #include <asm/semaphore.h> @@ -42,89 +43,6 @@ static struct rtc_ops *rtc_ops; #define rtc_epoch 1900UL -static const unsigned char days_in_month[] = { - 31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31 -}; - -#define LEAPS_THRU_END_OF(y) ((y)/4 - (y)/100 + (y)/400) -#define LEAP_YEAR(year) ((!(year % 4) && (year % 100)) || !(year % 400)) - -static int month_days(unsigned int month, unsigned int year) -{ - return days_in_month[month] + (LEAP_YEAR(year) && month == 1); -} - -/* - * Convert seconds since 01-01-1970 00:00:00 to Gregorian date. - */ -void rtc_time_to_tm(unsigned long time, struct rtc_time *tm) -{ - int days, month, year; - - days = time / 86400; - time -= days * 86400; - - tm->tm_wday = (days + 4) % 7; - - year = 1970 + days / 365; - days -= (year - 1970) * 365 - + LEAPS_THRU_END_OF(year - 1) - - LEAPS_THRU_END_OF(1970 - 1); - if (days < 0) { - year -= 1; - days += 365 + LEAP_YEAR(year); - } - tm->tm_year = year - 1900; - tm->tm_yday = days + 1; - - for (month = 0; month < 11; month++) { - int newdays; - - newdays = days - month_days(month, year); - if (newdays < 0) - break; - days = newdays; - } - tm->tm_mon = month; - tm->tm_mday = days + 1; - - tm->tm_hour = time / 3600; - time -= tm->tm_hour * 3600; - tm->tm_min = time / 60; - tm->tm_sec = time - tm->tm_min * 60; -} -EXPORT_SYMBOL(rtc_time_to_tm); - -/* - * Does the rtc_time represent a valid date/time? - */ -int rtc_valid_tm(struct rtc_time *tm) -{ - if (tm->tm_year < 70 || - tm->tm_mon >= 12 || - tm->tm_mday < 1 || - tm->tm_mday > month_days(tm->tm_mon, tm->tm_year + 1900) || - tm->tm_hour >= 24 || - tm->tm_min >= 60 || - tm->tm_sec >= 60) - return -EINVAL; - - return 0; -} -EXPORT_SYMBOL(rtc_valid_tm); - -/* - * Convert Gregorian date to seconds since 01-01-1970 00:00:00. - */ -int rtc_tm_to_time(struct rtc_time *tm, unsigned long *time) -{ - *time = mktime(tm->tm_year + 1900, tm->tm_mon + 1, tm->tm_mday, - tm->tm_hour, tm->tm_min, tm->tm_sec); - - return 0; -} -EXPORT_SYMBOL(rtc_tm_to_time); - /* * Calculate the next alarm time given the requested alarm time mask * and the current time. 
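The date conversion helpers deleted from rtctime.c above are not dropped from the kernel: ARM now selects RTC_LIB (see the arm/Kconfig hunk earlier), and rtc_time_to_tm(), rtc_valid_tm() and rtc_tm_to_time(), with the signatures shown in the removed code, are provided by the shared RTC library this release introduces. A usage sketch of the round trip; the function name and return codes are illustrative:

#include <linux/rtc.h>
#include <linux/errno.h>

static int example_roundtrip(unsigned long secs)
{
        struct rtc_time tm;
        unsigned long back;

        rtc_time_to_tm(secs, &tm);      /* seconds since 1970-01-01 -> broken-down time */
        if (rtc_valid_tm(&tm))          /* returns -EINVAL for impossible dates */
                return -EINVAL;
        rtc_tm_to_time(&tm, &back);     /* and back to seconds */
        return back == secs ? 0 : -ERANGE;
}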
@@ -151,13 +69,13 @@ void rtc_next_alarm_time(struct rtc_time *next, struct rtc_time *now, struct rtc } } -static inline int rtc_read_time(struct rtc_ops *ops, struct rtc_time *tm) +static inline int rtc_arm_read_time(struct rtc_ops *ops, struct rtc_time *tm) { memset(tm, 0, sizeof(struct rtc_time)); return ops->read_time(tm); } -static inline int rtc_set_time(struct rtc_ops *ops, struct rtc_time *tm) +static inline int rtc_arm_set_time(struct rtc_ops *ops, struct rtc_time *tm) { int ret; @@ -168,7 +86,7 @@ static inline int rtc_set_time(struct rtc_ops *ops, struct rtc_time *tm) return ret; } -static inline int rtc_read_alarm(struct rtc_ops *ops, struct rtc_wkalrm *alrm) +static inline int rtc_arm_read_alarm(struct rtc_ops *ops, struct rtc_wkalrm *alrm) { int ret = -EINVAL; if (ops->read_alarm) { @@ -178,7 +96,7 @@ static inline int rtc_read_alarm(struct rtc_ops *ops, struct rtc_wkalrm *alrm) return ret; } -static inline int rtc_set_alarm(struct rtc_ops *ops, struct rtc_wkalrm *alrm) +static inline int rtc_arm_set_alarm(struct rtc_ops *ops, struct rtc_wkalrm *alrm) { int ret = -EINVAL; if (ops->set_alarm) @@ -266,7 +184,7 @@ static int rtc_ioctl(struct inode *inode, struct file *file, unsigned int cmd, switch (cmd) { case RTC_ALM_READ: - ret = rtc_read_alarm(ops, &alrm); + ret = rtc_arm_read_alarm(ops, &alrm); if (ret) break; ret = copy_to_user(uarg, &alrm.time, sizeof(tm)); @@ -288,11 +206,11 @@ static int rtc_ioctl(struct inode *inode, struct file *file, unsigned int cmd, alrm.time.tm_wday = -1; alrm.time.tm_yday = -1; alrm.time.tm_isdst = -1; - ret = rtc_set_alarm(ops, &alrm); + ret = rtc_arm_set_alarm(ops, &alrm); break; case RTC_RD_TIME: - ret = rtc_read_time(ops, &tm); + ret = rtc_arm_read_time(ops, &tm); if (ret) break; ret = copy_to_user(uarg, &tm, sizeof(tm)); @@ -310,7 +228,7 @@ static int rtc_ioctl(struct inode *inode, struct file *file, unsigned int cmd, ret = -EFAULT; break; } - ret = rtc_set_time(ops, &tm); + ret = rtc_arm_set_time(ops, &tm); break; case RTC_EPOCH_SET: @@ -341,11 +259,11 @@ static int rtc_ioctl(struct inode *inode, struct file *file, unsigned int cmd, ret = -EFAULT; break; } - ret = rtc_set_alarm(ops, &alrm); + ret = rtc_arm_set_alarm(ops, &alrm); break; case RTC_WKALM_RD: - ret = rtc_read_alarm(ops, &alrm); + ret = rtc_arm_read_alarm(ops, &alrm); if (ret) break; ret = copy_to_user(uarg, &alrm, sizeof(alrm)); @@ -435,7 +353,7 @@ static int rtc_read_proc(char *page, char **start, off_t off, int count, int *eo struct rtc_time tm; char *p = page; - if (rtc_read_time(ops, &tm) == 0) { + if (rtc_arm_read_time(ops, &tm) == 0) { p += sprintf(p, "rtc_time\t: %02d:%02d:%02d\n" "rtc_date\t: %04d-%02d-%02d\n" @@ -445,7 +363,7 @@ static int rtc_read_proc(char *page, char **start, off_t off, int count, int *eo rtc_epoch); } - if (rtc_read_alarm(ops, &alrm) == 0) { + if (rtc_arm_read_alarm(ops, &alrm) == 0) { p += sprintf(p, "alrm_time\t: "); if ((unsigned int)alrm.time.tm_hour <= 24) p += sprintf(p, "%02d:", alrm.time.tm_hour); diff --git a/arch/arm/lib/copy_template.S b/arch/arm/lib/copy_template.S index 838e435e4922..cab355c0c1f7 100644 --- a/arch/arm/lib/copy_template.S +++ b/arch/arm/lib/copy_template.S @@ -236,7 +236,7 @@ /* - * Abort preanble and completion macros. + * Abort preamble and completion macros. * If a fixup handler is required then those macros must surround it. * It is assumed that the fixup code will handle the private part of * the exit macro. 
diff --git a/arch/arm/mach-footbridge/time.c b/arch/arm/mach-footbridge/time.c index 2c64a0b0502e..5d02e95dede3 100644 --- a/arch/arm/mach-footbridge/time.c +++ b/arch/arm/mach-footbridge/time.c @@ -34,27 +34,12 @@ static int rtc_base; static unsigned long __init get_isa_cmos_time(void) { unsigned int year, mon, day, hour, min, sec; - int i; // check to see if the RTC makes sense..... if ((CMOS_READ(RTC_VALID) & RTC_VRT) == 0) return mktime(1970, 1, 1, 0, 0, 0); - /* The Linux interpretation of the CMOS clock register contents: - * When the Update-In-Progress (UIP) flag goes from 1 to 0, the - * RTC registers show the second which has precisely just started. - * Let's hope other operating systems interpret the RTC the same way. - */ - /* read RTC exactly on falling edge of update flag */ - for (i = 0 ; i < 1000000 ; i++) /* may take up to 1 second... */ - if (CMOS_READ(RTC_FREQ_SELECT) & RTC_UIP) - break; - - for (i = 0 ; i < 1000000 ; i++) /* must try at least 2.228 ms */ - if (!(CMOS_READ(RTC_FREQ_SELECT) & RTC_UIP)) - break; - - do { /* Isn't this overkill ? UIP above should guarantee consistency */ + do { sec = CMOS_READ(RTC_SECONDS); min = CMOS_READ(RTC_MINUTES); hour = CMOS_READ(RTC_HOURS); diff --git a/arch/arm/mach-integrator/time.c b/arch/arm/mach-integrator/time.c index 3c22c16b38bf..bc07f52a6fd7 100644 --- a/arch/arm/mach-integrator/time.c +++ b/arch/arm/mach-integrator/time.c @@ -40,13 +40,13 @@ static int integrator_set_rtc(void) return 1; } -static int rtc_read_alarm(struct rtc_wkalrm *alrm) +static int integrator_rtc_read_alarm(struct rtc_wkalrm *alrm) { rtc_time_to_tm(readl(rtc_base + RTC_MR), &alrm->time); return 0; } -static inline int rtc_set_alarm(struct rtc_wkalrm *alrm) +static inline int integrator_rtc_set_alarm(struct rtc_wkalrm *alrm) { unsigned long time; int ret; @@ -62,7 +62,7 @@ static inline int rtc_set_alarm(struct rtc_wkalrm *alrm) return ret; } -static int rtc_read_time(struct rtc_time *tm) +static int integrator_rtc_read_time(struct rtc_time *tm) { rtc_time_to_tm(readl(rtc_base + RTC_DR), tm); return 0; @@ -76,7 +76,7 @@ static int rtc_read_time(struct rtc_time *tm) * edge of the 1Hz clock, we must write the time one second * in advance. 
*/ -static inline int rtc_set_time(struct rtc_time *tm) +static inline int integrator_rtc_set_time(struct rtc_time *tm) { unsigned long time; int ret; @@ -90,10 +90,10 @@ static inline int rtc_set_time(struct rtc_time *tm) static struct rtc_ops rtc_ops = { .owner = THIS_MODULE, - .read_time = rtc_read_time, - .set_time = rtc_set_time, - .read_alarm = rtc_read_alarm, - .set_alarm = rtc_set_alarm, + .read_time = integrator_rtc_read_time, + .set_time = integrator_rtc_set_time, + .read_alarm = integrator_rtc_read_alarm, + .set_alarm = integrator_rtc_set_alarm, }; static irqreturn_t arm_rtc_interrupt(int irq, void *dev_id, diff --git a/arch/arm/mach-omap1/board-netstar.c b/arch/arm/mach-omap1/board-netstar.c index 60d5f8a3339c..7520e602d7a2 100644 --- a/arch/arm/mach-omap1/board-netstar.c +++ b/arch/arm/mach-omap1/board-netstar.c @@ -141,7 +141,7 @@ static int __init netstar_late_init(void) /* TODO: Setup front panel switch here */ /* Setup panic notifier */ - notifier_chain_register(&panic_notifier_list, &panic_block); + atomic_notifier_chain_register(&panic_notifier_list, &panic_block); return 0; } diff --git a/arch/arm/mach-omap1/board-voiceblue.c b/arch/arm/mach-omap1/board-voiceblue.c index bfd5fdd1a875..52e4a9d69642 100644 --- a/arch/arm/mach-omap1/board-voiceblue.c +++ b/arch/arm/mach-omap1/board-voiceblue.c @@ -235,7 +235,7 @@ static struct notifier_block panic_block = { static int __init voiceblue_setup(void) { /* Setup panic notifier */ - notifier_chain_register(&panic_notifier_list, &panic_block); + atomic_notifier_chain_register(&panic_notifier_list, &panic_block); return 0; } diff --git a/arch/arm/mach-pxa/generic.c b/arch/arm/mach-pxa/generic.c index 9b48a90aefce..5efa84749f37 100644 --- a/arch/arm/mach-pxa/generic.c +++ b/arch/arm/mach-pxa/generic.c @@ -319,6 +319,11 @@ void __init pxa_set_ficp_info(struct pxaficp_platform_data *info) pxaficp_device.dev.platform_data = info; } +static struct platform_device pxartc_device = { + .name = "sa1100-rtc", + .id = -1, +}; + static struct platform_device *devices[] __initdata = { &pxamci_device, &udc_device, @@ -329,6 +334,7 @@ static struct platform_device *devices[] __initdata = { &pxaficp_device, &i2c_device, &i2s_device, + &pxartc_device, }; static int __init pxa_init(void) diff --git a/arch/arm/mach-sa1100/generic.c b/arch/arm/mach-sa1100/generic.c index 2abdc419e984..9ea71551fc04 100644 --- a/arch/arm/mach-sa1100/generic.c +++ b/arch/arm/mach-sa1100/generic.c @@ -324,6 +324,11 @@ void sa11x0_set_irda_data(struct irda_platform_data *irda) sa11x0ir_device.dev.platform_data = irda; } +static struct platform_device sa11x0rtc_device = { + .name = "sa1100-rtc", + .id = -1, +}; + static struct platform_device *sa11x0_devices[] __initdata = { &sa11x0udc_device, &sa11x0uart1_device, @@ -333,6 +338,7 @@ static struct platform_device *sa11x0_devices[] __initdata = { &sa11x0pcmcia_device, &sa11x0fb_device, &sa11x0mtd_device, + &sa11x0rtc_device, }; static int __init sa1100_init(void) diff --git a/arch/arm26/Kconfig b/arch/arm26/Kconfig index dee23d87fc5a..cf4ebf4c274d 100644 --- a/arch/arm26/Kconfig +++ b/arch/arm26/Kconfig @@ -41,6 +41,10 @@ config RWSEM_GENERIC_SPINLOCK config RWSEM_XCHGADD_ALGORITHM bool +config GENERIC_HWEIGHT + bool + default y + config GENERIC_CALIBRATE_DELAY bool default y diff --git a/arch/arm26/kernel/traps.c b/arch/arm26/kernel/traps.c index 5847ea5d7747..a79de041b50e 100644 --- a/arch/arm26/kernel/traps.c +++ b/arch/arm26/kernel/traps.c @@ -34,7 +34,7 @@ #include <asm/system.h> #include <asm/uaccess.h> #include 
<asm/unistd.h> -#include <asm/semaphore.h> +#include <linux/mutex.h> #include "ptrace.h" @@ -207,19 +207,19 @@ void die_if_kernel(const char *str, struct pt_regs *regs, int err) die(str, regs, err); } -static DECLARE_MUTEX(undef_sem); +static DEFINE_MUTEX(undef_mutex); static int (*undef_hook)(struct pt_regs *); int request_undef_hook(int (*fn)(struct pt_regs *)) { int ret = -EBUSY; - down(&undef_sem); + mutex_lock(&undef_mutex); if (undef_hook == NULL) { undef_hook = fn; ret = 0; } - up(&undef_sem); + mutex_unlock(&undef_mutex); return ret; } @@ -228,12 +228,12 @@ int release_undef_hook(int (*fn)(struct pt_regs *)) { int ret = -EINVAL; - down(&undef_sem); + mutex_lock(&undef_mutex); if (undef_hook == fn) { undef_hook = NULL; ret = 0; } - up(&undef_sem); + mutex_unlock(&undef_mutex); return ret; } diff --git a/arch/arm26/mm/init.c b/arch/arm26/mm/init.c index e3ecaa453747..7da8a5205678 100644 --- a/arch/arm26/mm/init.c +++ b/arch/arm26/mm/init.c @@ -23,6 +23,7 @@ #include <linux/initrd.h> #include <linux/bootmem.h> #include <linux/blkdev.h> +#include <linux/pfn.h> #include <asm/segment.h> #include <asm/mach-types.h> @@ -101,12 +102,6 @@ struct node_info { int bootmap_pages; }; -#define PFN_DOWN(x) ((x) >> PAGE_SHIFT) -#define PFN_UP(x) (PAGE_ALIGN(x) >> PAGE_SHIFT) -#define PFN_SIZE(x) ((x) >> PAGE_SHIFT) -#define PFN_RANGE(s,e) PFN_SIZE(PAGE_ALIGN((unsigned long)(e)) - \ - (((unsigned long)(s)) & PAGE_MASK)) - /* * FIXME: We really want to avoid allocating the bootmap bitmap * over the top of the initrd. Hopefully, this is located towards diff --git a/arch/cris/Kconfig b/arch/cris/Kconfig index b83261949737..856b665020e7 100644 --- a/arch/cris/Kconfig +++ b/arch/cris/Kconfig @@ -16,6 +16,14 @@ config RWSEM_GENERIC_SPINLOCK config RWSEM_XCHGADD_ALGORITHM bool +config GENERIC_FIND_NEXT_BIT + bool + default y + +config GENERIC_HWEIGHT + bool + default y + config GENERIC_CALIBRATE_DELAY bool default y diff --git a/arch/cris/kernel/setup.c b/arch/cris/kernel/setup.c index 1ba57efff60d..619a6eefd893 100644 --- a/arch/cris/kernel/setup.c +++ b/arch/cris/kernel/setup.c @@ -18,6 +18,7 @@ #include <linux/seq_file.h> #include <linux/tty.h> #include <linux/utsname.h> +#include <linux/pfn.h> #include <asm/setup.h> @@ -88,10 +89,6 @@ setup_arch(char **cmdline_p) init_mm.end_data = (unsigned long) &_edata; init_mm.brk = (unsigned long) &_end; -#define PFN_UP(x) (((x) + PAGE_SIZE-1) >> PAGE_SHIFT) -#define PFN_DOWN(x) ((x) >> PAGE_SHIFT) -#define PFN_PHYS(x) ((x) << PAGE_SHIFT) - /* min_low_pfn points to the start of DRAM, start_pfn points * to the first DRAM pages after the kernel, and max_low_pfn * to the end of DRAM. 
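The undef_sem change above is the same conversion applied later in this patch to fidvid_sem (powernow-k8), mtrr_sem and microcode_sem: semaphores that only ever provided mutual exclusion become mutexes, with down()/up() replaced by mutex_lock()/mutex_unlock(). The pattern in isolation; the lock and function names are illustrative:

#include <linux/mutex.h>

/* Previously: static DECLARE_MUTEX(foo_sem); guarded by down()/up(). */
static DEFINE_MUTEX(foo_mutex);

static void foo_update(void (*update)(void))
{
        mutex_lock(&foo_mutex);         /* may sleep, so process context only */
        update();
        mutex_unlock(&foo_mutex);
}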
diff --git a/arch/frv/Kconfig b/arch/frv/Kconfig index e08383712370..95a3892b8d1b 100644 --- a/arch/frv/Kconfig +++ b/arch/frv/Kconfig @@ -17,6 +17,10 @@ config GENERIC_FIND_NEXT_BIT bool default y +config GENERIC_HWEIGHT + bool + default y + config GENERIC_CALIBRATE_DELAY bool default n diff --git a/arch/frv/mm/mmu-context.c b/arch/frv/mm/mmu-context.c index f2c6866fc88b..1530a4111e6d 100644 --- a/arch/frv/mm/mmu-context.c +++ b/arch/frv/mm/mmu-context.c @@ -54,9 +54,9 @@ static unsigned get_cxn(mm_context_t *ctx) /* find the first unallocated context number * - 0 is reserved for the kernel */ - cxn = find_next_zero_bit(&cxn_bitmap, NR_CXN, 1); + cxn = find_next_zero_bit(cxn_bitmap, NR_CXN, 1); if (cxn < NR_CXN) { - set_bit(cxn, &cxn_bitmap); + set_bit(cxn, cxn_bitmap); } else { /* none remaining - need to steal someone else's cxn */ @@ -138,7 +138,7 @@ void destroy_context(struct mm_struct *mm) cxn_pinned = -1; list_del_init(&ctx->id_link); - clear_bit(ctx->id, &cxn_bitmap); + clear_bit(ctx->id, cxn_bitmap); __flush_tlb_mm(ctx->id); ctx->id = 0; } diff --git a/arch/h8300/Kconfig b/arch/h8300/Kconfig index 98308b018a35..cabf0bfffc53 100644 --- a/arch/h8300/Kconfig +++ b/arch/h8300/Kconfig @@ -29,6 +29,14 @@ config RWSEM_XCHGADD_ALGORITHM bool default n +config GENERIC_FIND_NEXT_BIT + bool + default y + +config GENERIC_HWEIGHT + bool + default y + config GENERIC_CALIBRATE_DELAY bool default y diff --git a/arch/i386/Kconfig b/arch/i386/Kconfig index b008fb0cd7b7..f17bd1d2707e 100644 --- a/arch/i386/Kconfig +++ b/arch/i386/Kconfig @@ -37,6 +37,10 @@ config GENERIC_IOMAP bool default y +config GENERIC_HWEIGHT + bool + default y + config ARCH_MAY_HAVE_PC_FDC bool default y @@ -227,6 +231,15 @@ config SCHED_SMT cost of slightly increased overhead in some places. If unsure say N here. +config SCHED_MC + bool "Multi-core scheduler support" + depends on SMP + default y + help + Multi-core scheduler support improves the CPU scheduler's decision + making when dealing with multi-core CPU chips at a cost of slightly + increased overhead in some places. If unsure say N here. 
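The frv mmu-context.c hunk above corrects the bitmap calls to pass the unsigned long array itself rather than its address; find_next_zero_bit(), set_bit() and clear_bit() all take an unsigned long * base. A stand-alone sketch of the same allocate-an-id idiom, with NR_IDS and the names invented for illustration and locking omitted for brevity:

#include <linux/bitops.h>
#include <linux/bitmap.h>

#define NR_IDS 64
static DECLARE_BITMAP(id_bitmap, NR_IDS);       /* an array of unsigned long */

static int example_alloc_id(void)
{
        /* id 0 is reserved, so search from bit 1, as the cxn allocator does */
        unsigned int id = find_next_zero_bit(id_bitmap, NR_IDS, 1);

        if (id >= NR_IDS)
                return -1;                      /* nothing free */
        set_bit(id, id_bitmap);                 /* array decays to unsigned long *, no & */
        return id;
}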
+ source "kernel/Kconfig.preempt" config X86_UP_APIC diff --git a/arch/i386/Makefile b/arch/i386/Makefile index c848a5b30391..3e4adb1e2244 100644 --- a/arch/i386/Makefile +++ b/arch/i386/Makefile @@ -103,7 +103,7 @@ AFLAGS += $(mflags-y) boot := arch/i386/boot PHONY += zImage bzImage compressed zlilo bzlilo \ - zdisk bzdisk fdimage fdimage144 fdimage288 install + zdisk bzdisk fdimage fdimage144 fdimage288 isoimage install all: bzImage @@ -122,7 +122,7 @@ zlilo bzlilo: vmlinux zdisk bzdisk: vmlinux $(Q)$(MAKE) $(build)=$(boot) BOOTIMAGE=$(KBUILD_IMAGE) zdisk -fdimage fdimage144 fdimage288: vmlinux +fdimage fdimage144 fdimage288 isoimage: vmlinux $(Q)$(MAKE) $(build)=$(boot) BOOTIMAGE=$(KBUILD_IMAGE) $@ install: @@ -139,6 +139,9 @@ define archhelp echo ' install to $$(INSTALL_PATH) and run lilo' echo ' bzdisk - Create a boot floppy in /dev/fd0' echo ' fdimage - Create a boot floppy image' + echo ' isoimage - Create a boot CD-ROM image' endef -CLEAN_FILES += arch/$(ARCH)/boot/fdimage arch/$(ARCH)/boot/mtools.conf +CLEAN_FILES += arch/$(ARCH)/boot/fdimage \ + arch/$(ARCH)/boot/image.iso \ + arch/$(ARCH)/boot/mtools.conf diff --git a/arch/i386/boot/Makefile b/arch/i386/boot/Makefile index f136752563b1..33e55476381b 100644 --- a/arch/i386/boot/Makefile +++ b/arch/i386/boot/Makefile @@ -62,8 +62,12 @@ $(obj)/setup $(obj)/bootsect: %: %.o FORCE $(obj)/compressed/vmlinux: FORCE $(Q)$(MAKE) $(build)=$(obj)/compressed IMAGE_OFFSET=$(IMAGE_OFFSET) $@ -# Set this if you want to pass append arguments to the zdisk/fdimage kernel +# Set this if you want to pass append arguments to the zdisk/fdimage/isoimage kernel FDARGS = +# Set this if you want an initrd included with the zdisk/fdimage/isoimage kernel +FDINITRD = + +image_cmdline = default linux $(FDARGS) $(if $(FDINITRD),initrd=initrd.img,) $(obj)/mtools.conf: $(src)/mtools.conf.in sed -e 's|@OBJ@|$(obj)|g' < $< > $@ @@ -72,8 +76,11 @@ $(obj)/mtools.conf: $(src)/mtools.conf.in zdisk: $(BOOTIMAGE) $(obj)/mtools.conf MTOOLSRC=$(obj)/mtools.conf mformat a: ; sync syslinux /dev/fd0 ; sync - echo 'default linux $(FDARGS)' | \ + echo '$(image_cmdline)' | \ MTOOLSRC=$(src)/mtools.conf mcopy - a:syslinux.cfg + if [ -f '$(FDINITRD)' ] ; then \ + MTOOLSRC=$(obj)/mtools.conf mcopy '$(FDINITRD)' a:initrd.img ; \ + fi MTOOLSRC=$(obj)/mtools.conf mcopy $(BOOTIMAGE) a:linux ; sync # These require being root or having syslinux 2.02 or higher installed @@ -81,18 +88,39 @@ fdimage fdimage144: $(BOOTIMAGE) $(obj)/mtools.conf dd if=/dev/zero of=$(obj)/fdimage bs=1024 count=1440 MTOOLSRC=$(obj)/mtools.conf mformat v: ; sync syslinux $(obj)/fdimage ; sync - echo 'default linux $(FDARGS)' | \ + echo '$(image_cmdline)' | \ MTOOLSRC=$(obj)/mtools.conf mcopy - v:syslinux.cfg + if [ -f '$(FDINITRD)' ] ; then \ + MTOOLSRC=$(obj)/mtools.conf mcopy '$(FDINITRD)' v:initrd.img ; \ + fi MTOOLSRC=$(obj)/mtools.conf mcopy $(BOOTIMAGE) v:linux ; sync fdimage288: $(BOOTIMAGE) $(obj)/mtools.conf dd if=/dev/zero of=$(obj)/fdimage bs=1024 count=2880 MTOOLSRC=$(obj)/mtools.conf mformat w: ; sync syslinux $(obj)/fdimage ; sync - echo 'default linux $(FDARGS)' | \ + echo '$(image_cmdline)' | \ MTOOLSRC=$(obj)/mtools.conf mcopy - w:syslinux.cfg + if [ -f '$(FDINITRD)' ] ; then \ + MTOOLSRC=$(obj)/mtools.conf mcopy '$(FDINITRD)' w:initrd.img ; \ + fi MTOOLSRC=$(obj)/mtools.conf mcopy $(BOOTIMAGE) w:linux ; sync +isoimage: $(BOOTIMAGE) + -rm -rf $(obj)/isoimage + mkdir $(obj)/isoimage + cp `echo /usr/lib*/syslinux/isolinux.bin | awk '{ print $1; }'` \ + $(obj)/isoimage + cp $(BOOTIMAGE) 
$(obj)/isoimage/linux + echo '$(image_cmdline)' > $(obj)/isoimage/isolinux.cfg + if [ -f '$(FDINITRD)' ] ; then \ + cp '$(FDINITRD)' $(obj)/isoimage/initrd.img ; \ + fi + mkisofs -J -r -o $(obj)/image.iso -b isolinux.bin -c boot.cat \ + -no-emul-boot -boot-load-size 4 -boot-info-table \ + $(obj)/isoimage + rm -rf $(obj)/isoimage + zlilo: $(BOOTIMAGE) if [ -f $(INSTALL_PATH)/vmlinuz ]; then mv $(INSTALL_PATH)/vmlinuz $(INSTALL_PATH)/vmlinuz.old; fi if [ -f $(INSTALL_PATH)/System.map ]; then mv $(INSTALL_PATH)/System.map $(INSTALL_PATH)/System.old; fi diff --git a/arch/i386/boot/video.S b/arch/i386/boot/video.S index 2ac40c8244c4..0000a2674537 100644 --- a/arch/i386/boot/video.S +++ b/arch/i386/boot/video.S @@ -1924,6 +1924,7 @@ skip10: movb %ah, %al ret store_edid: +#ifdef CONFIG_FB_FIRMWARE_EDID pushw %es # just save all registers pushw %ax pushw %bx @@ -1954,6 +1955,7 @@ store_edid: popw %bx popw %ax popw %es +#endif ret # VIDEO_SELECT-only variables diff --git a/arch/i386/kernel/acpi/boot.c b/arch/i386/kernel/acpi/boot.c index f1a21945963d..033066176b3e 100644 --- a/arch/i386/kernel/acpi/boot.c +++ b/arch/i386/kernel/acpi/boot.c @@ -668,10 +668,10 @@ unsigned long __init acpi_find_rsdp(void) unsigned long rsdp_phys = 0; if (efi_enabled) { - if (efi.acpi20) - return __pa(efi.acpi20); - else if (efi.acpi) - return __pa(efi.acpi); + if (efi.acpi20 != EFI_INVALID_TABLE_ADDR) + return efi.acpi20; + else if (efi.acpi != EFI_INVALID_TABLE_ADDR) + return efi.acpi; } /* * Scan memory looking for the RSDP signature. First search EBDA (low diff --git a/arch/i386/kernel/cpu/common.c b/arch/i386/kernel/cpu/common.c index 7e3d6b6a4e96..a06a49075f10 100644 --- a/arch/i386/kernel/cpu/common.c +++ b/arch/i386/kernel/cpu/common.c @@ -266,7 +266,7 @@ static void __init early_cpu_detect(void) void __cpuinit generic_identify(struct cpuinfo_x86 * c) { u32 tfms, xlvl; - int junk; + int ebx; if (have_cpuid_p()) { /* Get vendor name */ @@ -282,7 +282,7 @@ void __cpuinit generic_identify(struct cpuinfo_x86 * c) /* Intel-defined flags: level 0x00000001 */ if ( c->cpuid_level >= 0x00000001 ) { u32 capability, excap; - cpuid(0x00000001, &tfms, &junk, &excap, &capability); + cpuid(0x00000001, &tfms, &ebx, &excap, &capability); c->x86_capability[0] = capability; c->x86_capability[4] = excap; c->x86 = (tfms >> 8) & 15; @@ -292,6 +292,11 @@ void __cpuinit generic_identify(struct cpuinfo_x86 * c) if (c->x86 >= 0x6) c->x86_model += ((tfms >> 16) & 0xF) << 4; c->x86_mask = tfms & 15; +#ifdef CONFIG_SMP + c->apicid = phys_pkg_id((ebx >> 24) & 0xFF, 0); +#else + c->apicid = (ebx >> 24) & 0xFF; +#endif } else { /* Have CPUID level 0 only - unheard of */ c->x86 = 4; @@ -474,7 +479,6 @@ void __cpuinit detect_ht(struct cpuinfo_x86 *c) cpuid(1, &eax, &ebx, &ecx, &edx); - c->apicid = phys_pkg_id((ebx >> 24) & 0xFF, 0); if (!cpu_has(c, X86_FEATURE_HT) || cpu_has(c, X86_FEATURE_CMP_LEGACY)) return; diff --git a/arch/i386/kernel/cpu/cpufreq/powernow-k8.c b/arch/i386/kernel/cpu/cpufreq/powernow-k8.c index e5bc06480ff9..712a26bd4457 100644 --- a/arch/i386/kernel/cpu/cpufreq/powernow-k8.c +++ b/arch/i386/kernel/cpu/cpufreq/powernow-k8.c @@ -40,6 +40,7 @@ #ifdef CONFIG_X86_POWERNOW_K8_ACPI #include <linux/acpi.h> +#include <linux/mutex.h> #include <acpi/processor.h> #endif @@ -49,7 +50,7 @@ #include "powernow-k8.h" /* serialize freq changes */ -static DECLARE_MUTEX(fidvid_sem); +static DEFINE_MUTEX(fidvid_mutex); static struct powernow_k8_data *powernow_data[NR_CPUS]; @@ -943,17 +944,17 @@ static int powernowk8_target(struct 
cpufreq_policy *pol, unsigned targfreq, unsi if (cpufreq_frequency_table_target(pol, data->powernow_table, targfreq, relation, &newstate)) goto err_out; - down(&fidvid_sem); + mutex_lock(&fidvid_mutex); powernow_k8_acpi_pst_values(data, newstate); if (transition_frequency(data, newstate)) { printk(KERN_ERR PFX "transition frequency failed\n"); ret = 1; - up(&fidvid_sem); + mutex_unlock(&fidvid_mutex); goto err_out; } - up(&fidvid_sem); + mutex_unlock(&fidvid_mutex); pol->cur = find_khz_freq_from_fid(data->currfid); ret = 0; @@ -1094,10 +1095,15 @@ static int __devexit powernowk8_cpu_exit (struct cpufreq_policy *pol) static unsigned int powernowk8_get (unsigned int cpu) { - struct powernow_k8_data *data = powernow_data[cpu]; + struct powernow_k8_data *data; cpumask_t oldmask = current->cpus_allowed; unsigned int khz = 0; + data = powernow_data[first_cpu(cpu_core_map[cpu])]; + + if (!data) + return -EINVAL; + set_cpus_allowed(current, cpumask_of_cpu(cpu)); if (smp_processor_id() != cpu) { printk(KERN_ERR PFX "limiting to CPU %d failed in powernowk8_get\n", cpu); diff --git a/arch/i386/kernel/cpu/cpufreq/powernow-k8.h b/arch/i386/kernel/cpu/cpufreq/powernow-k8.h index 00ea899c17e1..79a7c5c87edc 100644 --- a/arch/i386/kernel/cpu/cpufreq/powernow-k8.h +++ b/arch/i386/kernel/cpu/cpufreq/powernow-k8.h @@ -182,10 +182,6 @@ static int core_frequency_transition(struct powernow_k8_data *data, u32 reqfid); static void powernow_k8_acpi_pst_values(struct powernow_k8_data *data, unsigned int index); -#ifndef for_each_cpu_mask -#define for_each_cpu_mask(i,mask) for (i=0;i<1;i++) -#endif - #ifdef CONFIG_SMP static inline void define_siblings(int cpu, cpumask_t cpu_sharedcore_mask[]) { diff --git a/arch/i386/kernel/cpu/intel_cacheinfo.c b/arch/i386/kernel/cpu/intel_cacheinfo.c index ce61921369e5..9df87b03612c 100644 --- a/arch/i386/kernel/cpu/intel_cacheinfo.c +++ b/arch/i386/kernel/cpu/intel_cacheinfo.c @@ -173,6 +173,10 @@ unsigned int __cpuinit init_intel_cacheinfo(struct cpuinfo_x86 *c) unsigned int trace = 0, l1i = 0, l1d = 0, l2 = 0, l3 = 0; /* Cache sizes */ unsigned int new_l1d = 0, new_l1i = 0; /* Cache sizes from cpuid(4) */ unsigned int new_l2 = 0, new_l3 = 0, i; /* Cache sizes from cpuid(4) */ + unsigned int l2_id = 0, l3_id = 0, num_threads_sharing, index_msb; +#ifdef CONFIG_SMP + unsigned int cpu = (c == &boot_cpu_data) ? 0 : (c - cpu_data); +#endif if (c->cpuid_level > 3) { static int is_initialized; @@ -205,9 +209,15 @@ unsigned int __cpuinit init_intel_cacheinfo(struct cpuinfo_x86 *c) break; case 2: new_l2 = this_leaf.size/1024; + num_threads_sharing = 1 + this_leaf.eax.split.num_threads_sharing; + index_msb = get_count_order(num_threads_sharing); + l2_id = c->apicid >> index_msb; break; case 3: new_l3 = this_leaf.size/1024; + num_threads_sharing = 1 + this_leaf.eax.split.num_threads_sharing; + index_msb = get_count_order(num_threads_sharing); + l3_id = c->apicid >> index_msb; break; default: break; @@ -215,11 +225,19 @@ unsigned int __cpuinit init_intel_cacheinfo(struct cpuinfo_x86 *c) } } } - if (c->cpuid_level > 1) { + /* + * Don't use cpuid2 if cpuid4 is supported. 
For P4, we use cpuid2 for + * trace cache + */ + if ((num_cache_leaves == 0 || c->x86 == 15) && c->cpuid_level > 1) { /* supports eax=2 call */ int i, j, n; int regs[4]; unsigned char *dp = (unsigned char *)regs; + int only_trace = 0; + + if (num_cache_leaves != 0 && c->x86 == 15) + only_trace = 1; /* Number of times to iterate */ n = cpuid_eax(2) & 0xFF; @@ -241,6 +259,8 @@ unsigned int __cpuinit init_intel_cacheinfo(struct cpuinfo_x86 *c) while (cache_table[k].descriptor != 0) { if (cache_table[k].descriptor == des) { + if (only_trace && cache_table[k].cache_type != LVL_TRACE) + break; switch (cache_table[k].cache_type) { case LVL_1_INST: l1i += cache_table[k].size; @@ -266,34 +286,45 @@ unsigned int __cpuinit init_intel_cacheinfo(struct cpuinfo_x86 *c) } } } + } - if (new_l1d) - l1d = new_l1d; + if (new_l1d) + l1d = new_l1d; - if (new_l1i) - l1i = new_l1i; + if (new_l1i) + l1i = new_l1i; - if (new_l2) - l2 = new_l2; + if (new_l2) { + l2 = new_l2; +#ifdef CONFIG_SMP + cpu_llc_id[cpu] = l2_id; +#endif + } - if (new_l3) - l3 = new_l3; + if (new_l3) { + l3 = new_l3; +#ifdef CONFIG_SMP + cpu_llc_id[cpu] = l3_id; +#endif + } - if ( trace ) - printk (KERN_INFO "CPU: Trace cache: %dK uops", trace); - else if ( l1i ) - printk (KERN_INFO "CPU: L1 I cache: %dK", l1i); - if ( l1d ) - printk(", L1 D cache: %dK\n", l1d); - else - printk("\n"); - if ( l2 ) - printk(KERN_INFO "CPU: L2 cache: %dK\n", l2); - if ( l3 ) - printk(KERN_INFO "CPU: L3 cache: %dK\n", l3); + if (trace) + printk (KERN_INFO "CPU: Trace cache: %dK uops", trace); + else if ( l1i ) + printk (KERN_INFO "CPU: L1 I cache: %dK", l1i); - c->x86_cache_size = l3 ? l3 : (l2 ? l2 : (l1i+l1d)); - } + if (l1d) + printk(", L1 D cache: %dK\n", l1d); + else + printk("\n"); + + if (l2) + printk(KERN_INFO "CPU: L2 cache: %dK\n", l2); + + if (l3) + printk(KERN_INFO "CPU: L3 cache: %dK\n", l3); + + c->x86_cache_size = l3 ? l3 : (l2 ? 
l2 : (l1i+l1d)); return l2; } diff --git a/arch/i386/kernel/cpu/mtrr/main.c b/arch/i386/kernel/cpu/mtrr/main.c index 3b4618bed70d..fff90bda4733 100644 --- a/arch/i386/kernel/cpu/mtrr/main.c +++ b/arch/i386/kernel/cpu/mtrr/main.c @@ -36,6 +36,7 @@ #include <linux/pci.h> #include <linux/smp.h> #include <linux/cpu.h> +#include <linux/mutex.h> #include <asm/mtrr.h> @@ -47,7 +48,7 @@ u32 num_var_ranges = 0; unsigned int *usage_table; -static DECLARE_MUTEX(mtrr_sem); +static DEFINE_MUTEX(mtrr_mutex); u32 size_or_mask, size_and_mask; @@ -333,7 +334,7 @@ int mtrr_add_page(unsigned long base, unsigned long size, /* No CPU hotplug when we change MTRR entries */ lock_cpu_hotplug(); /* Search for existing MTRR */ - down(&mtrr_sem); + mutex_lock(&mtrr_mutex); for (i = 0; i < num_var_ranges; ++i) { mtrr_if->get(i, &lbase, &lsize, <ype); if (base >= lbase + lsize) @@ -371,7 +372,7 @@ int mtrr_add_page(unsigned long base, unsigned long size, printk(KERN_INFO "mtrr: no more MTRRs available\n"); error = i; out: - up(&mtrr_sem); + mutex_unlock(&mtrr_mutex); unlock_cpu_hotplug(); return error; } @@ -464,7 +465,7 @@ int mtrr_del_page(int reg, unsigned long base, unsigned long size) max = num_var_ranges; /* No CPU hotplug when we change MTRR entries */ lock_cpu_hotplug(); - down(&mtrr_sem); + mutex_lock(&mtrr_mutex); if (reg < 0) { /* Search for existing MTRR */ for (i = 0; i < max; ++i) { @@ -503,7 +504,7 @@ int mtrr_del_page(int reg, unsigned long base, unsigned long size) set_mtrr(reg, 0, 0, 0); error = reg; out: - up(&mtrr_sem); + mutex_unlock(&mtrr_mutex); unlock_cpu_hotplug(); return error; } @@ -685,7 +686,7 @@ void mtrr_ap_init(void) if (!mtrr_if || !use_intel()) return; /* - * Ideally we should hold mtrr_sem here to avoid mtrr entries changed, + * Ideally we should hold mtrr_mutex here to avoid mtrr entries changed, * but this routine will be called in cpu boot time, holding the lock * breaks it. This routine is called in two cases: 1.very earily time * of software resume, when there absolutely isn't mtrr entry changes; diff --git a/arch/i386/kernel/dmi_scan.c b/arch/i386/kernel/dmi_scan.c index ebc8dc116c43..5efceebc48dc 100644 --- a/arch/i386/kernel/dmi_scan.c +++ b/arch/i386/kernel/dmi_scan.c @@ -3,6 +3,7 @@ #include <linux/init.h> #include <linux/module.h> #include <linux/dmi.h> +#include <linux/efi.h> #include <linux/bootmem.h> #include <linux/slab.h> #include <asm/dmi.h> @@ -185,47 +186,72 @@ static void __init dmi_decode(struct dmi_header *dm) } } -void __init dmi_scan_machine(void) +static int __init dmi_present(char __iomem *p) { u8 buf[15]; - char __iomem *p, *q; + memcpy_fromio(buf, p, 15); + if ((memcmp(buf, "_DMI_", 5) == 0) && dmi_checksum(buf)) { + u16 num = (buf[13] << 8) | buf[12]; + u16 len = (buf[7] << 8) | buf[6]; + u32 base = (buf[11] << 24) | (buf[10] << 16) | + (buf[9] << 8) | buf[8]; - /* - * no iounmap() for that ioremap(); it would be a no-op, but it's - * so early in setup that sucker gets confused into doing what - * it shouldn't if we actually call it. - */ - p = ioremap(0xF0000, 0x10000); - if (p == NULL) - goto out; - - for (q = p; q < p + 0x10000; q += 16) { - memcpy_fromio(buf, q, 15); - if ((memcmp(buf, "_DMI_", 5) == 0) && dmi_checksum(buf)) { - u16 num = (buf[13] << 8) | buf[12]; - u16 len = (buf[7] << 8) | buf[6]; - u32 base = (buf[11] << 24) | (buf[10] << 16) | - (buf[9] << 8) | buf[8]; - - /* - * DMI version 0.0 means that the real version is taken from - * the SMBIOS version, which we don't know at this point. 
- */ - if (buf[14] != 0) - printk(KERN_INFO "DMI %d.%d present.\n", - buf[14] >> 4, buf[14] & 0xF); - else - printk(KERN_INFO "DMI present.\n"); + /* + * DMI version 0.0 means that the real version is taken from + * the SMBIOS version, which we don't know at this point. + */ + if (buf[14] != 0) + printk(KERN_INFO "DMI %d.%d present.\n", + buf[14] >> 4, buf[14] & 0xF); + else + printk(KERN_INFO "DMI present.\n"); + if (dmi_table(base,len, num, dmi_decode) == 0) + return 0; + } + return 1; +} - if (dmi_table(base,len, num, dmi_decode) == 0) +void __init dmi_scan_machine(void) +{ + char __iomem *p, *q; + int rc; + + if (efi_enabled) { + if (efi.smbios == EFI_INVALID_TABLE_ADDR) + goto out; + + /* This is called as a core_initcall() because it isn't + * needed during early boot. This also means we can + * iounmap the space when we're done with it. + */ + p = dmi_ioremap(efi.smbios, 32); + if (p == NULL) + goto out; + + rc = dmi_present(p + 0x10); /* offset of _DMI_ string */ + dmi_iounmap(p, 32); + if (!rc) + return; + } + else { + /* + * no iounmap() for that ioremap(); it would be a no-op, but + * it's so early in setup that sucker gets confused into doing + * what it shouldn't if we actually call it. + */ + p = dmi_ioremap(0xF0000, 0x10000); + if (p == NULL) + goto out; + + for (q = p; q < p + 0x10000; q += 16) { + rc = dmi_present(q); + if (!rc) return; } } - -out: printk(KERN_INFO "DMI not present or invalid.\n"); + out: printk(KERN_INFO "DMI not present or invalid.\n"); } - /** * dmi_check_system - check system DMI data * @list: array of dmi_system_id structures to match against diff --git a/arch/i386/kernel/efi.c b/arch/i386/kernel/efi.c index 7ec6cfa01fb3..9202b67c4b2e 100644 --- a/arch/i386/kernel/efi.c +++ b/arch/i386/kernel/efi.c @@ -361,7 +361,7 @@ void __init efi_init(void) */ c16 = (efi_char16_t *) boot_ioremap(efi.systab->fw_vendor, 2); if (c16) { - for (i = 0; i < sizeof(vendor) && *c16; ++i) + for (i = 0; i < (sizeof(vendor) - 1) && *c16; ++i) vendor[i] = *c16++; vendor[i] = '\0'; } else @@ -381,29 +381,38 @@ void __init efi_init(void) if (config_tables == NULL) printk(KERN_ERR PFX "Could not map EFI Configuration Table!\n"); + efi.mps = EFI_INVALID_TABLE_ADDR; + efi.acpi = EFI_INVALID_TABLE_ADDR; + efi.acpi20 = EFI_INVALID_TABLE_ADDR; + efi.smbios = EFI_INVALID_TABLE_ADDR; + efi.sal_systab = EFI_INVALID_TABLE_ADDR; + efi.boot_info = EFI_INVALID_TABLE_ADDR; + efi.hcdp = EFI_INVALID_TABLE_ADDR; + efi.uga = EFI_INVALID_TABLE_ADDR; + for (i = 0; i < num_config_tables; i++) { if (efi_guidcmp(config_tables[i].guid, MPS_TABLE_GUID) == 0) { - efi.mps = (void *)config_tables[i].table; + efi.mps = config_tables[i].table; printk(KERN_INFO " MPS=0x%lx ", config_tables[i].table); } else if (efi_guidcmp(config_tables[i].guid, ACPI_20_TABLE_GUID) == 0) { - efi.acpi20 = __va(config_tables[i].table); + efi.acpi20 = config_tables[i].table; printk(KERN_INFO " ACPI 2.0=0x%lx ", config_tables[i].table); } else if (efi_guidcmp(config_tables[i].guid, ACPI_TABLE_GUID) == 0) { - efi.acpi = __va(config_tables[i].table); + efi.acpi = config_tables[i].table; printk(KERN_INFO " ACPI=0x%lx ", config_tables[i].table); } else if (efi_guidcmp(config_tables[i].guid, SMBIOS_TABLE_GUID) == 0) { - efi.smbios = (void *) config_tables[i].table; + efi.smbios = config_tables[i].table; printk(KERN_INFO " SMBIOS=0x%lx ", config_tables[i].table); } else if (efi_guidcmp(config_tables[i].guid, HCDP_TABLE_GUID) == 0) { - efi.hcdp = (void *)config_tables[i].table; + efi.hcdp = config_tables[i].table; printk(KERN_INFO " 
HCDP=0x%lx ", config_tables[i].table); } else if (efi_guidcmp(config_tables[i].guid, UGA_IO_PROTOCOL_GUID) == 0) { - efi.uga = (void *)config_tables[i].table; + efi.uga = config_tables[i].table; printk(KERN_INFO " UGA=0x%lx ", config_tables[i].table); } } diff --git a/arch/i386/kernel/io_apic.c b/arch/i386/kernel/io_apic.c index 311b4e7266f1..3b329af4afc5 100644 --- a/arch/i386/kernel/io_apic.c +++ b/arch/i386/kernel/io_apic.c @@ -381,7 +381,7 @@ static void do_irq_balance(void) unsigned long imbalance = 0; cpumask_t allowed_mask, target_cpu_mask, tmp; - for_each_cpu(i) { + for_each_possible_cpu(i) { int package_index; CPU_IRQ(i) = 0; if (!cpu_online(i)) @@ -632,7 +632,7 @@ static int __init balanced_irq_init(void) else printk(KERN_ERR "balanced_irq_init: failed to spawn balanced_irq"); failed: - for_each_cpu(i) { + for_each_possible_cpu(i) { kfree(irq_cpu_data[i].irq_delta); irq_cpu_data[i].irq_delta = NULL; kfree(irq_cpu_data[i].last_irq); diff --git a/arch/i386/kernel/kprobes.c b/arch/i386/kernel/kprobes.c index 7a59050242a7..f19768789e8a 100644 --- a/arch/i386/kernel/kprobes.c +++ b/arch/i386/kernel/kprobes.c @@ -35,12 +35,56 @@ #include <asm/cacheflush.h> #include <asm/kdebug.h> #include <asm/desc.h> +#include <asm/uaccess.h> void jprobe_return_end(void); DEFINE_PER_CPU(struct kprobe *, current_kprobe) = NULL; DEFINE_PER_CPU(struct kprobe_ctlblk, kprobe_ctlblk); +/* insert a jmp code */ +static inline void set_jmp_op(void *from, void *to) +{ + struct __arch_jmp_op { + char op; + long raddr; + } __attribute__((packed)) *jop; + jop = (struct __arch_jmp_op *)from; + jop->raddr = (long)(to) - ((long)(from) + 5); + jop->op = RELATIVEJUMP_INSTRUCTION; +} + +/* + * returns non-zero if opcodes can be boosted. + */ +static inline int can_boost(kprobe_opcode_t opcode) +{ + switch (opcode & 0xf0 ) { + case 0x70: + return 0; /* can't boost conditional jump */ + case 0x90: + /* can't boost call and pushf */ + return opcode != 0x9a && opcode != 0x9c; + case 0xc0: + /* can't boost undefined opcodes and soft-interruptions */ + return (0xc1 < opcode && opcode < 0xc6) || + (0xc7 < opcode && opcode < 0xcc) || opcode == 0xcf; + case 0xd0: + /* can boost AA* and XLAT */ + return (opcode == 0xd4 || opcode == 0xd5 || opcode == 0xd7); + case 0xe0: + /* can boost in/out and (may be) jmps */ + return (0xe3 < opcode && opcode != 0xe8); + case 0xf0: + /* clear and set flags can be boost */ + return (opcode == 0xf5 || (0xf7 < opcode && opcode < 0xfe)); + default: + /* currently, can't boost 2 bytes opcodes */ + return opcode != 0x0f; + } +} + + /* * returns non-zero if opcode modifies the interrupt flag. 
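set_jmp_op() above is what lets a boosted kprobe return from its out-of-line copy without a single-step trap: it writes a 5-byte relative jmp, one opcode byte plus a 32-bit displacement counted from the end of the instruction, which is why the code subtracts (from + 5). A sketch of the same arithmetic; the struct and function names are illustrative (on i386 long is 32 bits, so the layout is exactly 5 bytes):

/* Mirrors the __arch_jmp_op layout used by set_jmp_op(). */
struct example_jmp {
        unsigned char op;       /* jmp rel32 opcode, 0xe9 */
        long rel32;             /* signed displacement */
} __attribute__((packed));

static void example_fill_jmp(struct example_jmp *slot, void *from, void *to)
{
        /*
         * The CPU resolves the target as "address of the next instruction
         * plus displacement", and the next instruction starts at from + 5.
         */
        slot->rel32 = (long)to - ((long)from + 5);
        slot->op = 0xe9;
}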
*/ @@ -65,6 +109,11 @@ int __kprobes arch_prepare_kprobe(struct kprobe *p) memcpy(p->ainsn.insn, p->addr, MAX_INSN_SIZE * sizeof(kprobe_opcode_t)); p->opcode = *p->addr; + if (can_boost(p->opcode)) { + p->ainsn.boostable = 0; + } else { + p->ainsn.boostable = -1; + } return 0; } @@ -155,9 +204,13 @@ static int __kprobes kprobe_handler(struct pt_regs *regs) { struct kprobe *p; int ret = 0; - kprobe_opcode_t *addr = NULL; - unsigned long *lp; + kprobe_opcode_t *addr; struct kprobe_ctlblk *kcb; +#ifdef CONFIG_PREEMPT + unsigned pre_preempt_count = preempt_count(); +#endif /* CONFIG_PREEMPT */ + + addr = (kprobe_opcode_t *)(regs->eip - sizeof(kprobe_opcode_t)); /* * We don't want to be preempted for the entire @@ -166,17 +219,6 @@ static int __kprobes kprobe_handler(struct pt_regs *regs) preempt_disable(); kcb = get_kprobe_ctlblk(); - /* Check if the application is using LDT entry for its code segment and - * calculate the address by reading the base address from the LDT entry. - */ - if ((regs->xcs & 4) && (current->mm)) { - lp = (unsigned long *) ((unsigned long)((regs->xcs >> 3) * 8) - + (char *) current->mm->context.ldt); - addr = (kprobe_opcode_t *) (get_desc_base(lp) + regs->eip - - sizeof(kprobe_opcode_t)); - } else { - addr = (kprobe_opcode_t *)(regs->eip - sizeof(kprobe_opcode_t)); - } /* Check we're not actually recursing */ if (kprobe_running()) { p = get_kprobe(addr); @@ -252,6 +294,21 @@ static int __kprobes kprobe_handler(struct pt_regs *regs) /* handler has already set things up, so skip ss setup */ return 1; + if (p->ainsn.boostable == 1 && +#ifdef CONFIG_PREEMPT + !(pre_preempt_count) && /* + * This enables booster when the direct + * execution path aren't preempted. + */ +#endif /* CONFIG_PREEMPT */ + !p->post_handler && !p->break_handler ) { + /* Boost up -- we can execute copied instructions directly */ + reset_current_kprobe(); + regs->eip = (unsigned long)p->ainsn.insn; + preempt_enable_no_resched(); + return 1; + } + ss_probe: prepare_singlestep(p, regs); kcb->kprobe_status = KPROBE_HIT_SS; @@ -267,17 +324,44 @@ no_kprobe: * here. When a retprobed function returns, this probe is hit and * trampoline_probe_handler() runs, calling the kretprobe's handler. 
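The simplified address computation above (addr = regs->eip - sizeof(kprobe_opcode_t)) works because the planted breakpoint is a single int3 byte: when the trap is delivered, eip already points just past that byte, and on i386 kprobe_opcode_t is one byte wide. In sketch form, with the helper name invented for illustration:

#include <asm/ptrace.h>
#include <asm/kprobes.h>        /* kprobe_opcode_t is one byte on i386 */

static kprobe_opcode_t *example_probed_addr(struct pt_regs *regs)
{
        /* eip at trap time == probe address + 1 (the int3 byte) */
        return (kprobe_opcode_t *)(regs->eip - sizeof(kprobe_opcode_t));
}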
*/ - void kretprobe_trampoline_holder(void) + void __kprobes kretprobe_trampoline_holder(void) { - asm volatile ( ".global kretprobe_trampoline\n" + asm volatile ( ".global kretprobe_trampoline\n" "kretprobe_trampoline: \n" - "nop\n"); - } + " pushf\n" + /* skip cs, eip, orig_eax, es, ds */ + " subl $20, %esp\n" + " pushl %eax\n" + " pushl %ebp\n" + " pushl %edi\n" + " pushl %esi\n" + " pushl %edx\n" + " pushl %ecx\n" + " pushl %ebx\n" + " movl %esp, %eax\n" + " call trampoline_handler\n" + /* move eflags to cs */ + " movl 48(%esp), %edx\n" + " movl %edx, 44(%esp)\n" + /* save true return address on eflags */ + " movl %eax, 48(%esp)\n" + " popl %ebx\n" + " popl %ecx\n" + " popl %edx\n" + " popl %esi\n" + " popl %edi\n" + " popl %ebp\n" + " popl %eax\n" + /* skip eip, orig_eax, es, ds */ + " addl $16, %esp\n" + " popf\n" + " ret\n"); +} /* - * Called when we hit the probe point at kretprobe_trampoline + * Called from kretprobe_trampoline */ -int __kprobes trampoline_probe_handler(struct kprobe *p, struct pt_regs *regs) +fastcall void *__kprobes trampoline_handler(struct pt_regs *regs) { struct kretprobe_instance *ri = NULL; struct hlist_head *head; @@ -306,8 +390,11 @@ int __kprobes trampoline_probe_handler(struct kprobe *p, struct pt_regs *regs) /* another task is sharing our hash bucket */ continue; - if (ri->rp && ri->rp->handler) + if (ri->rp && ri->rp->handler){ + __get_cpu_var(current_kprobe) = &ri->rp->kp; ri->rp->handler(ri, regs); + __get_cpu_var(current_kprobe) = NULL; + } orig_ret_address = (unsigned long)ri->ret_addr; recycle_rp_inst(ri); @@ -322,18 +409,10 @@ int __kprobes trampoline_probe_handler(struct kprobe *p, struct pt_regs *regs) } BUG_ON(!orig_ret_address || (orig_ret_address == trampoline_address)); - regs->eip = orig_ret_address; - reset_current_kprobe(); spin_unlock_irqrestore(&kretprobe_lock, flags); - preempt_enable_no_resched(); - /* - * By returning a non-zero value, we are telling - * kprobe_handler() that we don't want the post_handler - * to run (and have re-enabled preemption) - */ - return 1; + return (void*)orig_ret_address; } /* @@ -357,15 +436,17 @@ int __kprobes trampoline_probe_handler(struct kprobe *p, struct pt_regs *regs) * 2) If the single-stepped instruction was a call, the return address * that is atop the stack is the address following the copied instruction. * We need to make it the address following the original instruction. + * + * This function also checks instruction size for preparing direct execution. */ static void __kprobes resume_execution(struct kprobe *p, struct pt_regs *regs, struct kprobe_ctlblk *kcb) { unsigned long *tos = (unsigned long *)®s->esp; - unsigned long next_eip = 0; unsigned long copy_eip = (unsigned long)p->ainsn.insn; unsigned long orig_eip = (unsigned long)p->addr; + regs->eflags &= ~TF_MASK; switch (p->ainsn.insn[0]) { case 0x9c: /* pushfl */ *tos &= ~(TF_MASK | IF_MASK); @@ -375,37 +456,51 @@ static void __kprobes resume_execution(struct kprobe *p, case 0xcb: case 0xc2: case 0xca: - regs->eflags &= ~TF_MASK; - /* eip is already adjusted, no more changes required*/ - return; + case 0xea: /* jmp absolute -- eip is correct */ + /* eip is already adjusted, no more changes required */ + p->ainsn.boostable = 1; + goto no_change; case 0xe8: /* call relative - Fix return addr */ *tos = orig_eip + (*tos - copy_eip); break; case 0xff: if ((p->ainsn.insn[1] & 0x30) == 0x10) { /* call absolute, indirect */ - /* Fix return addr; eip is correct. */ - next_eip = regs->eip; + /* + * Fix return addr; eip is correct. 
+ * But this is not boostable + */ *tos = orig_eip + (*tos - copy_eip); + goto no_change; } else if (((p->ainsn.insn[1] & 0x31) == 0x20) || /* jmp near, absolute indirect */ ((p->ainsn.insn[1] & 0x31) == 0x21)) { /* jmp far, absolute indirect */ - /* eip is correct. */ - next_eip = regs->eip; + /* eip is correct. And this is boostable */ + p->ainsn.boostable = 1; + goto no_change; } - break; - case 0xea: /* jmp absolute -- eip is correct */ - next_eip = regs->eip; - break; default: break; } - regs->eflags &= ~TF_MASK; - if (next_eip) { - regs->eip = next_eip; - } else { - regs->eip = orig_eip + (regs->eip - copy_eip); + if (p->ainsn.boostable == 0) { + if ((regs->eip > copy_eip) && + (regs->eip - copy_eip) + 5 < MAX_INSN_SIZE) { + /* + * These instructions can be executed directly if it + * jumps back to correct address. + */ + set_jmp_op((void *)regs->eip, + (void *)orig_eip + (regs->eip - copy_eip)); + p->ainsn.boostable = 1; + } else { + p->ainsn.boostable = -1; + } } + + regs->eip = orig_eip + (regs->eip - copy_eip); + +no_change: + return; } /* @@ -453,15 +548,57 @@ static inline int kprobe_fault_handler(struct pt_regs *regs, int trapnr) struct kprobe *cur = kprobe_running(); struct kprobe_ctlblk *kcb = get_kprobe_ctlblk(); - if (cur->fault_handler && cur->fault_handler(cur, regs, trapnr)) - return 1; - - if (kcb->kprobe_status & KPROBE_HIT_SS) { - resume_execution(cur, regs, kcb); + switch(kcb->kprobe_status) { + case KPROBE_HIT_SS: + case KPROBE_REENTER: + /* + * We are here because the instruction being single + * stepped caused a page fault. We reset the current + * kprobe and the eip points back to the probe address + * and allow the page fault handler to continue as a + * normal page fault. + */ + regs->eip = (unsigned long)cur->addr; regs->eflags |= kcb->kprobe_old_eflags; - - reset_current_kprobe(); + if (kcb->kprobe_status == KPROBE_REENTER) + restore_previous_kprobe(kcb); + else + reset_current_kprobe(); preempt_enable_no_resched(); + break; + case KPROBE_HIT_ACTIVE: + case KPROBE_HIT_SSDONE: + /* + * We increment the nmissed count for accounting, + * we can also use npre/npostfault count for accouting + * these specific fault cases. + */ + kprobes_inc_nmissed_count(cur); + + /* + * We come here because instructions in the pre/post + * handler caused the page_fault, this could happen + * if handler tries to access user space by + * copy_from_user(), get_user() etc. Let the + * user-specified handler try to fix it first. + */ + if (cur->fault_handler && cur->fault_handler(cur, regs, trapnr)) + return 1; + + /* + * In case the user-specified fault handler returned + * zero, try to fix up. + */ + if (fixup_exception(regs)) + return 1; + + /* + * fixup_exception() could not handle it, + * Let do_page_fault() fix it. 
+ */ + break; + default: + break; } return 0; } @@ -475,6 +612,9 @@ int __kprobes kprobe_exceptions_notify(struct notifier_block *self, struct die_args *args = (struct die_args *)data; int ret = NOTIFY_DONE; + if (args->regs && user_mode(args->regs)) + return ret; + switch (val) { case DIE_INT3: if (kprobe_handler(args->regs)) @@ -564,12 +704,7 @@ int __kprobes longjmp_break_handler(struct kprobe *p, struct pt_regs *regs) return 0; } -static struct kprobe trampoline_p = { - .addr = (kprobe_opcode_t *) &kretprobe_trampoline, - .pre_handler = trampoline_probe_handler -}; - int __init arch_init_kprobes(void) { - return register_kprobe(&trampoline_p); + return 0; } diff --git a/arch/i386/kernel/microcode.c b/arch/i386/kernel/microcode.c index 55bc365b8753..e7c138f66c5a 100644 --- a/arch/i386/kernel/microcode.c +++ b/arch/i386/kernel/microcode.c @@ -81,6 +81,7 @@ #include <linux/miscdevice.h> #include <linux/spinlock.h> #include <linux/mm.h> +#include <linux/mutex.h> #include <asm/msr.h> #include <asm/uaccess.h> @@ -114,7 +115,7 @@ MODULE_LICENSE("GPL"); static DEFINE_SPINLOCK(microcode_update_lock); /* no concurrent ->write()s are allowed on /dev/cpu/microcode */ -static DECLARE_MUTEX(microcode_sem); +static DEFINE_MUTEX(microcode_mutex); static void __user *user_buffer; /* user area microcode data buffer */ static unsigned int user_buffer_size; /* it's size */ @@ -444,7 +445,7 @@ static ssize_t microcode_write (struct file *file, const char __user *buf, size_ return -EINVAL; } - down(µcode_sem); + mutex_lock(µcode_mutex); user_buffer = (void __user *) buf; user_buffer_size = (int) len; @@ -453,31 +454,14 @@ static ssize_t microcode_write (struct file *file, const char __user *buf, size_ if (!ret) ret = (ssize_t)len; - up(µcode_sem); + mutex_unlock(µcode_mutex); return ret; } -static int microcode_ioctl (struct inode *inode, struct file *file, - unsigned int cmd, unsigned long arg) -{ - switch (cmd) { - /* - * XXX: will be removed after microcode_ctl - * is updated to ignore failure of this ioctl() - */ - case MICROCODE_IOCFREE: - return 0; - default: - return -EINVAL; - } - return -EINVAL; -} - static struct file_operations microcode_fops = { .owner = THIS_MODULE, .write = microcode_write, - .ioctl = microcode_ioctl, .open = microcode_open, }; diff --git a/arch/i386/kernel/nmi.c b/arch/i386/kernel/nmi.c index 9074818b9473..d43b498ec745 100644 --- a/arch/i386/kernel/nmi.c +++ b/arch/i386/kernel/nmi.c @@ -138,12 +138,12 @@ static int __init check_nmi_watchdog(void) if (nmi_watchdog == NMI_LOCAL_APIC) smp_call_function(nmi_cpu_busy, (void *)&endflag, 0, 0); - for_each_cpu(cpu) + for_each_possible_cpu(cpu) prev_nmi_count[cpu] = per_cpu(irq_stat, cpu).__nmi_count; local_irq_enable(); mdelay((10*1000)/nmi_hz); // wait 10 ticks - for_each_cpu(cpu) { + for_each_possible_cpu(cpu) { #ifdef CONFIG_SMP /* Check cpu_callin_map here because that is set after the timer is started. */ @@ -510,7 +510,7 @@ void touch_nmi_watchdog (void) * Just reset the alert counters, (other CPUs might be * spinning on locks we hold): */ - for_each_cpu(i) + for_each_possible_cpu(i) alert_counter[i] = 0; /* @@ -529,7 +529,8 @@ void nmi_watchdog_tick (struct pt_regs * regs) * always switch the stack NMI-atomically, it's safe to use * smp_processor_id(). 
*/ - int sum, cpu = smp_processor_id(); + unsigned int sum; + int cpu = smp_processor_id(); sum = per_cpu(irq_stat, cpu).apic_timer_irqs; diff --git a/arch/i386/kernel/process.c b/arch/i386/kernel/process.c index 299e61674084..24b3e745478b 100644 --- a/arch/i386/kernel/process.c +++ b/arch/i386/kernel/process.c @@ -38,7 +38,6 @@ #include <linux/kallsyms.h> #include <linux/ptrace.h> #include <linux/random.h> -#include <linux/kprobes.h> #include <asm/uaccess.h> #include <asm/pgtable.h> @@ -364,13 +363,6 @@ void exit_thread(void) struct task_struct *tsk = current; struct thread_struct *t = &tsk->thread; - /* - * Remove function-return probe instances associated with this task - * and put them back on the free list. Do not insert an exit probe for - * this function, it will be disabled by kprobe_flush_task if you do. - */ - kprobe_flush_task(tsk); - /* The process may have allocated an io port bitmap... nuke it. */ if (unlikely(NULL != t->io_bitmap_ptr)) { int cpu = get_cpu(); diff --git a/arch/i386/kernel/setup.c b/arch/i386/kernel/setup.c index d313a11acafa..8c08660b4e5d 100644 --- a/arch/i386/kernel/setup.c +++ b/arch/i386/kernel/setup.c @@ -46,6 +46,7 @@ #include <linux/kexec.h> #include <linux/crash_dump.h> #include <linux/dmi.h> +#include <linux/pfn.h> #include <video/edid.h> @@ -1058,10 +1059,10 @@ static int __init free_available_memory(unsigned long start, unsigned long end, void *arg) { /* check max_low_pfn */ - if (start >= ((max_low_pfn + 1) << PAGE_SHIFT)) + if (start >= (max_low_pfn << PAGE_SHIFT)) return 0; - if (end >= ((max_low_pfn + 1) << PAGE_SHIFT)) - end = (max_low_pfn + 1) << PAGE_SHIFT; + if (end >= (max_low_pfn << PAGE_SHIFT)) + end = max_low_pfn << PAGE_SHIFT; if (start < end) free_bootmem(start, end - start); @@ -1286,8 +1287,6 @@ legacy_init_iomem_resources(struct resource *code_resource, struct resource *dat probe_roms(); for (i = 0; i < e820.nr_map; i++) { struct resource *res; - if (e820.map[i].addr + e820.map[i].size > 0x100000000ULL) - continue; res = kzalloc(sizeof(struct resource), GFP_ATOMIC); switch (e820.map[i].type) { case E820_RAM: res->name = "System RAM"; break; diff --git a/arch/i386/kernel/smpboot.c b/arch/i386/kernel/smpboot.c index 82371d83bfa9..a6969903f2d6 100644 --- a/arch/i386/kernel/smpboot.c +++ b/arch/i386/kernel/smpboot.c @@ -72,6 +72,9 @@ int phys_proc_id[NR_CPUS] __read_mostly = {[0 ... NR_CPUS-1] = BAD_APICID}; /* Core ID of each logical CPU */ int cpu_core_id[NR_CPUS] __read_mostly = {[0 ... NR_CPUS-1] = BAD_APICID}; +/* Last level cache ID of each logical CPU */ +int cpu_llc_id[NR_CPUS] __cpuinitdata = {[0 ... NR_CPUS-1] = BAD_APICID}; + /* representing HT siblings of each logical CPU */ cpumask_t cpu_sibling_map[NR_CPUS] __read_mostly; EXPORT_SYMBOL(cpu_sibling_map); @@ -440,6 +443,18 @@ static void __devinit smp_callin(void) static int cpucount; +/* maps the cpu to the sched domain representing multi-core */ +cpumask_t cpu_coregroup_map(int cpu) +{ + struct cpuinfo_x86 *c = cpu_data + cpu; + /* + * For perf, we return last level cache shared map. 
+ * TBD: when power saving sched policy is added, we will return + * cpu_core_map when power saving policy is enabled + */ + return c->llc_shared_map; +} + /* representing cpus for which sibling maps can be computed */ static cpumask_t cpu_sibling_setup_map; @@ -459,12 +474,16 @@ set_cpu_sibling_map(int cpu) cpu_set(cpu, cpu_sibling_map[i]); cpu_set(i, cpu_core_map[cpu]); cpu_set(cpu, cpu_core_map[i]); + cpu_set(i, c[cpu].llc_shared_map); + cpu_set(cpu, c[i].llc_shared_map); } } } else { cpu_set(cpu, cpu_sibling_map[cpu]); } + cpu_set(cpu, c[cpu].llc_shared_map); + if (current_cpu_data.x86_max_cores == 1) { cpu_core_map[cpu] = cpu_sibling_map[cpu]; c[cpu].booted_cores = 1; @@ -472,6 +491,11 @@ set_cpu_sibling_map(int cpu) } for_each_cpu_mask(i, cpu_sibling_setup_map) { + if (cpu_llc_id[cpu] != BAD_APICID && + cpu_llc_id[cpu] == cpu_llc_id[i]) { + cpu_set(i, c[cpu].llc_shared_map); + cpu_set(cpu, c[i].llc_shared_map); + } if (phys_proc_id[cpu] == phys_proc_id[i]) { cpu_set(i, cpu_core_map[cpu]); cpu_set(cpu, cpu_core_map[i]); diff --git a/arch/i386/kernel/syscall_table.S b/arch/i386/kernel/syscall_table.S index ac687d00a1ce..326595f3fa4d 100644 --- a/arch/i386/kernel/syscall_table.S +++ b/arch/i386/kernel/syscall_table.S @@ -310,3 +310,5 @@ ENTRY(sys_call_table) .long sys_pselect6 .long sys_ppoll .long sys_unshare /* 310 */ + .long sys_set_robust_list + .long sys_get_robust_list diff --git a/arch/i386/kernel/timers/timer_pm.c b/arch/i386/kernel/timers/timer_pm.c index 264edaaac315..144e94a04933 100644 --- a/arch/i386/kernel/timers/timer_pm.c +++ b/arch/i386/kernel/timers/timer_pm.c @@ -15,6 +15,7 @@ #include <linux/module.h> #include <linux/device.h> #include <linux/init.h> +#include <linux/pci.h> #include <asm/types.h> #include <asm/timer.h> #include <asm/smp.h> @@ -45,24 +46,31 @@ static seqlock_t monotonic_lock = SEQLOCK_UNLOCKED; #define ACPI_PM_MASK 0xFFFFFF /* limit it to 24 bits */ +static int pmtmr_need_workaround __read_mostly = 1; + /*helper function to safely read acpi pm timesource*/ static inline u32 read_pmtmr(void) { - u32 v1=0,v2=0,v3=0; - /* It has been reported that because of various broken - * chipsets (ICH4, PIIX4 and PIIX4E) where the ACPI PM time - * source is not latched, so you must read it multiple - * times to insure a safe value is read. - */ - do { - v1 = inl(pmtmr_ioport); - v2 = inl(pmtmr_ioport); - v3 = inl(pmtmr_ioport); - } while ((v1 > v2 && v1 < v3) || (v2 > v3 && v2 < v1) - || (v3 > v1 && v3 < v2)); - - /* mask the output to 24 bits */ - return v2 & ACPI_PM_MASK; + if (pmtmr_need_workaround) { + u32 v1, v2, v3; + + /* It has been reported that because of various broken + * chipsets (ICH4, PIIX4 and PIIX4E) where the ACPI PM time + * source is not latched, so you must read it multiple + * times to insure a safe value is read. + */ + do { + v1 = inl(pmtmr_ioport); + v2 = inl(pmtmr_ioport); + v3 = inl(pmtmr_ioport); + } while ((v1 > v2 && v1 < v3) || (v2 > v3 && v2 < v1) + || (v3 > v1 && v3 < v2)); + + /* mask the output to 24 bits */ + return v2 & ACPI_PM_MASK; + } + + return inl(pmtmr_ioport) & ACPI_PM_MASK; } @@ -263,6 +271,72 @@ struct init_timer_opts __initdata timer_pmtmr_init = { .opts = &timer_pmtmr, }; +#ifdef CONFIG_PCI +/* + * PIIX4 Errata: + * + * The power management timer may return improper results when read. + * Although the timer value settles properly after incrementing, + * while incrementing there is a 3 ns window every 69.8 ns where the + * timer value is indeterminate (a 4.2% chance that the data will be + * incorrect when read). 
As a result, the ACPI free running count up + * timer specification is violated due to erroneous reads. + */ +static int __init pmtmr_bug_check(void) +{ + static struct pci_device_id gray_list[] __initdata = { + /* these chipsets may have bug. */ + { PCI_DEVICE(PCI_VENDOR_ID_INTEL, + PCI_DEVICE_ID_INTEL_82801DB_0) }, + { }, + }; + struct pci_dev *dev; + int pmtmr_has_bug = 0; + u8 rev; + + if (cur_timer != &timer_pmtmr || !pmtmr_need_workaround) + return 0; + + dev = pci_get_device(PCI_VENDOR_ID_INTEL, + PCI_DEVICE_ID_INTEL_82371AB_3, NULL); + if (dev) { + pci_read_config_byte(dev, PCI_REVISION_ID, &rev); + /* the bug has been fixed in PIIX4M */ + if (rev < 3) { + printk(KERN_WARNING "* Found PM-Timer Bug on this " + "chipset. Due to workarounds for a bug,\n" + "* this time source is slow. Consider trying " + "other time sources (clock=)\n"); + pmtmr_has_bug = 1; + } + pci_dev_put(dev); + } + + if (pci_dev_present(gray_list)) { + printk(KERN_WARNING "* This chipset may have PM-Timer Bug. Due" + " to workarounds for a bug,\n" + "* this time source is slow. If you are sure your timer" + " does not have\n" + "* this bug, please use \"pmtmr_good\" to disable the " + "workaround\n"); + pmtmr_has_bug = 1; + } + + if (!pmtmr_has_bug) + pmtmr_need_workaround = 0; + + return 0; +} +device_initcall(pmtmr_bug_check); +#endif + +static int __init pmtr_good_setup(char *__str) +{ + pmtmr_need_workaround = 0; + return 1; +} +__setup("pmtmr_good", pmtr_good_setup); + MODULE_LICENSE("GPL"); MODULE_AUTHOR("Dominik Brodowski <linux@brodo.de>"); MODULE_DESCRIPTION("Power Management Timer (PMTMR) as primary timing source for x86"); diff --git a/arch/i386/kernel/traps.c b/arch/i386/kernel/traps.c index de5386b01d38..6b63a5aa1e46 100644 --- a/arch/i386/kernel/traps.c +++ b/arch/i386/kernel/traps.c @@ -92,22 +92,21 @@ asmlinkage void spurious_interrupt_bug(void); asmlinkage void machine_check(void); static int kstack_depth_to_print = 24; -struct notifier_block *i386die_chain; -static DEFINE_SPINLOCK(die_notifier_lock); +ATOMIC_NOTIFIER_HEAD(i386die_chain); int register_die_notifier(struct notifier_block *nb) { - int err = 0; - unsigned long flags; - vmalloc_sync_all(); - spin_lock_irqsave(&die_notifier_lock, flags); - err = notifier_chain_register(&i386die_chain, nb); - spin_unlock_irqrestore(&die_notifier_lock, flags); - return err; + return atomic_notifier_chain_register(&i386die_chain, nb); } EXPORT_SYMBOL(register_die_notifier); +int unregister_die_notifier(struct notifier_block *nb) +{ + return atomic_notifier_chain_unregister(&i386die_chain, nb); +} +EXPORT_SYMBOL(unregister_die_notifier); + static inline int valid_stack_ptr(struct thread_info *tinfo, void *p) { return p > (void *)tinfo && @@ -386,8 +385,12 @@ void die(const char * str, struct pt_regs * regs, long err) #endif if (nl) printk("\n"); - notify_die(DIE_OOPS, (char *)str, regs, err, 255, SIGSEGV); - show_registers(regs); + if (notify_die(DIE_OOPS, str, regs, err, + current->thread.trap_no, SIGSEGV) != + NOTIFY_STOP) + show_registers(regs); + else + regs = NULL; } else printk(KERN_EMERG "Recursive die() failure, output suppressed\n"); @@ -395,6 +398,9 @@ void die(const char * str, struct pt_regs * regs, long err) die.lock_owner = -1; spin_unlock_irqrestore(&die.lock, flags); + if (!regs) + return; + if (kexec_should_crash(current)) crash_kexec(regs); @@ -623,7 +629,7 @@ static DEFINE_SPINLOCK(nmi_print_lock); void die_nmi (struct pt_regs *regs, const char *msg) { - if (notify_die(DIE_NMIWATCHDOG, msg, regs, 0, 0, SIGINT) == + if 
(notify_die(DIE_NMIWATCHDOG, msg, regs, 0, 2, SIGINT) == NOTIFY_STOP) return; @@ -662,7 +668,7 @@ static void default_do_nmi(struct pt_regs * regs) reason = get_nmi_reason(); if (!(reason & 0xc0)) { - if (notify_die(DIE_NMI_IPI, "nmi_ipi", regs, reason, 0, SIGINT) + if (notify_die(DIE_NMI_IPI, "nmi_ipi", regs, reason, 2, SIGINT) == NOTIFY_STOP) return; #ifdef CONFIG_X86_LOCAL_APIC @@ -678,7 +684,7 @@ static void default_do_nmi(struct pt_regs * regs) unknown_nmi_error(reason, regs); return; } - if (notify_die(DIE_NMI, "nmi", regs, reason, 0, SIGINT) == NOTIFY_STOP) + if (notify_die(DIE_NMI, "nmi", regs, reason, 2, SIGINT) == NOTIFY_STOP) return; if (reason & 0x80) mem_parity_error(reason, regs); diff --git a/arch/i386/mach-voyager/voyager_smp.c b/arch/i386/mach-voyager/voyager_smp.c index 8165626a5c30..70e560a1b79a 100644 --- a/arch/i386/mach-voyager/voyager_smp.c +++ b/arch/i386/mach-voyager/voyager_smp.c @@ -1700,7 +1700,7 @@ after_handle_vic_irq(unsigned int irq) printk("VOYAGER SMP: CPU%d lost interrupt %d\n", cpu, irq); - for_each_cpu(real_cpu, mask) { + for_each_possible_cpu(real_cpu, mask) { outb(VIC_CPU_MASQUERADE_ENABLE | real_cpu, VIC_PROCESSOR_ID); diff --git a/arch/i386/mm/discontig.c b/arch/i386/mm/discontig.c index c4af9638dbfa..fe6eb901326e 100644 --- a/arch/i386/mm/discontig.c +++ b/arch/i386/mm/discontig.c @@ -31,6 +31,7 @@ #include <linux/nodemask.h> #include <linux/module.h> #include <linux/kexec.h> +#include <linux/pfn.h> #include <asm/e820.h> #include <asm/setup.h> @@ -352,17 +353,6 @@ void __init zone_sizes_init(void) { int nid; - /* - * Insert nodes into pgdat_list backward so they appear in order. - * Clobber node 0's links and NULL out pgdat_list before starting. - */ - pgdat_list = NULL; - for (nid = MAX_NUMNODES - 1; nid >= 0; nid--) { - if (!node_online(nid)) - continue; - NODE_DATA(nid)->pgdat_next = pgdat_list; - pgdat_list = NODE_DATA(nid); - } for_each_online_node(nid) { unsigned long zones_size[MAX_NR_ZONES] = {0, 0, 0}; diff --git a/arch/i386/mm/pgtable.c b/arch/i386/mm/pgtable.c index 9db3242103be..2889567e21a1 100644 --- a/arch/i386/mm/pgtable.c +++ b/arch/i386/mm/pgtable.c @@ -36,7 +36,7 @@ void show_mem(void) printk(KERN_INFO "Mem-info:\n"); show_free_areas(); printk(KERN_INFO "Free swap: %6ldkB\n", nr_swap_pages<<(PAGE_SHIFT-10)); - for_each_pgdat(pgdat) { + for_each_online_pgdat(pgdat) { pgdat_resize_lock(pgdat, &flags); for (i = 0; i < pgdat->node_spanned_pages; ++i) { page = pgdat_page_nr(pgdat, i); diff --git a/arch/i386/oprofile/nmi_int.c b/arch/i386/oprofile/nmi_int.c index 1accce50c2c7..1a2076ce6f6a 100644 --- a/arch/i386/oprofile/nmi_int.c +++ b/arch/i386/oprofile/nmi_int.c @@ -122,7 +122,7 @@ static void nmi_save_registers(void * dummy) static void free_msrs(void) { int i; - for_each_cpu(i) { + for_each_possible_cpu(i) { kfree(cpu_msrs[i].counters); cpu_msrs[i].counters = NULL; kfree(cpu_msrs[i].controls); diff --git a/arch/ia64/Kconfig b/arch/ia64/Kconfig index 10b6b9e7716b..edffe25a477a 100644 --- a/arch/ia64/Kconfig +++ b/arch/ia64/Kconfig @@ -34,6 +34,10 @@ config RWSEM_XCHGADD_ALGORITHM bool default y +config GENERIC_FIND_NEXT_BIT + bool + default y + config GENERIC_CALIBRATE_DELAY bool default y @@ -42,6 +46,10 @@ config TIME_INTERPOLATION bool default y +config DMI + bool + default y + config EFI bool default y diff --git a/arch/ia64/ia32/ia32priv.h b/arch/ia64/ia32/ia32priv.h index 68ceb4e690c7..ccb98ed48e58 100644 --- a/arch/ia64/ia32/ia32priv.h +++ b/arch/ia64/ia32/ia32priv.h @@ -29,9 +29,9 @@ struct partial_page { struct 
partial_page *next; /* linked list, sorted by address */ struct rb_node pp_rb; - /* 64K is the largest "normal" page supported by ia64 ABI. So 4K*32 + /* 64K is the largest "normal" page supported by ia64 ABI. So 4K*64 * should suffice.*/ - unsigned int bitmap; + unsigned long bitmap; unsigned int base; }; diff --git a/arch/ia64/ia32/sys_ia32.c b/arch/ia64/ia32/sys_ia32.c index 13e739e4c84d..5366b3b23d09 100644 --- a/arch/ia64/ia32/sys_ia32.c +++ b/arch/ia64/ia32/sys_ia32.c @@ -25,7 +25,6 @@ #include <linux/resource.h> #include <linux/times.h> #include <linux/utsname.h> -#include <linux/timex.h> #include <linux/smp.h> #include <linux/smp_lock.h> #include <linux/sem.h> @@ -2591,78 +2590,4 @@ sys32_setresgid(compat_gid_t rgid, compat_gid_t egid, ssgid = (sgid == (compat_gid_t)-1) ? ((gid_t)-1) : ((gid_t)sgid); return sys_setresgid(srgid, segid, ssgid); } - -/* Handle adjtimex compatibility. */ - -struct timex32 { - u32 modes; - s32 offset, freq, maxerror, esterror; - s32 status, constant, precision, tolerance; - struct compat_timeval time; - s32 tick; - s32 ppsfreq, jitter, shift, stabil; - s32 jitcnt, calcnt, errcnt, stbcnt; - s32 :32; s32 :32; s32 :32; s32 :32; - s32 :32; s32 :32; s32 :32; s32 :32; - s32 :32; s32 :32; s32 :32; s32 :32; -}; - -extern int do_adjtimex(struct timex *); - -asmlinkage long -sys32_adjtimex(struct timex32 *utp) -{ - struct timex txc; - int ret; - - memset(&txc, 0, sizeof(struct timex)); - - if(get_user(txc.modes, &utp->modes) || - __get_user(txc.offset, &utp->offset) || - __get_user(txc.freq, &utp->freq) || - __get_user(txc.maxerror, &utp->maxerror) || - __get_user(txc.esterror, &utp->esterror) || - __get_user(txc.status, &utp->status) || - __get_user(txc.constant, &utp->constant) || - __get_user(txc.precision, &utp->precision) || - __get_user(txc.tolerance, &utp->tolerance) || - __get_user(txc.time.tv_sec, &utp->time.tv_sec) || - __get_user(txc.time.tv_usec, &utp->time.tv_usec) || - __get_user(txc.tick, &utp->tick) || - __get_user(txc.ppsfreq, &utp->ppsfreq) || - __get_user(txc.jitter, &utp->jitter) || - __get_user(txc.shift, &utp->shift) || - __get_user(txc.stabil, &utp->stabil) || - __get_user(txc.jitcnt, &utp->jitcnt) || - __get_user(txc.calcnt, &utp->calcnt) || - __get_user(txc.errcnt, &utp->errcnt) || - __get_user(txc.stbcnt, &utp->stbcnt)) - return -EFAULT; - - ret = do_adjtimex(&txc); - - if(put_user(txc.modes, &utp->modes) || - __put_user(txc.offset, &utp->offset) || - __put_user(txc.freq, &utp->freq) || - __put_user(txc.maxerror, &utp->maxerror) || - __put_user(txc.esterror, &utp->esterror) || - __put_user(txc.status, &utp->status) || - __put_user(txc.constant, &utp->constant) || - __put_user(txc.precision, &utp->precision) || - __put_user(txc.tolerance, &utp->tolerance) || - __put_user(txc.time.tv_sec, &utp->time.tv_sec) || - __put_user(txc.time.tv_usec, &utp->time.tv_usec) || - __put_user(txc.tick, &utp->tick) || - __put_user(txc.ppsfreq, &utp->ppsfreq) || - __put_user(txc.jitter, &utp->jitter) || - __put_user(txc.shift, &utp->shift) || - __put_user(txc.stabil, &utp->stabil) || - __put_user(txc.jitcnt, &utp->jitcnt) || - __put_user(txc.calcnt, &utp->calcnt) || - __put_user(txc.errcnt, &utp->errcnt) || - __put_user(txc.stbcnt, &utp->stbcnt)) - ret = -EFAULT; - - return ret; -} #endif /* NOTYET */ diff --git a/arch/ia64/kernel/Makefile b/arch/ia64/kernel/Makefile index 09a0dbc17fb6..59e871dae742 100644 --- a/arch/ia64/kernel/Makefile +++ b/arch/ia64/kernel/Makefile @@ -7,7 +7,7 @@ extra-y := head.o init_task.o vmlinux.lds obj-y := acpi.o entry.o efi.o 
efi_stub.o gate-data.o fsys.o ia64_ksyms.o irq.o irq_ia64.o \ irq_lsapic.o ivt.o machvec.o pal.o patch.o process.o perfmon.o ptrace.o sal.o \ salinfo.o semaphore.o setup.o signal.o sys_ia64.o time.o traps.o unaligned.o \ - unwind.o mca.o mca_asm.o topology.o + unwind.o mca.o mca_asm.o topology.o dmi_scan.o obj-$(CONFIG_IA64_BRL_EMU) += brl_emu.o obj-$(CONFIG_IA64_GENERIC) += acpi-ext.o @@ -30,6 +30,7 @@ obj-$(CONFIG_IA64_MCA_RECOVERY) += mca_recovery.o obj-$(CONFIG_KPROBES) += kprobes.o jprobes.o obj-$(CONFIG_IA64_UNCACHED_ALLOCATOR) += uncached.o mca_recovery-y += mca_drv.o mca_drv_asm.o +dmi_scan-y += ../../i386/kernel/dmi_scan.o # The gate DSO image is built using a special linker script. targets += gate.so gate-syms.o diff --git a/arch/ia64/kernel/acpi.c b/arch/ia64/kernel/acpi.c index a4e218ce2edb..58c93a30348c 100644 --- a/arch/ia64/kernel/acpi.c +++ b/arch/ia64/kernel/acpi.c @@ -651,9 +651,9 @@ unsigned long __init acpi_find_rsdp(void) { unsigned long rsdp_phys = 0; - if (efi.acpi20) - rsdp_phys = __pa(efi.acpi20); - else if (efi.acpi) + if (efi.acpi20 != EFI_INVALID_TABLE_ADDR) + rsdp_phys = efi.acpi20; + else if (efi.acpi != EFI_INVALID_TABLE_ADDR) printk(KERN_WARNING PREFIX "v1.0/r0.71 tables no longer supported\n"); return rsdp_phys; diff --git a/arch/ia64/kernel/efi.c b/arch/ia64/kernel/efi.c index 9990320b6f9a..12cfedce73b1 100644 --- a/arch/ia64/kernel/efi.c +++ b/arch/ia64/kernel/efi.c @@ -458,24 +458,33 @@ efi_init (void) printk(KERN_INFO "EFI v%u.%.02u by %s:", efi.systab->hdr.revision >> 16, efi.systab->hdr.revision & 0xffff, vendor); + efi.mps = EFI_INVALID_TABLE_ADDR; + efi.acpi = EFI_INVALID_TABLE_ADDR; + efi.acpi20 = EFI_INVALID_TABLE_ADDR; + efi.smbios = EFI_INVALID_TABLE_ADDR; + efi.sal_systab = EFI_INVALID_TABLE_ADDR; + efi.boot_info = EFI_INVALID_TABLE_ADDR; + efi.hcdp = EFI_INVALID_TABLE_ADDR; + efi.uga = EFI_INVALID_TABLE_ADDR; + for (i = 0; i < (int) efi.systab->nr_tables; i++) { if (efi_guidcmp(config_tables[i].guid, MPS_TABLE_GUID) == 0) { - efi.mps = __va(config_tables[i].table); + efi.mps = config_tables[i].table; printk(" MPS=0x%lx", config_tables[i].table); } else if (efi_guidcmp(config_tables[i].guid, ACPI_20_TABLE_GUID) == 0) { - efi.acpi20 = __va(config_tables[i].table); + efi.acpi20 = config_tables[i].table; printk(" ACPI 2.0=0x%lx", config_tables[i].table); } else if (efi_guidcmp(config_tables[i].guid, ACPI_TABLE_GUID) == 0) { - efi.acpi = __va(config_tables[i].table); + efi.acpi = config_tables[i].table; printk(" ACPI=0x%lx", config_tables[i].table); } else if (efi_guidcmp(config_tables[i].guid, SMBIOS_TABLE_GUID) == 0) { - efi.smbios = __va(config_tables[i].table); + efi.smbios = config_tables[i].table; printk(" SMBIOS=0x%lx", config_tables[i].table); } else if (efi_guidcmp(config_tables[i].guid, SAL_SYSTEM_TABLE_GUID) == 0) { - efi.sal_systab = __va(config_tables[i].table); + efi.sal_systab = config_tables[i].table; printk(" SALsystab=0x%lx", config_tables[i].table); } else if (efi_guidcmp(config_tables[i].guid, HCDP_TABLE_GUID) == 0) { - efi.hcdp = __va(config_tables[i].table); + efi.hcdp = config_tables[i].table; printk(" HCDP=0x%lx", config_tables[i].table); } } @@ -677,27 +686,34 @@ EXPORT_SYMBOL(efi_mem_attributes); /* * Determines whether the memory at phys_addr supports the desired * attribute (WB, UC, etc). If this returns 1, the caller can safely - * access *size bytes at phys_addr with the specified attribute. + * access size bytes at phys_addr with the specified attribute. 
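[Editor's aside: a sketch, over invented data, of the range-coverage walk that the reworked efi_mem_attribute_range() in the hunk below performs: succeed only if every byte of [addr, addr + size) falls inside descriptors carrying the required attribute. struct desc, ATTR_WB/ATTR_UC and the map[] table are illustrative, not the real EFI types.]

/* Walk contiguous descriptors starting at addr; stop with success once
 * the end of the range is covered, and fail on a gap or a descriptor
 * that lacks the attribute. */
#include <stdio.h>
#include <stdint.h>
#include <stddef.h>

struct desc {
	uint64_t start, end;	/* covers [start, end) */
	uint64_t attr;
};

#define ATTR_WB 0x1
#define ATTR_UC 0x2

static const struct desc map[] = {
	{ 0x00000000, 0x00100000, ATTR_WB | ATTR_UC },
	{ 0x00100000, 0x08000000, ATTR_WB },
	{ 0x08000000, 0x10000000, ATTR_UC },
};

static const struct desc *find_desc(uint64_t addr)
{
	for (size_t i = 0; i < sizeof(map) / sizeof(map[0]); i++)
		if (addr >= map[i].start && addr < map[i].end)
			return &map[i];
	return NULL;
}

static int range_has_attr(uint64_t addr, uint64_t size, uint64_t attr)
{
	uint64_t end = addr + size;
	const struct desc *d = find_desc(addr);

	while (d && (d->attr & attr) == attr) {
		if (end <= d->end)
			return 1;		/* fully covered */
		d = find_desc(d->end);		/* continue in the next descriptor */
	}
	return 0;				/* gap, or attribute missing */
}

int main(void)
{
	printf("WB over [1MB, 65MB): %d\n", range_has_attr(0x00100000, 0x04000000, ATTR_WB));
	printf("UC over [1MB, 65MB): %d\n", range_has_attr(0x00100000, 0x04000000, ATTR_UC));
	printf("WB past end of map:  %d\n", range_has_attr(0x04000000, 0x0c000001, ATTR_WB));
	return 0;
}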
*/ -static int -efi_mem_attribute_range (unsigned long phys_addr, unsigned long *size, u64 attr) +int +efi_mem_attribute_range (unsigned long phys_addr, unsigned long size, u64 attr) { + unsigned long end = phys_addr + size; efi_memory_desc_t *md = efi_memory_descriptor(phys_addr); - unsigned long md_end; - if (!md || (md->attribute & attr) != attr) + /* + * Some firmware doesn't report MMIO regions in the EFI memory + * map. The Intel BigSur (a.k.a. HP i2000) has this problem. + * On those platforms, we have to assume UC is valid everywhere. + */ + if (!md || (md->attribute & attr) != attr) { + if (attr == EFI_MEMORY_UC && !efi_memmap_has_mmio()) + return 1; return 0; + } do { - md_end = efi_md_end(md); - if (phys_addr + *size <= md_end) + unsigned long md_end = efi_md_end(md); + + if (end <= md_end) return 1; md = efi_memory_descriptor(md_end); - if (!md || (md->attribute & attr) != attr) { - *size = md_end - phys_addr; - return 1; - } + if (!md || (md->attribute & attr) != attr) + return 0; } while (md); return 0; } @@ -708,7 +724,7 @@ efi_mem_attribute_range (unsigned long phys_addr, unsigned long *size, u64 attr) * control access size. */ int -valid_phys_addr_range (unsigned long phys_addr, unsigned long *size) +valid_phys_addr_range (unsigned long phys_addr, unsigned long size) { return efi_mem_attribute_range(phys_addr, size, EFI_MEMORY_WB); } @@ -723,7 +739,7 @@ valid_phys_addr_range (unsigned long phys_addr, unsigned long *size) * because that doesn't appear in the boot-time EFI memory map. */ int -valid_mmap_phys_addr_range (unsigned long phys_addr, unsigned long *size) +valid_mmap_phys_addr_range (unsigned long phys_addr, unsigned long size) { if (efi_mem_attribute_range(phys_addr, size, EFI_MEMORY_WB)) return 1; @@ -731,14 +747,6 @@ valid_mmap_phys_addr_range (unsigned long phys_addr, unsigned long *size) if (efi_mem_attribute_range(phys_addr, size, EFI_MEMORY_UC)) return 1; - /* - * Some firmware doesn't report MMIO regions in the EFI memory map. - * The Intel BigSur (a.k.a. HP i2000) has this problem. In this - * case, we can't use the EFI memory map to validate mmap requests. - */ - if (!efi_memmap_has_mmio()) - return 1; - return 0; } diff --git a/arch/ia64/kernel/kprobes.c b/arch/ia64/kernel/kprobes.c index 50ae8c7d453d..789881ca83d4 100644 --- a/arch/ia64/kernel/kprobes.c +++ b/arch/ia64/kernel/kprobes.c @@ -34,6 +34,7 @@ #include <asm/pgtable.h> #include <asm/kdebug.h> #include <asm/sections.h> +#include <asm/uaccess.h> extern void jprobe_inst_return(void); @@ -722,13 +723,50 @@ static int __kprobes kprobes_fault_handler(struct pt_regs *regs, int trapnr) struct kprobe *cur = kprobe_running(); struct kprobe_ctlblk *kcb = get_kprobe_ctlblk(); - if (cur->fault_handler && cur->fault_handler(cur, regs, trapnr)) - return 1; - if (kcb->kprobe_status & KPROBE_HIT_SS) { - resume_execution(cur, regs); - reset_current_kprobe(); + switch(kcb->kprobe_status) { + case KPROBE_HIT_SS: + case KPROBE_REENTER: + /* + * We are here because the instruction being single + * stepped caused a page fault. We reset the current + * kprobe and the instruction pointer points back to + * the probe address and allow the page fault handler + * to continue as a normal page fault. 
+ */ + regs->cr_iip = ((unsigned long)cur->addr) & ~0xFULL; + ia64_psr(regs)->ri = ((unsigned long)cur->addr) & 0xf; + if (kcb->kprobe_status == KPROBE_REENTER) + restore_previous_kprobe(kcb); + else + reset_current_kprobe(); preempt_enable_no_resched(); + break; + case KPROBE_HIT_ACTIVE: + case KPROBE_HIT_SSDONE: + /* + * We increment the nmissed count for accounting, + * we can also use npre/npostfault count for accouting + * these specific fault cases. + */ + kprobes_inc_nmissed_count(cur); + + /* + * We come here because instructions in the pre/post + * handler caused the page_fault, this could happen + * if handler tries to access user space by + * copy_from_user(), get_user() etc. Let the + * user-specified handler try to fix it first. + */ + if (cur->fault_handler && cur->fault_handler(cur, regs, trapnr)) + return 1; + + /* + * Let ia64_do_page_fault() fix it. + */ + break; + default: + break; } return 0; @@ -740,6 +778,9 @@ int __kprobes kprobe_exceptions_notify(struct notifier_block *self, struct die_args *args = (struct die_args *)data; int ret = NOTIFY_DONE; + if (args->regs && user_mode(args->regs)) + return ret; + switch(val) { case DIE_BREAK: /* err is break number from ia64_bad_break() */ diff --git a/arch/ia64/kernel/mca.c b/arch/ia64/kernel/mca.c index 87ff7fe33cfb..8963171788d5 100644 --- a/arch/ia64/kernel/mca.c +++ b/arch/ia64/kernel/mca.c @@ -69,6 +69,7 @@ #include <linux/kernel.h> #include <linux/smp.h> #include <linux/workqueue.h> +#include <linux/cpumask.h> #include <asm/delay.h> #include <asm/kdebug.h> @@ -1505,7 +1506,7 @@ format_mca_init_stack(void *mca_data, unsigned long offset, ti->cpu = cpu; p->thread_info = ti; p->state = TASK_UNINTERRUPTIBLE; - __set_bit(cpu, &p->cpus_allowed); + cpu_set(cpu, p->cpus_allowed); INIT_LIST_HEAD(&p->tasks); p->parent = p->real_parent = p->group_leader = p; INIT_LIST_HEAD(&p->children); diff --git a/arch/ia64/kernel/process.c b/arch/ia64/kernel/process.c index 309d59658e5f..355d57970ba3 100644 --- a/arch/ia64/kernel/process.c +++ b/arch/ia64/kernel/process.c @@ -30,7 +30,6 @@ #include <linux/efi.h> #include <linux/interrupt.h> #include <linux/delay.h> -#include <linux/kprobes.h> #include <asm/cpu.h> #include <asm/delay.h> @@ -738,13 +737,6 @@ void exit_thread (void) { - /* - * Remove function-return probe instances associated with this task - * and put them back on the free list. Do not insert an exit probe for - * this function, it will be disabled by kprobe_flush_task if you do. 
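[Editor's aside: a toy model of the notifier-chain pattern that the die-chain hunks in this series convert to atomic notifier chains. It shows only the register/call/NOTIFY_STOP flow and deliberately omits the locking and RCU that the kernel's atomic_notifier_chain_*() helpers provide; the struct layout and helper names are simplified stand-ins, not the kernel API.]

/* Callbacks live on a singly linked list and are invoked in order
 * until one returns NOTIFY_STOP, at which point the remaining
 * callbacks are skipped. */
#include <stdio.h>
#include <stddef.h>

#define NOTIFY_DONE 0
#define NOTIFY_STOP 1

struct toy_notifier {
	int (*call)(struct toy_notifier *nb, unsigned long event, void *data);
	struct toy_notifier *next;
};

static struct toy_notifier *chain;

static void chain_register(struct toy_notifier *nb)
{
	nb->next = chain;		/* newest registration runs first */
	chain = nb;
}

static int chain_call(unsigned long event, void *data)
{
	for (struct toy_notifier *nb = chain; nb; nb = nb->next)
		if (nb->call(nb, event, data) == NOTIFY_STOP)
			return NOTIFY_STOP;
	return NOTIFY_DONE;
}

static int log_event(struct toy_notifier *nb, unsigned long event, void *data)
{
	printf("event %lu: %s\n", event, (const char *)data);
	return NOTIFY_DONE;
}

static int stop_on_two(struct toy_notifier *nb, unsigned long event, void *data)
{
	return event == 2 ? NOTIFY_STOP : NOTIFY_DONE;
}

int main(void)
{
	struct toy_notifier logger = { log_event, NULL };
	struct toy_notifier stopper = { stop_on_two, NULL };

	chain_register(&logger);	/* registered first, runs last */
	chain_register(&stopper);	/* registered last, runs first */

	chain_call(1, "plain event");	/* reaches the logger */
	chain_call(2, "stopped event");	/* stopper returns NOTIFY_STOP first */
	return 0;
}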
- */ - kprobe_flush_task(current); - ia64_drop_fpu(current); #ifdef CONFIG_PERFMON /* if needed, stop monitoring and flush state to perfmon context */ diff --git a/arch/ia64/kernel/setup.c b/arch/ia64/kernel/setup.c index eb388e271b2b..e4dfda1eb7dd 100644 --- a/arch/ia64/kernel/setup.c +++ b/arch/ia64/kernel/setup.c @@ -37,6 +37,7 @@ #include <linux/string.h> #include <linux/threads.h> #include <linux/tty.h> +#include <linux/dmi.h> #include <linux/serial.h> #include <linux/serial_core.h> #include <linux/efi.h> @@ -433,7 +434,7 @@ setup_arch (char **cmdline_p) find_memory(); /* process SAL system table: */ - ia64_sal_init(efi.sal_systab); + ia64_sal_init(__va(efi.sal_systab)); ia64_setup_printk_clock(); @@ -887,3 +888,10 @@ check_bugs (void) ia64_patch_mckinley_e9((unsigned long) __start___mckinley_e9_bundles, (unsigned long) __end___mckinley_e9_bundles); } + +static int __init run_dmi_scan(void) +{ + dmi_scan_machine(); + return 0; +} +core_initcall(run_dmi_scan); diff --git a/arch/ia64/kernel/traps.c b/arch/ia64/kernel/traps.c index dabd6c32641e..7c1ddc8ac443 100644 --- a/arch/ia64/kernel/traps.c +++ b/arch/ia64/kernel/traps.c @@ -30,19 +30,19 @@ extern spinlock_t timerlist_lock; fpswa_interface_t *fpswa_interface; EXPORT_SYMBOL(fpswa_interface); -struct notifier_block *ia64die_chain; +ATOMIC_NOTIFIER_HEAD(ia64die_chain); int register_die_notifier(struct notifier_block *nb) { - return notifier_chain_register(&ia64die_chain, nb); + return atomic_notifier_chain_register(&ia64die_chain, nb); } EXPORT_SYMBOL_GPL(register_die_notifier); int unregister_die_notifier(struct notifier_block *nb) { - return notifier_chain_unregister(&ia64die_chain, nb); + return atomic_notifier_chain_unregister(&ia64die_chain, nb); } EXPORT_SYMBOL_GPL(unregister_die_notifier); diff --git a/arch/ia64/lib/Makefile b/arch/ia64/lib/Makefile index ac64664a1807..d8536a2c22a9 100644 --- a/arch/ia64/lib/Makefile +++ b/arch/ia64/lib/Makefile @@ -6,7 +6,7 @@ obj-y := io.o lib-y := __divsi3.o __udivsi3.o __modsi3.o __umodsi3.o \ __divdi3.o __udivdi3.o __moddi3.o __umoddi3.o \ - bitop.o checksum.o clear_page.o csum_partial_copy.o \ + checksum.o clear_page.o csum_partial_copy.o \ clear_user.o strncpy_from_user.o strlen_user.o strnlen_user.o \ flush.o ip_fast_csum.o do_csum.o \ memset.o strlen.o diff --git a/arch/ia64/lib/bitop.c b/arch/ia64/lib/bitop.c deleted file mode 100644 index 82e299c8464e..000000000000 --- a/arch/ia64/lib/bitop.c +++ /dev/null @@ -1,88 +0,0 @@ -#include <linux/compiler.h> -#include <linux/types.h> -#include <asm/intrinsics.h> -#include <linux/module.h> -#include <linux/bitops.h> - -/* - * Find next zero bit in a bitmap reasonably efficiently.. - */ - -int __find_next_zero_bit (const void *addr, unsigned long size, unsigned long offset) -{ - unsigned long *p = ((unsigned long *) addr) + (offset >> 6); - unsigned long result = offset & ~63UL; - unsigned long tmp; - - if (offset >= size) - return size; - size -= result; - offset &= 63UL; - if (offset) { - tmp = *(p++); - tmp |= ~0UL >> (64-offset); - if (size < 64) - goto found_first; - if (~tmp) - goto found_middle; - size -= 64; - result += 64; - } - while (size & ~63UL) { - if (~(tmp = *(p++))) - goto found_middle; - result += 64; - size -= 64; - } - if (!size) - return result; - tmp = *p; -found_first: - tmp |= ~0UL << size; - if (tmp == ~0UL) /* any bits zero? */ - return result + size; /* nope */ -found_middle: - return result + ffz(tmp); -} -EXPORT_SYMBOL(__find_next_zero_bit); - -/* - * Find next bit in a bitmap reasonably efficiently.. 
- */ -int __find_next_bit(const void *addr, unsigned long size, unsigned long offset) -{ - unsigned long *p = ((unsigned long *) addr) + (offset >> 6); - unsigned long result = offset & ~63UL; - unsigned long tmp; - - if (offset >= size) - return size; - size -= result; - offset &= 63UL; - if (offset) { - tmp = *(p++); - tmp &= ~0UL << offset; - if (size < 64) - goto found_first; - if (tmp) - goto found_middle; - size -= 64; - result += 64; - } - while (size & ~63UL) { - if ((tmp = *(p++))) - goto found_middle; - result += 64; - size -= 64; - } - if (!size) - return result; - tmp = *p; - found_first: - tmp &= ~0UL >> (64-size); - if (tmp == 0UL) /* Are any bits set? */ - return result + size; /* Nope. */ - found_middle: - return result + __ffs(tmp); -} -EXPORT_SYMBOL(__find_next_bit); diff --git a/arch/ia64/mm/Makefile b/arch/ia64/mm/Makefile index d78d20f0a0f0..bb0a01a81878 100644 --- a/arch/ia64/mm/Makefile +++ b/arch/ia64/mm/Makefile @@ -2,7 +2,7 @@ # Makefile for the ia64-specific parts of the memory manager. # -obj-y := init.o fault.o tlb.o extable.o +obj-y := init.o fault.o tlb.o extable.o ioremap.o obj-$(CONFIG_HUGETLB_PAGE) += hugetlbpage.o obj-$(CONFIG_NUMA) += numa.o diff --git a/arch/ia64/mm/discontig.c b/arch/ia64/mm/discontig.c index 2f5e44862e91..ec9eeb89975d 100644 --- a/arch/ia64/mm/discontig.c +++ b/arch/ia64/mm/discontig.c @@ -379,31 +379,6 @@ static void __init *memory_less_node_alloc(int nid, unsigned long pernodesize) } /** - * pgdat_insert - insert the pgdat into global pgdat_list - * @pgdat: the pgdat for a node. - */ -static void __init pgdat_insert(pg_data_t *pgdat) -{ - pg_data_t *prev = NULL, *next; - - for_each_pgdat(next) - if (pgdat->node_id < next->node_id) - break; - else - prev = next; - - if (prev) { - prev->pgdat_next = pgdat; - pgdat->pgdat_next = next; - } else { - pgdat->pgdat_next = pgdat_list; - pgdat_list = pgdat; - } - - return; -} - -/** * memory_less_nodes - allocate and initialize CPU only nodes pernode * information. */ @@ -560,7 +535,7 @@ void show_mem(void) printk("Mem-info:\n"); show_free_areas(); printk("Free swap: %6ldkB\n", nr_swap_pages<<(PAGE_SHIFT-10)); - for_each_pgdat(pgdat) { + for_each_online_pgdat(pgdat) { unsigned long present; unsigned long flags; int shared = 0, cached = 0, reserved = 0; @@ -745,11 +720,5 @@ void __init paging_init(void) pfn_offset, zholes_size); } - /* - * Make memory less nodes become a member of the known nodes. - */ - for_each_node_mask(node, memory_less_mask) - pgdat_insert(mem_data[node].pgdat); - zero_page_memmap_ptr = virt_to_page(ia64_imva(empty_zero_page)); } diff --git a/arch/ia64/mm/init.c b/arch/ia64/mm/init.c index ff4f31fcd330..2ef1151cde90 100644 --- a/arch/ia64/mm/init.c +++ b/arch/ia64/mm/init.c @@ -600,7 +600,7 @@ mem_init (void) kclist_add(&kcore_vmem, (void *)VMALLOC_START, VMALLOC_END-VMALLOC_START); kclist_add(&kcore_kernel, _stext, _end - _stext); - for_each_pgdat(pgdat) + for_each_online_pgdat(pgdat) if (pgdat->bdata->node_bootmem_map) totalram_pages += free_all_bootmem_node(pgdat); diff --git a/arch/ia64/mm/ioremap.c b/arch/ia64/mm/ioremap.c new file mode 100644 index 000000000000..62328621f99c --- /dev/null +++ b/arch/ia64/mm/ioremap.c @@ -0,0 +1,43 @@ +/* + * (c) Copyright 2006 Hewlett-Packard Development Company, L.P. + * Bjorn Helgaas <bjorn.helgaas@hp.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
+ */ + +#include <linux/compiler.h> +#include <linux/module.h> +#include <linux/efi.h> +#include <asm/io.h> + +static inline void __iomem * +__ioremap (unsigned long offset, unsigned long size) +{ + return (void __iomem *) (__IA64_UNCACHED_OFFSET | offset); +} + +void __iomem * +ioremap (unsigned long offset, unsigned long size) +{ + if (efi_mem_attribute_range(offset, size, EFI_MEMORY_UC)) + return __ioremap(offset, size); + + if (efi_mem_attribute_range(offset, size, EFI_MEMORY_WB)) + return phys_to_virt(offset); + + /* + * Someday this should check ACPI resources so we + * can do the right thing for hot-plugged regions. + */ + return __ioremap(offset, size); +} +EXPORT_SYMBOL(ioremap); + +void __iomem * +ioremap_nocache (unsigned long offset, unsigned long size) +{ + return __ioremap(offset, size); +} +EXPORT_SYMBOL(ioremap_nocache); diff --git a/arch/ia64/sn/kernel/setup.c b/arch/ia64/sn/kernel/setup.c index 8b6d5c844708..30988dfbddff 100644 --- a/arch/ia64/sn/kernel/setup.c +++ b/arch/ia64/sn/kernel/setup.c @@ -327,10 +327,11 @@ sn_scan_pcdp(void) struct pcdp_interface_pci if_pci; extern struct efi efi; - pcdp = efi.hcdp; - if (! pcdp) + if (efi.hcdp == EFI_INVALID_TABLE_ADDR) return; /* no hcdp/pcdp table */ + pcdp = __va(efi.hcdp); + if (pcdp->rev < 3) return; /* only support PCDP (rev >= 3) */ diff --git a/arch/ia64/sn/kernel/sn2/sn_proc_fs.c b/arch/ia64/sn/kernel/sn2/sn_proc_fs.c index c686d9c12f7b..5100261310f7 100644 --- a/arch/ia64/sn/kernel/sn2/sn_proc_fs.c +++ b/arch/ia64/sn/kernel/sn2/sn_proc_fs.c @@ -93,19 +93,22 @@ static int coherence_id_open(struct inode *inode, struct file *file) static struct proc_dir_entry *sn_procfs_create_entry(const char *name, struct proc_dir_entry *parent, int (*openfunc)(struct inode *, struct file *), - int (*releasefunc)(struct inode *, struct file *)) + int (*releasefunc)(struct inode *, struct file *), + ssize_t (*write) (struct file *, const char __user *, size_t, loff_t *)) { struct proc_dir_entry *e = create_proc_entry(name, 0444, parent); if (e) { - e->proc_fops = (struct file_operations *)kmalloc( - sizeof(struct file_operations), GFP_KERNEL); - if (e->proc_fops) { - memset(e->proc_fops, 0, sizeof(struct file_operations)); - e->proc_fops->open = openfunc; - e->proc_fops->read = seq_read; - e->proc_fops->llseek = seq_lseek; - e->proc_fops->release = releasefunc; + struct file_operations *f; + + f = kzalloc(sizeof(*f), GFP_KERNEL); + if (f) { + f->open = openfunc; + f->read = seq_read; + f->llseek = seq_lseek; + f->release = releasefunc; + f->write = write; + e->proc_fops = f; } } @@ -119,31 +122,29 @@ extern int sn_topology_release(struct inode *, struct file *); void register_sn_procfs(void) { static struct proc_dir_entry *sgi_proc_dir = NULL; - struct proc_dir_entry *e; BUG_ON(sgi_proc_dir != NULL); if (!(sgi_proc_dir = proc_mkdir("sgi_sn", NULL))) return; sn_procfs_create_entry("partition_id", sgi_proc_dir, - partition_id_open, single_release); + partition_id_open, single_release, NULL); sn_procfs_create_entry("system_serial_number", sgi_proc_dir, - system_serial_number_open, single_release); + system_serial_number_open, single_release, NULL); sn_procfs_create_entry("licenseID", sgi_proc_dir, - licenseID_open, single_release); + licenseID_open, single_release, NULL); - e = sn_procfs_create_entry("sn_force_interrupt", sgi_proc_dir, - sn_force_interrupt_open, single_release); - if (e) - e->proc_fops->write = sn_force_interrupt_write_proc; + sn_procfs_create_entry("sn_force_interrupt", sgi_proc_dir, + sn_force_interrupt_open, 
single_release, + sn_force_interrupt_write_proc); sn_procfs_create_entry("coherence_id", sgi_proc_dir, - coherence_id_open, single_release); + coherence_id_open, single_release, NULL); sn_procfs_create_entry("sn_topology", sgi_proc_dir, - sn_topology_open, sn_topology_release); + sn_topology_open, sn_topology_release, NULL); } #endif /* CONFIG_PROC_FS */ diff --git a/arch/m32r/Kconfig b/arch/m32r/Kconfig index a3dcc3fab4b7..05c864c6c2d9 100644 --- a/arch/m32r/Kconfig +++ b/arch/m32r/Kconfig @@ -214,6 +214,14 @@ config RWSEM_XCHGADD_ALGORITHM bool default n +config GENERIC_FIND_NEXT_BIT + bool + default y + +config GENERIC_HWEIGHT + bool + default y + config GENERIC_CALIBRATE_DELAY bool default y diff --git a/arch/m32r/kernel/setup.c b/arch/m32r/kernel/setup.c index d742037a7ccb..0d78942b4c76 100644 --- a/arch/m32r/kernel/setup.c +++ b/arch/m32r/kernel/setup.c @@ -24,6 +24,7 @@ #include <linux/tty.h> #include <linux/cpu.h> #include <linux/nodemask.h> +#include <linux/pfn.h> #include <asm/processor.h> #include <asm/pgtable.h> diff --git a/arch/m32r/mm/discontig.c b/arch/m32r/mm/discontig.c index 08e727955555..cf610a7c5ff0 100644 --- a/arch/m32r/mm/discontig.c +++ b/arch/m32r/mm/discontig.c @@ -13,6 +13,7 @@ #include <linux/initrd.h> #include <linux/nodemask.h> #include <linux/module.h> +#include <linux/pfn.h> #include <asm/setup.h> @@ -137,12 +138,6 @@ unsigned long __init zone_sizes_init(void) int nid, i; mem_prof_t *mp; - pgdat_list = NULL; - for (nid = num_online_nodes() - 1 ; nid >= 0 ; nid--) { - NODE_DATA(nid)->pgdat_next = pgdat_list; - pgdat_list = NODE_DATA(nid); - } - for_each_online_node(nid) { mp = &mem_prof[nid]; for (i = 0 ; i < MAX_NR_ZONES ; i++) { diff --git a/arch/m32r/mm/init.c b/arch/m32r/mm/init.c index c9e7dad860b7..b71348fec1f4 100644 --- a/arch/m32r/mm/init.c +++ b/arch/m32r/mm/init.c @@ -18,6 +18,7 @@ #include <linux/highmem.h> #include <linux/bitops.h> #include <linux/nodemask.h> +#include <linux/pfn.h> #include <asm/types.h> #include <asm/processor.h> #include <asm/page.h> @@ -47,7 +48,7 @@ void show_mem(void) printk("Mem-info:\n"); show_free_areas(); printk("Free swap: %6ldkB\n",nr_swap_pages<<(PAGE_SHIFT-10)); - for_each_pgdat(pgdat) { + for_each_online_pgdat(pgdat) { unsigned long flags; pgdat_resize_lock(pgdat, &flags); for (i = 0; i < pgdat->node_spanned_pages; ++i) { diff --git a/arch/m68k/Kconfig b/arch/m68k/Kconfig index 8849439e88dd..805b81fedf80 100644 --- a/arch/m68k/Kconfig +++ b/arch/m68k/Kconfig @@ -17,6 +17,10 @@ config RWSEM_GENERIC_SPINLOCK config RWSEM_XCHGADD_ALGORITHM bool +config GENERIC_HWEIGHT + bool + default y + config GENERIC_CALIBRATE_DELAY bool default y diff --git a/arch/m68k/bvme6000/config.c b/arch/m68k/bvme6000/config.c index 3ffc84f9c291..c90cb5fcc8ef 100644 --- a/arch/m68k/bvme6000/config.c +++ b/arch/m68k/bvme6000/config.c @@ -142,7 +142,7 @@ void __init config_bvme6000(void) /* Now do the PIT configuration */ pit->pgcr = 0x00; /* Unidirectional 8 bit, no handshake for now */ - pit->psrr = 0x18; /* PIACK and PIRQ fucntions enabled */ + pit->psrr = 0x18; /* PIACK and PIRQ functions enabled */ pit->pacr = 0x00; /* Sub Mode 00, H2 i/p, no DMA */ pit->padr = 0x00; /* Just to be tidy! 
*/ pit->paddr = 0x00; /* All inputs for now (safest) */ diff --git a/arch/m68k/mvme16x/rtc.c b/arch/m68k/mvme16x/rtc.c index a69fe3048edc..b0e4c084df8a 100644 --- a/arch/m68k/mvme16x/rtc.c +++ b/arch/m68k/mvme16x/rtc.c @@ -17,6 +17,7 @@ #include <linux/poll.h> #include <linux/mc146818rtc.h> /* For struct rtc_time and ioctls, etc */ #include <linux/smp_lock.h> +#include <linux/bcd.h> #include <asm/mvme16xhw.h> #include <asm/io.h> @@ -31,9 +32,6 @@ * ioctls. */ -#define BCD2BIN(val) (((val)&15) + ((val)>>4)*10) -#define BIN2BCD(val) ((((val)/10)<<4) + (val)%10) - static const unsigned char days_in_mo[] = {0, 31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31}; diff --git a/arch/m68knommu/Kconfig b/arch/m68knommu/Kconfig index e50858dbc237..3cde6822ead1 100644 --- a/arch/m68knommu/Kconfig +++ b/arch/m68knommu/Kconfig @@ -25,6 +25,14 @@ config RWSEM_XCHGADD_ALGORITHM bool default n +config GENERIC_FIND_NEXT_BIT + bool + default y + +config GENERIC_HWEIGHT + bool + default y + config GENERIC_CALIBRATE_DELAY bool default y diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig index ac2012f033d6..5080ea1799a4 100644 --- a/arch/mips/Kconfig +++ b/arch/mips/Kconfig @@ -801,6 +801,14 @@ config RWSEM_GENERIC_SPINLOCK config RWSEM_XCHGADD_ALGORITHM bool +config GENERIC_FIND_NEXT_BIT + bool + default y + +config GENERIC_HWEIGHT + bool + default y + config GENERIC_CALIBRATE_DELAY bool default y diff --git a/arch/mips/ddb5xxx/common/rtc_ds1386.c b/arch/mips/ddb5xxx/common/rtc_ds1386.c index 995896ac0e39..5dc34daa7150 100644 --- a/arch/mips/ddb5xxx/common/rtc_ds1386.c +++ b/arch/mips/ddb5xxx/common/rtc_ds1386.c @@ -165,6 +165,6 @@ rtc_ds1386_init(unsigned long base) WRITE_RTC(0xB, byte); /* set the function pointers */ - rtc_get_time = rtc_ds1386_get_time; - rtc_set_time = rtc_ds1386_set_time; + rtc_mips_get_time = rtc_ds1386_get_time; + rtc_mips_set_time = rtc_ds1386_set_time; } diff --git a/arch/mips/dec/time.c b/arch/mips/dec/time.c index 174822344131..74cb055d4bf6 100644 --- a/arch/mips/dec/time.c +++ b/arch/mips/dec/time.c @@ -36,41 +36,13 @@ #include <asm/dec/ioasic_addrs.h> #include <asm/dec/machtype.h> - -/* - * Returns true if a clock update is in progress - */ -static inline unsigned char dec_rtc_is_updating(void) -{ - unsigned char uip; - unsigned long flags; - - spin_lock_irqsave(&rtc_lock, flags); - uip = (CMOS_READ(RTC_FREQ_SELECT) & RTC_UIP); - spin_unlock_irqrestore(&rtc_lock, flags); - return uip; -} - static unsigned long dec_rtc_get_time(void) { unsigned int year, mon, day, hour, min, sec, real_year; - int i; unsigned long flags; - /* The Linux interpretation of the DS1287 clock register contents: - * When the Update-In-Progress (UIP) flag goes from 1 to 0, the - * RTC registers show the second which has precisely just started. - * Let's hope other operating systems interpret the RTC the same way. - */ - /* read RTC exactly on falling edge of update flag */ - for (i = 0; i < 1000000; i++) /* may take up to 1 second... */ - if (dec_rtc_is_updating()) - break; - for (i = 0; i < 1000000; i++) /* must try at least 2.228 ms */ - if (!dec_rtc_is_updating()) - break; spin_lock_irqsave(&rtc_lock, flags); - /* Isn't this overkill? UIP above should guarantee consistency */ + do { sec = CMOS_READ(RTC_SECONDS); min = CMOS_READ(RTC_MINUTES); @@ -78,7 +50,16 @@ static unsigned long dec_rtc_get_time(void) day = CMOS_READ(RTC_DAY_OF_MONTH); mon = CMOS_READ(RTC_MONTH); year = CMOS_READ(RTC_YEAR); + /* + * The PROM will reset the year to either '72 or '73. 
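[Editor's aside: a userspace sketch of two techniques visible in the dec_rtc_get_time() rework nearby: re-reading a multi-register clock until the seconds register is stable across the pass, and converting BCD-encoded registers with BCD2BIN/BIN2BCD-style helpers as in <linux/bcd.h>. The simulated register file and its rollover model are invented for the demo.]

/* Read seconds/minutes/hours, then re-read seconds; if it changed, an
 * update raced with us and the whole pass is retried so no torn
 * (carry-rippled) value is returned. */
#include <stdio.h>
#include <stdint.h>

#define BCD2BIN(v) (((v) & 0x0f) + ((v) >> 4) * 10)
#define BIN2BCD(v) ((((v) / 10) << 4) + (v) % 10)

/* Simulated RTC registers, stored in BCD: 12:59:59 about to tick over. */
static uint8_t rtc_sec = 0x59, rtc_min = 0x59, rtc_hour = 0x12;
static int ticks;

static uint8_t read_reg(const uint8_t *reg)
{
	/* Model an update racing with the reader: after a few accesses
	 * the clock rolls over to 13:00:00. */
	if (++ticks == 4) {
		rtc_sec = 0x00; rtc_min = 0x00; rtc_hour = 0x13;
	}
	return *reg;
}

int main(void)
{
	uint8_t sec, min, hour;

	do {
		sec  = read_reg(&rtc_sec);
		min  = read_reg(&rtc_min);
		hour = read_reg(&rtc_hour);
	} while (sec != read_reg(&rtc_sec));	/* retry if seconds moved mid-read */

	printf("%02d:%02d:%02d\n", BCD2BIN(hour), BCD2BIN(min), BCD2BIN(sec));
	printf("0x%02x is BCD for %d\n", BIN2BCD(59), 59);
	return 0;
}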
+ * Therefore we store the real year separately, in one + * of unused BBU RAM locations. + */ + real_year = CMOS_READ(RTC_DEC_YEAR); } while (sec != CMOS_READ(RTC_SECONDS)); + + spin_unlock_irqrestore(&rtc_lock, flags); + if (!(CMOS_READ(RTC_CONTROL) & RTC_DM_BINARY) || RTC_ALWAYS_BCD) { sec = BCD2BIN(sec); min = BCD2BIN(min); @@ -87,13 +68,7 @@ static unsigned long dec_rtc_get_time(void) mon = BCD2BIN(mon); year = BCD2BIN(year); } - /* - * The PROM will reset the year to either '72 or '73. - * Therefore we store the real year separately, in one - * of unused BBU RAM locations. - */ - real_year = CMOS_READ(RTC_DEC_YEAR); - spin_unlock_irqrestore(&rtc_lock, flags); + year += real_year - 72 + 2000; return mktime(year, mon, day, hour, min, sec); @@ -193,8 +168,8 @@ static void dec_ioasic_hpt_init(unsigned int count) void __init dec_time_init(void) { - rtc_get_time = dec_rtc_get_time; - rtc_set_mmss = dec_rtc_set_mmss; + rtc_mips_get_time = dec_rtc_get_time; + rtc_mips_set_mmss = dec_rtc_set_mmss; mips_timer_state = dec_timer_state; mips_timer_ack = dec_timer_ack; diff --git a/arch/mips/ite-boards/generic/time.c b/arch/mips/ite-boards/generic/time.c index f5d67ee21ac6..b79817bb6cce 100644 --- a/arch/mips/ite-boards/generic/time.c +++ b/arch/mips/ite-boards/generic/time.c @@ -227,8 +227,8 @@ void __init it8172_time_init(void) local_irq_restore(flags); - rtc_get_time = it8172_rtc_get_time; - rtc_set_time = it8172_rtc_set_time; + rtc_mips_get_time = it8172_rtc_get_time; + rtc_mips_set_time = it8172_rtc_set_time; } #define ALLINTS (IE_IRQ0 | IE_IRQ1 | IE_IRQ2 | IE_IRQ3 | IE_IRQ4 | IE_IRQ5) diff --git a/arch/mips/ite-boards/ivr/init.c b/arch/mips/ite-boards/ivr/init.c index ea4e1935fec5..b774db035b31 100644 --- a/arch/mips/ite-boards/ivr/init.c +++ b/arch/mips/ite-boards/ivr/init.c @@ -45,9 +45,6 @@ extern void __init prom_init_cmdline(void); extern unsigned long __init prom_get_memsize(void); extern void __init it8172_init_ram_resource(unsigned long memsize); -#define PFN_UP(x) (((x) + PAGE_SIZE-1) >> PAGE_SHIFT) -#define PFN_ALIGN(x) (((unsigned long)(x) + (PAGE_SIZE - 1)) & PAGE_MASK) - const char *get_system_type(void) { return "Globespan IVR"; diff --git a/arch/mips/ite-boards/qed-4n-s01b/init.c b/arch/mips/ite-boards/qed-4n-s01b/init.c index 56dca7e0c21d..e8ec8be66a80 100644 --- a/arch/mips/ite-boards/qed-4n-s01b/init.c +++ b/arch/mips/ite-boards/qed-4n-s01b/init.c @@ -45,9 +45,6 @@ extern void __init prom_init_cmdline(void); extern unsigned long __init prom_get_memsize(void); extern void __init it8172_init_ram_resource(unsigned long memsize); -#define PFN_UP(x) (((x) + PAGE_SIZE-1) >> PAGE_SHIFT) -#define PFN_ALIGN(x) (((unsigned long)(x) + (PAGE_SIZE - 1)) & PAGE_MASK) - const char *get_system_type(void) { return "ITE QED-4N-S01B"; diff --git a/arch/mips/jmr3927/common/rtc_ds1742.c b/arch/mips/jmr3927/common/rtc_ds1742.c index 9a8bff153d80..a6bd3f4d3049 100644 --- a/arch/mips/jmr3927/common/rtc_ds1742.c +++ b/arch/mips/jmr3927/common/rtc_ds1742.c @@ -159,8 +159,8 @@ rtc_ds1742_init(unsigned long base) db_assert((rtc_base & 0xe0000000) == KSEG1); /* set the function pointers */ - rtc_get_time = rtc_ds1742_get_time; - rtc_set_time = rtc_ds1742_set_time; + rtc_mips_get_time = rtc_ds1742_get_time; + rtc_mips_set_time = rtc_ds1742_set_time; /* clear oscillator stop bit */ CMOS_WRITE(RTC_READ, RTC_CONTROL); diff --git a/arch/mips/kernel/linux32.c b/arch/mips/kernel/linux32.c index 013bc93688e8..3f40c37a9ee6 100644 --- a/arch/mips/kernel/linux32.c +++ b/arch/mips/kernel/linux32.c @@ -30,7 +30,6 @@ 
#include <linux/utime.h> #include <linux/utsname.h> #include <linux/personality.h> -#include <linux/timex.h> #include <linux/dnotify.h> #include <linux/module.h> #include <linux/binfmts.h> @@ -1157,79 +1156,6 @@ out: return err; } -/* Handle adjtimex compatibility. */ - -struct timex32 { - u32 modes; - s32 offset, freq, maxerror, esterror; - s32 status, constant, precision, tolerance; - struct compat_timeval time; - s32 tick; - s32 ppsfreq, jitter, shift, stabil; - s32 jitcnt, calcnt, errcnt, stbcnt; - s32 :32; s32 :32; s32 :32; s32 :32; - s32 :32; s32 :32; s32 :32; s32 :32; - s32 :32; s32 :32; s32 :32; s32 :32; -}; - -extern int do_adjtimex(struct timex *); - -asmlinkage int sys32_adjtimex(struct timex32 __user *utp) -{ - struct timex txc; - int ret; - - memset(&txc, 0, sizeof(struct timex)); - - if (get_user(txc.modes, &utp->modes) || - __get_user(txc.offset, &utp->offset) || - __get_user(txc.freq, &utp->freq) || - __get_user(txc.maxerror, &utp->maxerror) || - __get_user(txc.esterror, &utp->esterror) || - __get_user(txc.status, &utp->status) || - __get_user(txc.constant, &utp->constant) || - __get_user(txc.precision, &utp->precision) || - __get_user(txc.tolerance, &utp->tolerance) || - __get_user(txc.time.tv_sec, &utp->time.tv_sec) || - __get_user(txc.time.tv_usec, &utp->time.tv_usec) || - __get_user(txc.tick, &utp->tick) || - __get_user(txc.ppsfreq, &utp->ppsfreq) || - __get_user(txc.jitter, &utp->jitter) || - __get_user(txc.shift, &utp->shift) || - __get_user(txc.stabil, &utp->stabil) || - __get_user(txc.jitcnt, &utp->jitcnt) || - __get_user(txc.calcnt, &utp->calcnt) || - __get_user(txc.errcnt, &utp->errcnt) || - __get_user(txc.stbcnt, &utp->stbcnt)) - return -EFAULT; - - ret = do_adjtimex(&txc); - - if (put_user(txc.modes, &utp->modes) || - __put_user(txc.offset, &utp->offset) || - __put_user(txc.freq, &utp->freq) || - __put_user(txc.maxerror, &utp->maxerror) || - __put_user(txc.esterror, &utp->esterror) || - __put_user(txc.status, &utp->status) || - __put_user(txc.constant, &utp->constant) || - __put_user(txc.precision, &utp->precision) || - __put_user(txc.tolerance, &utp->tolerance) || - __put_user(txc.time.tv_sec, &utp->time.tv_sec) || - __put_user(txc.time.tv_usec, &utp->time.tv_usec) || - __put_user(txc.tick, &utp->tick) || - __put_user(txc.ppsfreq, &utp->ppsfreq) || - __put_user(txc.jitter, &utp->jitter) || - __put_user(txc.shift, &utp->shift) || - __put_user(txc.stabil, &utp->stabil) || - __put_user(txc.jitcnt, &utp->jitcnt) || - __put_user(txc.calcnt, &utp->calcnt) || - __put_user(txc.errcnt, &utp->errcnt) || - __put_user(txc.stbcnt, &utp->stbcnt)) - ret = -EFAULT; - - return ret; -} - asmlinkage int sys32_sendfile(int out_fd, int in_fd, compat_off_t __user *offset, s32 count) { diff --git a/arch/mips/kernel/scall64-n32.S b/arch/mips/kernel/scall64-n32.S index 02c8267e45e7..05a2c0567dae 100644 --- a/arch/mips/kernel/scall64-n32.S +++ b/arch/mips/kernel/scall64-n32.S @@ -273,7 +273,7 @@ EXPORT(sysn32_call_table) PTR sys_pivot_root PTR sys32_sysctl PTR sys_prctl - PTR sys32_adjtimex + PTR compat_sys_adjtimex PTR compat_sys_setrlimit /* 6155 */ PTR sys_chroot PTR sys_sync diff --git a/arch/mips/kernel/scall64-o32.S b/arch/mips/kernel/scall64-o32.S index 797e0d874889..19c4ca481b02 100644 --- a/arch/mips/kernel/scall64-o32.S +++ b/arch/mips/kernel/scall64-o32.S @@ -328,7 +328,7 @@ sys_call_table: PTR sys_setdomainname PTR sys32_newuname PTR sys_ni_syscall /* sys_modify_ldt */ - PTR sys32_adjtimex + PTR compat_sys_adjtimex PTR sys_mprotect /* 4125 */ PTR compat_sys_sigprocmask PTR 
sys_ni_syscall /* was creat_module */ diff --git a/arch/mips/kernel/setup.c b/arch/mips/kernel/setup.c index 0cb3b6097e0e..dcbfd27071f0 100644 --- a/arch/mips/kernel/setup.c +++ b/arch/mips/kernel/setup.c @@ -34,6 +34,7 @@ #include <linux/highmem.h> #include <linux/console.h> #include <linux/mmzone.h> +#include <linux/pfn.h> #include <asm/addrspace.h> #include <asm/bootinfo.h> @@ -257,10 +258,6 @@ static inline int parse_rd_cmdline(unsigned long* rd_start, unsigned long* rd_en return 0; } -#define PFN_UP(x) (((x) + PAGE_SIZE - 1) >> PAGE_SHIFT) -#define PFN_DOWN(x) ((x) >> PAGE_SHIFT) -#define PFN_PHYS(x) ((x) << PAGE_SHIFT) - #define MAXMEM HIGHMEM_START #define MAXMEM_PFN PFN_DOWN(MAXMEM) @@ -493,10 +490,6 @@ static inline void resource_init(void) } } -#undef PFN_UP -#undef PFN_DOWN -#undef PFN_PHYS - #undef MAXMEM #undef MAXMEM_PFN diff --git a/arch/mips/kernel/time.c b/arch/mips/kernel/time.c index 51273b7297a7..5e51a2d8f3f0 100644 --- a/arch/mips/kernel/time.c +++ b/arch/mips/kernel/time.c @@ -65,9 +65,9 @@ static int null_rtc_set_time(unsigned long sec) return 0; } -unsigned long (*rtc_get_time)(void) = null_rtc_get_time; -int (*rtc_set_time)(unsigned long) = null_rtc_set_time; -int (*rtc_set_mmss)(unsigned long); +unsigned long (*rtc_mips_get_time)(void) = null_rtc_get_time; +int (*rtc_mips_set_time)(unsigned long) = null_rtc_set_time; +int (*rtc_mips_set_mmss)(unsigned long); /* usecs per counter cycle, shifted to left by 32 bits */ @@ -440,14 +440,14 @@ irqreturn_t timer_interrupt(int irq, void *dev_id, struct pt_regs *regs) /* * If we have an externally synchronized Linux clock, then update - * CMOS clock accordingly every ~11 minutes. rtc_set_time() has to be + * CMOS clock accordingly every ~11 minutes. rtc_mips_set_time() has to be * called as close as possible to 500 ms before the new second starts. */ if (ntp_synced() && xtime.tv_sec > last_rtc_update + 660 && (xtime.tv_nsec / 1000) >= 500000 - ((unsigned) TICK_SIZE) / 2 && (xtime.tv_nsec / 1000) <= 500000 + ((unsigned) TICK_SIZE) / 2) { - if (rtc_set_mmss(xtime.tv_sec) == 0) { + if (rtc_mips_set_mmss(xtime.tv_sec) == 0) { last_rtc_update = xtime.tv_sec; } else { /* do it again in 60 s */ @@ -565,7 +565,7 @@ asmlinkage void ll_local_timer_interrupt(int irq, struct pt_regs *regs) * b) (optional) calibrate and set the mips_hpt_frequency * (only needed if you intended to use fixed_rate_gettimeoffset * or use cpu counter as timer interrupt source) - * 2) setup xtime based on rtc_get_time(). + * 2) setup xtime based on rtc_mips_get_time(). * 3) choose a appropriate gettimeoffset routine. 
* 4) calculate a couple of cached variables for later usage * 5) board_timer_setup() - @@ -633,10 +633,10 @@ void __init time_init(void) if (board_time_init) board_time_init(); - if (!rtc_set_mmss) - rtc_set_mmss = rtc_set_time; + if (!rtc_mips_set_mmss) + rtc_mips_set_mmss = rtc_mips_set_time; - xtime.tv_sec = rtc_get_time(); + xtime.tv_sec = rtc_mips_get_time(); xtime.tv_nsec = 0; set_normalized_timespec(&wall_to_monotonic, @@ -772,8 +772,8 @@ void to_tm(unsigned long tim, struct rtc_time *tm) EXPORT_SYMBOL(rtc_lock); EXPORT_SYMBOL(to_tm); -EXPORT_SYMBOL(rtc_set_time); -EXPORT_SYMBOL(rtc_get_time); +EXPORT_SYMBOL(rtc_mips_set_time); +EXPORT_SYMBOL(rtc_mips_get_time); unsigned long long sched_clock(void) { diff --git a/arch/mips/lasat/setup.c b/arch/mips/lasat/setup.c index 83eb08b7a072..bb70a8240e61 100644 --- a/arch/mips/lasat/setup.c +++ b/arch/mips/lasat/setup.c @@ -165,7 +165,8 @@ void __init plat_setup(void) /* Set up panic notifier */ for (i = 0; i < sizeof(lasat_panic_block) / sizeof(struct notifier_block); i++) - notifier_chain_register(&panic_notifier_list, &lasat_panic_block[i]); + atomic_notifier_chain_register(&panic_notifier_list, + &lasat_panic_block[i]); lasat_reboot_setup(); @@ -174,8 +175,8 @@ void __init plat_setup(void) #ifdef CONFIG_DS1603 ds1603 = &ds_defs[mips_machtype]; - rtc_get_time = ds1603_read; - rtc_set_time = ds1603_set; + rtc_mips_get_time = ds1603_read; + rtc_mips_set_time = ds1603_set; #endif #ifdef DYNAMIC_SERIAL_INIT diff --git a/arch/mips/lasat/sysctl.c b/arch/mips/lasat/sysctl.c index 8ff43a1c1e99..e3d5aaa90f0d 100644 --- a/arch/mips/lasat/sysctl.c +++ b/arch/mips/lasat/sysctl.c @@ -30,12 +30,13 @@ #include <linux/string.h> #include <linux/net.h> #include <linux/inet.h> +#include <linux/mutex.h> #include <asm/uaccess.h> #include "sysctl.h" #include "ds1603.h" -static DECLARE_MUTEX(lasat_info_sem); +static DEFINE_MUTEX(lasat_info_mutex); /* Strategy function to write EEPROM after changing string entry */ int sysctl_lasatstring(ctl_table *table, int *name, int nlen, @@ -43,17 +44,17 @@ int sysctl_lasatstring(ctl_table *table, int *name, int nlen, void *newval, size_t newlen, void **context) { int r; - down(&lasat_info_sem); + mutex_lock(&lasat_info_mutex); r = sysctl_string(table, name, nlen, oldval, oldlenp, newval, newlen, context); if (r < 0) { - up(&lasat_info_sem); + mutex_unlock(&lasat_info_mutex); return r; } if (newval && newlen) { lasat_write_eeprom_info(); } - up(&lasat_info_sem); + mutex_unlock(&lasat_info_mutex); return 1; } @@ -63,14 +64,14 @@ int proc_dolasatstring(ctl_table *table, int write, struct file *filp, void *buffer, size_t *lenp, loff_t *ppos) { int r; - down(&lasat_info_sem); + mutex_lock(&lasat_info_mutex); r = proc_dostring(table, write, filp, buffer, lenp, ppos); if ( (!write) || r) { - up(&lasat_info_sem); + mutex_unlock(&lasat_info_mutex); return r; } lasat_write_eeprom_info(); - up(&lasat_info_sem); + mutex_unlock(&lasat_info_mutex); return 0; } @@ -79,14 +80,14 @@ int proc_dolasatint(ctl_table *table, int write, struct file *filp, void *buffer, size_t *lenp, loff_t *ppos) { int r; - down(&lasat_info_sem); + mutex_lock(&lasat_info_mutex); r = proc_dointvec(table, write, filp, buffer, lenp, ppos); if ( (!write) || r) { - up(&lasat_info_sem); + mutex_unlock(&lasat_info_mutex); return r; } lasat_write_eeprom_info(); - up(&lasat_info_sem); + mutex_unlock(&lasat_info_mutex); return 0; } @@ -98,7 +99,7 @@ int proc_dolasatrtc(ctl_table *table, int write, struct file *filp, void *buffer, size_t *lenp, loff_t *ppos) { int r; - 
down(&lasat_info_sem); + mutex_lock(&lasat_info_mutex); if (!write) { rtctmp = ds1603_read(); /* check for time < 0 and set to 0 */ @@ -107,11 +108,11 @@ int proc_dolasatrtc(ctl_table *table, int write, struct file *filp, } r = proc_dointvec(table, write, filp, buffer, lenp, ppos); if ( (!write) || r) { - up(&lasat_info_sem); + mutex_unlock(&lasat_info_mutex); return r; } ds1603_set(rtctmp); - up(&lasat_info_sem); + mutex_unlock(&lasat_info_mutex); return 0; } #endif @@ -122,16 +123,16 @@ int sysctl_lasat_intvec(ctl_table *table, int *name, int nlen, void *newval, size_t newlen, void **context) { int r; - down(&lasat_info_sem); + mutex_lock(&lasat_info_mutex); r = sysctl_intvec(table, name, nlen, oldval, oldlenp, newval, newlen, context); if (r < 0) { - up(&lasat_info_sem); + mutex_unlock(&lasat_info_mutex); return r; } if (newval && newlen) { lasat_write_eeprom_info(); } - up(&lasat_info_sem); + mutex_unlock(&lasat_info_mutex); return 1; } @@ -142,19 +143,19 @@ int sysctl_lasat_rtc(ctl_table *table, int *name, int nlen, void *newval, size_t newlen, void **context) { int r; - down(&lasat_info_sem); + mutex_lock(&lasat_info_mutex); rtctmp = ds1603_read(); if (rtctmp < 0) rtctmp = 0; r = sysctl_intvec(table, name, nlen, oldval, oldlenp, newval, newlen, context); if (r < 0) { - up(&lasat_info_sem); + mutex_unlock(&lasat_info_mutex); return r; } if (newval && newlen) { ds1603_set(rtctmp); } - up(&lasat_info_sem); + mutex_unlock(&lasat_info_mutex); return 1; } #endif @@ -192,13 +193,13 @@ int proc_lasat_ip(ctl_table *table, int write, struct file *filp, return 0; } - down(&lasat_info_sem); + mutex_lock(&lasat_info_mutex); if (write) { len = 0; p = buffer; while (len < *lenp) { if(get_user(c, p++)) { - up(&lasat_info_sem); + mutex_unlock(&lasat_info_mutex); return -EFAULT; } if (c == 0 || c == '\n') @@ -209,7 +210,7 @@ int proc_lasat_ip(ctl_table *table, int write, struct file *filp, len = sizeof(proc_lasat_ipbuf) - 1; if (copy_from_user(proc_lasat_ipbuf, buffer, len)) { - up(&lasat_info_sem); + mutex_unlock(&lasat_info_mutex); return -EFAULT; } proc_lasat_ipbuf[len] = 0; @@ -230,12 +231,12 @@ int proc_lasat_ip(ctl_table *table, int write, struct file *filp, len = *lenp; if (len) if(copy_to_user(buffer, proc_lasat_ipbuf, len)) { - up(&lasat_info_sem); + mutex_unlock(&lasat_info_mutex); return -EFAULT; } if (len < *lenp) { if(put_user('\n', ((char *) buffer) + len)) { - up(&lasat_info_sem); + mutex_unlock(&lasat_info_mutex); return -EFAULT; } len++; @@ -244,7 +245,7 @@ int proc_lasat_ip(ctl_table *table, int write, struct file *filp, *ppos += len; } update_bcastaddr(); - up(&lasat_info_sem); + mutex_unlock(&lasat_info_mutex); return 0; } #endif /* defined(CONFIG_INET) */ @@ -256,10 +257,10 @@ static int sysctl_lasat_eeprom_value(ctl_table *table, int *name, int nlen, { int r; - down(&lasat_info_sem); + mutex_lock(&lasat_info_mutex); r = sysctl_intvec(table, name, nlen, oldval, oldlenp, newval, newlen, context); if (r < 0) { - up(&lasat_info_sem); + mutex_unlock(&lasat_info_mutex); return r; } @@ -271,7 +272,7 @@ static int sysctl_lasat_eeprom_value(ctl_table *table, int *name, int nlen, lasat_write_eeprom_info(); lasat_init_board_info(); } - up(&lasat_info_sem); + mutex_unlock(&lasat_info_mutex); return 0; } @@ -280,10 +281,10 @@ int proc_lasat_eeprom_value(ctl_table *table, int write, struct file *filp, void *buffer, size_t *lenp, loff_t *ppos) { int r; - down(&lasat_info_sem); + mutex_lock(&lasat_info_mutex); r = proc_dointvec(table, write, filp, buffer, lenp, ppos); if ( (!write) || r) { - 
up(&lasat_info_sem); + mutex_unlock(&lasat_info_mutex); return r; } if (filp && filp->f_dentry) @@ -294,7 +295,7 @@ int proc_lasat_eeprom_value(ctl_table *table, int write, struct file *filp, lasat_board_info.li_eeprom_info.debugaccess = lasat_board_info.li_debugaccess; } lasat_write_eeprom_info(); - up(&lasat_info_sem); + mutex_unlock(&lasat_info_mutex); return 0; } diff --git a/arch/mips/mips-boards/atlas/atlas_setup.c b/arch/mips/mips-boards/atlas/atlas_setup.c index 873cf3141a31..c20d401ecf80 100644 --- a/arch/mips/mips-boards/atlas/atlas_setup.c +++ b/arch/mips/mips-boards/atlas/atlas_setup.c @@ -65,7 +65,7 @@ void __init plat_setup(void) board_time_init = mips_time_init; board_timer_setup = mips_timer_setup; - rtc_get_time = mips_rtc_get_time; + rtc_mips_get_time = mips_rtc_get_time; } static void __init serial_init(void) diff --git a/arch/mips/mips-boards/generic/memory.c b/arch/mips/mips-boards/generic/memory.c index ee5e70c95cf3..32c9210373ac 100644 --- a/arch/mips/mips-boards/generic/memory.c +++ b/arch/mips/mips-boards/generic/memory.c @@ -49,9 +49,6 @@ static char *mtypes[3] = { /* References to section boundaries */ extern char _end; -#define PFN_ALIGN(x) (((unsigned long)(x) + (PAGE_SIZE - 1)) & PAGE_MASK) - - struct prom_pmemblock * __init prom_getmdesc(void) { char *memsize_str; @@ -109,10 +106,10 @@ struct prom_pmemblock * __init prom_getmdesc(void) mdesc[3].type = yamon_dontuse; mdesc[3].base = 0x00100000; - mdesc[3].size = CPHYSADDR(PFN_ALIGN(&_end)) - mdesc[3].base; + mdesc[3].size = CPHYSADDR(PAGE_ALIGN(&_end)) - mdesc[3].base; mdesc[4].type = yamon_free; - mdesc[4].base = CPHYSADDR(PFN_ALIGN(&_end)); + mdesc[4].base = CPHYSADDR(PAGE_ALIGN(&_end)); mdesc[4].size = memsize - mdesc[4].base; return &mdesc[0]; diff --git a/arch/mips/mips-boards/malta/malta_setup.c b/arch/mips/mips-boards/malta/malta_setup.c index 2209e8a9de34..b8488aab6df1 100644 --- a/arch/mips/mips-boards/malta/malta_setup.c +++ b/arch/mips/mips-boards/malta/malta_setup.c @@ -225,5 +225,5 @@ void __init plat_setup(void) board_time_init = mips_time_init; board_timer_setup = mips_timer_setup; - rtc_get_time = mips_rtc_get_time; + rtc_mips_get_time = mips_rtc_get_time; } diff --git a/arch/mips/mips-boards/sim/sim_mem.c b/arch/mips/mips-boards/sim/sim_mem.c index 1ec4e75656bd..e57f737bab10 100644 --- a/arch/mips/mips-boards/sim/sim_mem.c +++ b/arch/mips/mips-boards/sim/sim_mem.c @@ -42,9 +42,6 @@ static char *mtypes[3] = { /* References to section boundaries */ extern char _end; -#define PFN_ALIGN(x) (((unsigned long)(x) + (PAGE_SIZE - 1)) & PAGE_MASK) - - struct prom_pmemblock * __init prom_getmdesc(void) { unsigned int memsize; @@ -64,10 +61,10 @@ struct prom_pmemblock * __init prom_getmdesc(void) mdesc[2].type = simmem_reserved; mdesc[2].base = 0x00100000; - mdesc[2].size = CPHYSADDR(PFN_ALIGN(&_end)) - mdesc[2].base; + mdesc[2].size = CPHYSADDR(PAGE_ALIGN(&_end)) - mdesc[2].base; mdesc[3].type = simmem_free; - mdesc[3].base = CPHYSADDR(PFN_ALIGN(&_end)); + mdesc[3].base = CPHYSADDR(PAGE_ALIGN(&_end)); mdesc[3].size = memsize - mdesc[3].base; return &mdesc[0]; diff --git a/arch/mips/mm/init.c b/arch/mips/mm/init.c index 52f7d59fe612..ad89c442f299 100644 --- a/arch/mips/mm/init.c +++ b/arch/mips/mm/init.c @@ -25,6 +25,7 @@ #include <linux/highmem.h> #include <linux/swap.h> #include <linux/proc_fs.h> +#include <linux/pfn.h> #include <asm/bootinfo.h> #include <asm/cachectl.h> @@ -177,9 +178,6 @@ void __init paging_init(void) free_area_init(zones_size); } -#define PFN_UP(x) (((x) + PAGE_SIZE - 1) >> 
PAGE_SHIFT) -#define PFN_DOWN(x) ((x) >> PAGE_SHIFT) - static inline int page_is_ram(unsigned long pagenr) { int i; diff --git a/arch/mips/momentum/jaguar_atx/setup.c b/arch/mips/momentum/jaguar_atx/setup.c index 3784c898db1a..91d9637143d7 100644 --- a/arch/mips/momentum/jaguar_atx/setup.c +++ b/arch/mips/momentum/jaguar_atx/setup.c @@ -229,8 +229,8 @@ void momenco_time_init(void) mips_hpt_frequency = cpu_clock / 2; board_timer_setup = momenco_timer_setup; - rtc_get_time = m48t37y_get_time; - rtc_set_time = m48t37y_set_time; + rtc_mips_get_time = m48t37y_get_time; + rtc_mips_set_time = m48t37y_set_time; } static struct resource mv_pci_io_mem0_resource = { diff --git a/arch/mips/momentum/ocelot_3/setup.c b/arch/mips/momentum/ocelot_3/setup.c index f95677f4f06f..370e75d0e75c 100644 --- a/arch/mips/momentum/ocelot_3/setup.c +++ b/arch/mips/momentum/ocelot_3/setup.c @@ -58,6 +58,7 @@ #include <linux/bootmem.h> #include <linux/mv643xx.h> #include <linux/pm.h> +#include <linux/bcd.h> #include <asm/time.h> #include <asm/page.h> @@ -131,9 +132,6 @@ void setup_wired_tlb_entries(void) add_wired_entry(ENTRYLO(0xfc000000), ENTRYLO(0xfd000000), (signed)0xfc000000, PM_16M); } -#define CONV_BCD_TO_BIN(val) (((val) & 0xf) + (((val) >> 4) * 10)) -#define CONV_BIN_TO_BCD(val) (((val) % 10) + (((val) / 10) << 4)) - unsigned long m48t37y_get_time(void) { unsigned int year, month, day, hour, min, sec; @@ -143,16 +141,16 @@ unsigned long m48t37y_get_time(void) /* stop the update */ rtc_base[0x7ff8] = 0x40; - year = CONV_BCD_TO_BIN(rtc_base[0x7fff]); - year += CONV_BCD_TO_BIN(rtc_base[0x7ff1]) * 100; + year = BCD2BIN(rtc_base[0x7fff]); + year += BCD2BIN(rtc_base[0x7ff1]) * 100; - month = CONV_BCD_TO_BIN(rtc_base[0x7ffe]); + month = BCD2BIN(rtc_base[0x7ffe]); - day = CONV_BCD_TO_BIN(rtc_base[0x7ffd]); + day = BCD2BIN(rtc_base[0x7ffd]); - hour = CONV_BCD_TO_BIN(rtc_base[0x7ffb]); - min = CONV_BCD_TO_BIN(rtc_base[0x7ffa]); - sec = CONV_BCD_TO_BIN(rtc_base[0x7ff9]); + hour = BCD2BIN(rtc_base[0x7ffb]); + min = BCD2BIN(rtc_base[0x7ffa]); + sec = BCD2BIN(rtc_base[0x7ff9]); /* start the update */ rtc_base[0x7ff8] = 0x00; @@ -175,22 +173,22 @@ int m48t37y_set_time(unsigned long sec) rtc_base[0x7ff8] = 0x80; /* year */ - rtc_base[0x7fff] = CONV_BIN_TO_BCD(tm.tm_year % 100); - rtc_base[0x7ff1] = CONV_BIN_TO_BCD(tm.tm_year / 100); + rtc_base[0x7fff] = BIN2BCD(tm.tm_year % 100); + rtc_base[0x7ff1] = BIN2BCD(tm.tm_year / 100); /* month */ - rtc_base[0x7ffe] = CONV_BIN_TO_BCD(tm.tm_mon); + rtc_base[0x7ffe] = BIN2BCD(tm.tm_mon); /* day */ - rtc_base[0x7ffd] = CONV_BIN_TO_BCD(tm.tm_mday); + rtc_base[0x7ffd] = BIN2BCD(tm.tm_mday); /* hour/min/sec */ - rtc_base[0x7ffb] = CONV_BIN_TO_BCD(tm.tm_hour); - rtc_base[0x7ffa] = CONV_BIN_TO_BCD(tm.tm_min); - rtc_base[0x7ff9] = CONV_BIN_TO_BCD(tm.tm_sec); + rtc_base[0x7ffb] = BIN2BCD(tm.tm_hour); + rtc_base[0x7ffa] = BIN2BCD(tm.tm_min); + rtc_base[0x7ff9] = BIN2BCD(tm.tm_sec); /* day of week -- not really used, but let's keep it up-to-date */ - rtc_base[0x7ffc] = CONV_BIN_TO_BCD(tm.tm_wday + 1); + rtc_base[0x7ffc] = BIN2BCD(tm.tm_wday + 1); /* disable writing */ rtc_base[0x7ff8] = 0x00; @@ -215,8 +213,8 @@ void momenco_time_init(void) mips_hpt_frequency = cpu_clock / 2; board_timer_setup = momenco_timer_setup; - rtc_get_time = m48t37y_get_time; - rtc_set_time = m48t37y_set_time; + rtc_mips_get_time = m48t37y_get_time; + rtc_mips_set_time = m48t37y_set_time; } /* diff --git a/arch/mips/momentum/ocelot_c/setup.c b/arch/mips/momentum/ocelot_c/setup.c index bd02e60d037a..a3e6f5575592 100644 --- 
a/arch/mips/momentum/ocelot_c/setup.c +++ b/arch/mips/momentum/ocelot_c/setup.c @@ -227,8 +227,8 @@ void momenco_time_init(void) printk("momenco_time_init cpu_clock=%d\n", cpu_clock); board_timer_setup = momenco_timer_setup; - rtc_get_time = m48t37y_get_time; - rtc_set_time = m48t37y_set_time; + rtc_mips_get_time = m48t37y_get_time; + rtc_mips_set_time = m48t37y_set_time; } void __init plat_setup(void) diff --git a/arch/mips/pmc-sierra/yosemite/setup.c b/arch/mips/pmc-sierra/yosemite/setup.c index 8bce711575de..3f724d661bdb 100644 --- a/arch/mips/pmc-sierra/yosemite/setup.c +++ b/arch/mips/pmc-sierra/yosemite/setup.c @@ -198,8 +198,8 @@ static void __init py_rtc_setup(void) if (!m48t37_base) printk(KERN_ERR "Mapping the RTC failed\n"); - rtc_get_time = m48t37y_get_time; - rtc_set_time = m48t37y_set_time; + rtc_mips_get_time = m48t37y_get_time; + rtc_mips_set_time = m48t37y_set_time; write_seqlock(&xtime_lock); xtime.tv_sec = m48t37y_get_time(); diff --git a/arch/mips/sgi-ip22/ip22-reset.c b/arch/mips/sgi-ip22/ip22-reset.c index 92a3b3c15ed3..a9c58e067b53 100644 --- a/arch/mips/sgi-ip22/ip22-reset.c +++ b/arch/mips/sgi-ip22/ip22-reset.c @@ -238,7 +238,7 @@ static int __init reboot_setup(void) request_irq(SGI_PANEL_IRQ, panel_int, 0, "Front Panel", NULL); init_timer(&blink_timer); blink_timer.function = blink_timeout; - notifier_chain_register(&panic_notifier_list, &panic_block); + atomic_notifier_chain_register(&panic_notifier_list, &panic_block); return 0; } diff --git a/arch/mips/sgi-ip22/ip22-time.c b/arch/mips/sgi-ip22/ip22-time.c index b7300cc5c5ad..cca688ad64ad 100644 --- a/arch/mips/sgi-ip22/ip22-time.c +++ b/arch/mips/sgi-ip22/ip22-time.c @@ -212,8 +212,8 @@ static void indy_timer_setup(struct irqaction *irq) void __init ip22_time_init(void) { /* setup hookup functions */ - rtc_get_time = indy_rtc_get_time; - rtc_set_time = indy_rtc_set_time; + rtc_mips_get_time = indy_rtc_get_time; + rtc_mips_set_time = indy_rtc_set_time; board_time_init = indy_time_init; board_timer_setup = indy_timer_setup; diff --git a/arch/mips/sgi-ip27/ip27-memory.c b/arch/mips/sgi-ip27/ip27-memory.c index e0d095daa5ed..6c00dce9f73f 100644 --- a/arch/mips/sgi-ip27/ip27-memory.c +++ b/arch/mips/sgi-ip27/ip27-memory.c @@ -19,6 +19,7 @@ #include <linux/nodemask.h> #include <linux/swap.h> #include <linux/bootmem.h> +#include <linux/pfn.h> #include <asm/page.h> #include <asm/sections.h> @@ -28,8 +29,6 @@ #include <asm/sn/sn_private.h> -#define PFN_UP(x) (((x) + PAGE_SIZE-1) >> PAGE_SHIFT) - #define SLOT_PFNSHIFT (SLOT_SHIFT - PAGE_SHIFT) #define PFN_NASIDSHFT (NASID_SHFT - PAGE_SHIFT) diff --git a/arch/mips/sgi-ip32/ip32-reset.c b/arch/mips/sgi-ip32/ip32-reset.c index 0c948008b023..ab9d9cef089e 100644 --- a/arch/mips/sgi-ip32/ip32-reset.c +++ b/arch/mips/sgi-ip32/ip32-reset.c @@ -193,7 +193,7 @@ static __init int ip32_reboot_setup(void) init_timer(&blink_timer); blink_timer.function = blink_timeout; - notifier_chain_register(&panic_notifier_list, &panic_block); + atomic_notifier_chain_register(&panic_notifier_list, &panic_block); request_irq(MACEISA_RTC_IRQ, ip32_rtc_int, 0, "rtc", NULL); diff --git a/arch/mips/sgi-ip32/ip32-setup.c b/arch/mips/sgi-ip32/ip32-setup.c index 2f50c79b7887..a2dd8ae1ea8f 100644 --- a/arch/mips/sgi-ip32/ip32-setup.c +++ b/arch/mips/sgi-ip32/ip32-setup.c @@ -91,8 +91,8 @@ void __init plat_setup(void) { board_be_init = ip32_be_init; - rtc_get_time = mc146818_get_cmos_time; - rtc_set_mmss = mc146818_set_rtc_mmss; + rtc_mips_get_time = mc146818_get_cmos_time; + rtc_mips_set_mmss = 
mc146818_set_rtc_mmss; board_time_init = ip32_time_init; board_timer_setup = ip32_timer_setup; diff --git a/arch/mips/sibyte/swarm/setup.c b/arch/mips/sibyte/swarm/setup.c index b661d2425a36..4b5f74ff3edd 100644 --- a/arch/mips/sibyte/swarm/setup.c +++ b/arch/mips/sibyte/swarm/setup.c @@ -121,14 +121,14 @@ void __init plat_setup(void) if (xicor_probe()) { printk("swarm setup: Xicor 1241 RTC detected.\n"); - rtc_get_time = xicor_get_time; - rtc_set_time = xicor_set_time; + rtc_mips_get_time = xicor_get_time; + rtc_mips_set_time = xicor_set_time; } if (m41t81_probe()) { printk("swarm setup: M41T81 RTC detected.\n"); - rtc_get_time = m41t81_get_time; - rtc_set_time = m41t81_set_time; + rtc_mips_get_time = m41t81_get_time; + rtc_mips_set_time = m41t81_set_time; } printk("This kernel optimized for " diff --git a/arch/mips/sni/setup.c b/arch/mips/sni/setup.c index 1141fcd13a59..01ba6c581e3d 100644 --- a/arch/mips/sni/setup.c +++ b/arch/mips/sni/setup.c @@ -164,8 +164,8 @@ static struct pci_controller sni_controller = { static inline void sni_pcimt_time_init(void) { - rtc_get_time = mc146818_get_cmos_time; - rtc_set_time = mc146818_set_rtc_mmss; + rtc_mips_get_time = mc146818_get_cmos_time; + rtc_mips_set_time = mc146818_set_rtc_mmss; } void __init plat_setup(void) diff --git a/arch/mips/tx4927/toshiba_rbtx4927/toshiba_rbtx4927_setup.c b/arch/mips/tx4927/toshiba_rbtx4927/toshiba_rbtx4927_setup.c index 2ad6401d2af4..6dcf077f61a0 100644 --- a/arch/mips/tx4927/toshiba_rbtx4927/toshiba_rbtx4927_setup.c +++ b/arch/mips/tx4927/toshiba_rbtx4927/toshiba_rbtx4927_setup.c @@ -1036,8 +1036,8 @@ toshiba_rbtx4927_time_init(void) #ifdef CONFIG_RTC_DS1742 - rtc_get_time = rtc_ds1742_get_time; - rtc_set_time = rtc_ds1742_set_time; + rtc_mips_get_time = rtc_ds1742_get_time; + rtc_mips_set_time = rtc_ds1742_set_time; TOSHIBA_RBTX4927_SETUP_DPRINTK(TOSHIBA_RBTX4927_SETUP_TIME_INIT, ":rtc_ds1742_init()-\n"); diff --git a/arch/mips/tx4938/common/rtc_rx5c348.c b/arch/mips/tx4938/common/rtc_rx5c348.c index d249edbb6af4..07f782fc0725 100644 --- a/arch/mips/tx4938/common/rtc_rx5c348.c +++ b/arch/mips/tx4938/common/rtc_rx5c348.c @@ -14,6 +14,7 @@ #include <linux/string.h> #include <linux/rtc.h> #include <linux/time.h> +#include <linux/bcd.h> #include <asm/time.h> #include <asm/tx4938/spi.h> @@ -77,17 +78,6 @@ spi_rtc_io(unsigned char *inbuf, unsigned char *outbuf, unsigned int count) inbufs, incounts, outbufs, outcounts, 0); } -/* - * Conversion between binary and BCD. 
- */ -#ifndef BCD_TO_BIN -#define BCD_TO_BIN(val) ((val)=((val)&15) + ((val)>>4)*10) -#endif - -#ifndef BIN_TO_BCD -#define BIN_TO_BCD(val) ((val)=(((val)/10)<<4) + (val)%10) -#endif - /* RTC-dependent code for time.c */ static int @@ -197,6 +187,6 @@ rtc_rx5c348_init(int chipid) srtc_24h = 1; /* set the function pointers */ - rtc_get_time = rtc_rx5c348_get_time; - rtc_set_time = rtc_rx5c348_set_time; + rtc_mips_get_time = rtc_rx5c348_get_time; + rtc_mips_set_time = rtc_rx5c348_set_time; } diff --git a/arch/parisc/Kconfig b/arch/parisc/Kconfig index eca33cfa8a4c..6b3c50964ca9 100644 --- a/arch/parisc/Kconfig +++ b/arch/parisc/Kconfig @@ -25,6 +25,14 @@ config RWSEM_GENERIC_SPINLOCK config RWSEM_XCHGADD_ALGORITHM bool +config GENERIC_FIND_NEXT_BIT + bool + default y + +config GENERIC_HWEIGHT + bool + default y + config GENERIC_CALIBRATE_DELAY bool default y diff --git a/arch/parisc/kernel/pdc_chassis.c b/arch/parisc/kernel/pdc_chassis.c index 2a01fe1bdc98..0cea6958f427 100644 --- a/arch/parisc/kernel/pdc_chassis.c +++ b/arch/parisc/kernel/pdc_chassis.c @@ -150,7 +150,8 @@ void __init parisc_pdc_chassis_init(void) if (handle) { /* initialize panic notifier chain */ - notifier_chain_register(&panic_notifier_list, &pdc_chassis_panic_block); + atomic_notifier_chain_register(&panic_notifier_list, + &pdc_chassis_panic_block); /* initialize reboot notifier chain */ register_reboot_notifier(&pdc_chassis_reboot_block); diff --git a/arch/parisc/kernel/sys_parisc32.c b/arch/parisc/kernel/sys_parisc32.c index 613569018410..d286f68a3d3a 100644 --- a/arch/parisc/kernel/sys_parisc32.c +++ b/arch/parisc/kernel/sys_parisc32.c @@ -21,7 +21,6 @@ #include <linux/times.h> #include <linux/utsname.h> #include <linux/time.h> -#include <linux/timex.h> #include <linux/smp.h> #include <linux/smp_lock.h> #include <linux/sem.h> @@ -567,63 +566,6 @@ asmlinkage int sys32_sendfile64(int out_fd, int in_fd, compat_loff_t __user *off } -struct timex32 { - unsigned int modes; /* mode selector */ - int offset; /* time offset (usec) */ - int freq; /* frequency offset (scaled ppm) */ - int maxerror; /* maximum error (usec) */ - int esterror; /* estimated error (usec) */ - int status; /* clock command/status */ - int constant; /* pll time constant */ - int precision; /* clock precision (usec) (read only) */ - int tolerance; /* clock frequency tolerance (ppm) - * (read only) - */ - struct compat_timeval time; /* (read only) */ - int tick; /* (modified) usecs between clock ticks */ - - int ppsfreq; /* pps frequency (scaled ppm) (ro) */ - int jitter; /* pps jitter (us) (ro) */ - int shift; /* interval duration (s) (shift) (ro) */ - int stabil; /* pps stability (scaled ppm) (ro) */ - int jitcnt; /* jitter limit exceeded (ro) */ - int calcnt; /* calibration intervals (ro) */ - int errcnt; /* calibration errors (ro) */ - int stbcnt; /* stability limit exceeded (ro) */ - - int :32; int :32; int :32; int :32; - int :32; int :32; int :32; int :32; - int :32; int :32; int :32; int :32; -}; - -asmlinkage long sys32_adjtimex(struct timex32 __user *txc_p32) -{ - struct timex txc; - struct timex32 t32; - int ret; - extern int do_adjtimex(struct timex *txc); - - if(copy_from_user(&t32, txc_p32, sizeof(struct timex32))) - return -EFAULT; -#undef CP -#define CP(x) txc.x = t32.x - CP(modes); CP(offset); CP(freq); CP(maxerror); CP(esterror); - CP(status); CP(constant); CP(precision); CP(tolerance); - CP(time.tv_sec); CP(time.tv_usec); CP(tick); CP(ppsfreq); CP(jitter); - CP(shift); CP(stabil); CP(jitcnt); CP(calcnt); CP(errcnt); - CP(stbcnt); - ret 
= do_adjtimex(&txc); -#undef CP -#define CP(x) t32.x = txc.x - CP(modes); CP(offset); CP(freq); CP(maxerror); CP(esterror); - CP(status); CP(constant); CP(precision); CP(tolerance); - CP(time.tv_sec); CP(time.tv_usec); CP(tick); CP(ppsfreq); CP(jitter); - CP(shift); CP(stabil); CP(jitcnt); CP(calcnt); CP(errcnt); - CP(stbcnt); - return copy_to_user(txc_p32, &t32, sizeof(struct timex32)) ? -EFAULT : ret; -} - - struct sysinfo32 { s32 uptime; u32 loads[3]; diff --git a/arch/parisc/kernel/syscall_table.S b/arch/parisc/kernel/syscall_table.S index 71011eadb872..89b6c56ea0a8 100644 --- a/arch/parisc/kernel/syscall_table.S +++ b/arch/parisc/kernel/syscall_table.S @@ -207,7 +207,7 @@ /* struct sockaddr... */ ENTRY_SAME(recvfrom) /* struct timex contains longs */ - ENTRY_DIFF(adjtimex) + ENTRY_COMP(adjtimex) ENTRY_SAME(mprotect) /* 125 */ /* old_sigset_t forced to 32 bits. Beware glibc sigset_t */ ENTRY_COMP(sigprocmask) diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig index fae42da7468d..a433b7126d33 100644 --- a/arch/powerpc/Kconfig +++ b/arch/powerpc/Kconfig @@ -37,6 +37,10 @@ config RWSEM_XCHGADD_ALGORITHM bool default y +config GENERIC_HWEIGHT + bool + default y + config GENERIC_CALIBRATE_DELAY bool default y diff --git a/arch/powerpc/kernel/kprobes.c b/arch/powerpc/kernel/kprobes.c index cb1fe5878e8b..ad7a90212204 100644 --- a/arch/powerpc/kernel/kprobes.c +++ b/arch/powerpc/kernel/kprobes.c @@ -30,9 +30,11 @@ #include <linux/kprobes.h> #include <linux/ptrace.h> #include <linux/preempt.h> +#include <linux/module.h> #include <asm/cacheflush.h> #include <asm/kdebug.h> #include <asm/sstep.h> +#include <asm/uaccess.h> DEFINE_PER_CPU(struct kprobe *, current_kprobe) = NULL; DEFINE_PER_CPU(struct kprobe_ctlblk, kprobe_ctlblk); @@ -372,17 +374,62 @@ static inline int kprobe_fault_handler(struct pt_regs *regs, int trapnr) { struct kprobe *cur = kprobe_running(); struct kprobe_ctlblk *kcb = get_kprobe_ctlblk(); - - if (cur->fault_handler && cur->fault_handler(cur, regs, trapnr)) - return 1; - - if (kcb->kprobe_status & KPROBE_HIT_SS) { - resume_execution(cur, regs); + const struct exception_table_entry *entry; + + switch(kcb->kprobe_status) { + case KPROBE_HIT_SS: + case KPROBE_REENTER: + /* + * We are here because the instruction being single + * stepped caused a page fault. We reset the current + * kprobe and the nip points back to the probe address + * and allow the page fault handler to continue as a + * normal page fault. + */ + regs->nip = (unsigned long)cur->addr; regs->msr &= ~MSR_SE; regs->msr |= kcb->kprobe_saved_msr; - - reset_current_kprobe(); + if (kcb->kprobe_status == KPROBE_REENTER) + restore_previous_kprobe(kcb); + else + reset_current_kprobe(); preempt_enable_no_resched(); + break; + case KPROBE_HIT_ACTIVE: + case KPROBE_HIT_SSDONE: + /* + * We increment the nmissed count for accounting, + * we can also use npre/npostfault count for accouting + * these specific fault cases. + */ + kprobes_inc_nmissed_count(cur); + + /* + * We come here because instructions in the pre/post + * handler caused the page_fault, this could happen + * if handler tries to access user space by + * copy_from_user(), get_user() etc. Let the + * user-specified handler try to fix it first. + */ + if (cur->fault_handler && cur->fault_handler(cur, regs, trapnr)) + return 1; + + /* + * In case the user-specified fault handler returned + * zero, try to fix up. 
+ */ + if ((entry = search_exception_tables(regs->nip)) != NULL) { + regs->nip = entry->fixup; + return 1; + } + + /* + * fixup_exception() could not handle it, + * Let do_page_fault() fix it. + */ + break; + default: + break; } return 0; } @@ -396,6 +443,9 @@ int __kprobes kprobe_exceptions_notify(struct notifier_block *self, struct die_args *args = (struct die_args *)data; int ret = NOTIFY_DONE; + if (args->regs && user_mode(args->regs)) + return ret; + switch (val) { case DIE_BPT: if (kprobe_handler(args->regs)) diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c index 1770a066c217..f698aa77127e 100644 --- a/arch/powerpc/kernel/process.c +++ b/arch/powerpc/kernel/process.c @@ -35,7 +35,6 @@ #include <linux/mqueue.h> #include <linux/hardirq.h> #include <linux/utsname.h> -#include <linux/kprobes.h> #include <asm/pgtable.h> #include <asm/uaccess.h> @@ -460,7 +459,6 @@ void show_regs(struct pt_regs * regs) void exit_thread(void) { - kprobe_flush_task(current); discard_lazy_cpu_state(); } diff --git a/arch/powerpc/kernel/setup_64.c b/arch/powerpc/kernel/setup_64.c index 2f3fdad35594..e20c1fae3423 100644 --- a/arch/powerpc/kernel/setup_64.c +++ b/arch/powerpc/kernel/setup_64.c @@ -579,7 +579,8 @@ void __init setup_arch(char **cmdline_p) panic_timeout = 180; if (ppc_md.panic) - notifier_chain_register(&panic_notifier_list, &ppc64_panic_block); + atomic_notifier_chain_register(&panic_notifier_list, + &ppc64_panic_block); init_mm.start_code = PAGE_OFFSET; init_mm.end_code = (unsigned long) _etext; diff --git a/arch/powerpc/kernel/sys_ppc32.c b/arch/powerpc/kernel/sys_ppc32.c index cd75ab2908fa..ec274e688816 100644 --- a/arch/powerpc/kernel/sys_ppc32.c +++ b/arch/powerpc/kernel/sys_ppc32.c @@ -24,7 +24,6 @@ #include <linux/resource.h> #include <linux/times.h> #include <linux/utsname.h> -#include <linux/timex.h> #include <linux/smp.h> #include <linux/smp_lock.h> #include <linux/sem.h> @@ -161,78 +160,6 @@ asmlinkage long compat_sys_sysfs(u32 option, u32 arg1, u32 arg2) return sys_sysfs((int)option, arg1, arg2); } -/* Handle adjtimex compatibility. 
*/ -struct timex32 { - u32 modes; - s32 offset, freq, maxerror, esterror; - s32 status, constant, precision, tolerance; - struct compat_timeval time; - s32 tick; - s32 ppsfreq, jitter, shift, stabil; - s32 jitcnt, calcnt, errcnt, stbcnt; - s32 :32; s32 :32; s32 :32; s32 :32; - s32 :32; s32 :32; s32 :32; s32 :32; - s32 :32; s32 :32; s32 :32; s32 :32; -}; - -extern int do_adjtimex(struct timex *); - -asmlinkage long compat_sys_adjtimex(struct timex32 __user *utp) -{ - struct timex txc; - int ret; - - memset(&txc, 0, sizeof(struct timex)); - - if(get_user(txc.modes, &utp->modes) || - __get_user(txc.offset, &utp->offset) || - __get_user(txc.freq, &utp->freq) || - __get_user(txc.maxerror, &utp->maxerror) || - __get_user(txc.esterror, &utp->esterror) || - __get_user(txc.status, &utp->status) || - __get_user(txc.constant, &utp->constant) || - __get_user(txc.precision, &utp->precision) || - __get_user(txc.tolerance, &utp->tolerance) || - __get_user(txc.time.tv_sec, &utp->time.tv_sec) || - __get_user(txc.time.tv_usec, &utp->time.tv_usec) || - __get_user(txc.tick, &utp->tick) || - __get_user(txc.ppsfreq, &utp->ppsfreq) || - __get_user(txc.jitter, &utp->jitter) || - __get_user(txc.shift, &utp->shift) || - __get_user(txc.stabil, &utp->stabil) || - __get_user(txc.jitcnt, &utp->jitcnt) || - __get_user(txc.calcnt, &utp->calcnt) || - __get_user(txc.errcnt, &utp->errcnt) || - __get_user(txc.stbcnt, &utp->stbcnt)) - return -EFAULT; - - ret = do_adjtimex(&txc); - - if(put_user(txc.modes, &utp->modes) || - __put_user(txc.offset, &utp->offset) || - __put_user(txc.freq, &utp->freq) || - __put_user(txc.maxerror, &utp->maxerror) || - __put_user(txc.esterror, &utp->esterror) || - __put_user(txc.status, &utp->status) || - __put_user(txc.constant, &utp->constant) || - __put_user(txc.precision, &utp->precision) || - __put_user(txc.tolerance, &utp->tolerance) || - __put_user(txc.time.tv_sec, &utp->time.tv_sec) || - __put_user(txc.time.tv_usec, &utp->time.tv_usec) || - __put_user(txc.tick, &utp->tick) || - __put_user(txc.ppsfreq, &utp->ppsfreq) || - __put_user(txc.jitter, &utp->jitter) || - __put_user(txc.shift, &utp->shift) || - __put_user(txc.stabil, &utp->stabil) || - __put_user(txc.jitcnt, &utp->jitcnt) || - __put_user(txc.calcnt, &utp->calcnt) || - __put_user(txc.errcnt, &utp->errcnt) || - __put_user(txc.stbcnt, &utp->stbcnt)) - ret = -EFAULT; - - return ret; -} - asmlinkage long compat_sys_pause(void) { current->state = TASK_INTERRUPTIBLE; diff --git a/arch/powerpc/kernel/traps.c b/arch/powerpc/kernel/traps.c index 98660aedeeb7..9763faab6739 100644 --- a/arch/powerpc/kernel/traps.c +++ b/arch/powerpc/kernel/traps.c @@ -74,19 +74,19 @@ EXPORT_SYMBOL(__debugger_dabr_match); EXPORT_SYMBOL(__debugger_fault_handler); #endif -struct notifier_block *powerpc_die_chain; -static DEFINE_SPINLOCK(die_notifier_lock); +ATOMIC_NOTIFIER_HEAD(powerpc_die_chain); int register_die_notifier(struct notifier_block *nb) { - int err = 0; - unsigned long flags; + return atomic_notifier_chain_register(&powerpc_die_chain, nb); +} +EXPORT_SYMBOL(register_die_notifier); - spin_lock_irqsave(&die_notifier_lock, flags); - err = notifier_chain_register(&powerpc_die_chain, nb); - spin_unlock_irqrestore(&die_notifier_lock, flags); - return err; +int unregister_die_notifier(struct notifier_block *nb) +{ + return atomic_notifier_chain_unregister(&powerpc_die_chain, nb); } +EXPORT_SYMBOL(unregister_die_notifier); /* * Trap & Exception support diff --git a/arch/powerpc/mm/imalloc.c b/arch/powerpc/mm/imalloc.c index 8b0c132bc163..add8c1a9af68 100644 
--- a/arch/powerpc/mm/imalloc.c +++ b/arch/powerpc/mm/imalloc.c @@ -13,12 +13,12 @@ #include <asm/uaccess.h> #include <asm/pgalloc.h> #include <asm/pgtable.h> -#include <asm/semaphore.h> +#include <linux/mutex.h> #include <asm/cacheflush.h> #include "mmu_decl.h" -static DECLARE_MUTEX(imlist_sem); +static DEFINE_MUTEX(imlist_mutex); struct vm_struct * imlist = NULL; static int get_free_im_addr(unsigned long size, unsigned long *im_addr) @@ -257,7 +257,7 @@ struct vm_struct * im_get_free_area(unsigned long size) struct vm_struct *area; unsigned long addr; - down(&imlist_sem); + mutex_lock(&imlist_mutex); if (get_free_im_addr(size, &addr)) { printk(KERN_ERR "%s() cannot obtain addr for size 0x%lx\n", __FUNCTION__, size); @@ -272,7 +272,7 @@ struct vm_struct * im_get_free_area(unsigned long size) __FUNCTION__, addr, size); } next_im_done: - up(&imlist_sem); + mutex_unlock(&imlist_mutex); return area; } @@ -281,9 +281,9 @@ struct vm_struct * im_get_area(unsigned long v_addr, unsigned long size, { struct vm_struct *area; - down(&imlist_sem); + mutex_lock(&imlist_mutex); area = __im_get_area(v_addr, size, criteria); - up(&imlist_sem); + mutex_unlock(&imlist_mutex); return area; } @@ -297,17 +297,17 @@ void im_free(void * addr) printk(KERN_ERR "Trying to %s bad address (%p)\n", __FUNCTION__, addr); return; } - down(&imlist_sem); + mutex_lock(&imlist_mutex); for (p = &imlist ; (tmp = *p) ; p = &tmp->next) { if (tmp->addr == addr) { *p = tmp->next; unmap_vm_area(tmp); kfree(tmp); - up(&imlist_sem); + mutex_unlock(&imlist_mutex); return; } } - up(&imlist_sem); + mutex_unlock(&imlist_mutex); printk(KERN_ERR "Trying to %s nonexistent area (%p)\n", __FUNCTION__, addr); } diff --git a/arch/powerpc/mm/mem.c b/arch/powerpc/mm/mem.c index badac10d700c..5e435a9c3431 100644 --- a/arch/powerpc/mm/mem.c +++ b/arch/powerpc/mm/mem.c @@ -195,7 +195,7 @@ void show_mem(void) printk("Mem-info:\n"); show_free_areas(); printk("Free swap: %6ldkB\n", nr_swap_pages<<(PAGE_SHIFT-10)); - for_each_pgdat(pgdat) { + for_each_online_pgdat(pgdat) { unsigned long flags; pgdat_resize_lock(pgdat, &flags); for (i = 0; i < pgdat->node_spanned_pages; i++) { @@ -351,7 +351,7 @@ void __init mem_init(void) max_mapnr = max_pfn; totalram_pages += free_all_bootmem(); #endif - for_each_pgdat(pgdat) { + for_each_online_pgdat(pgdat) { for (i = 0; i < pgdat->node_spanned_pages; i++) { if (!pfn_valid(pgdat->node_start_pfn + i)) continue; diff --git a/arch/powerpc/platforms/cell/spu_base.c b/arch/powerpc/platforms/cell/spu_base.c index d75ae03df686..a8fa1eeeb174 100644 --- a/arch/powerpc/platforms/cell/spu_base.c +++ b/arch/powerpc/platforms/cell/spu_base.c @@ -32,7 +32,7 @@ #include <asm/io.h> #include <asm/prom.h> -#include <asm/semaphore.h> +#include <linux/mutex.h> #include <asm/spu.h> #include <asm/mmu_context.h> @@ -342,7 +342,7 @@ spu_free_irqs(struct spu *spu) } static LIST_HEAD(spu_list); -static DECLARE_MUTEX(spu_mutex); +static DEFINE_MUTEX(spu_mutex); static void spu_init_channels(struct spu *spu) { @@ -382,7 +382,7 @@ struct spu *spu_alloc(void) { struct spu *spu; - down(&spu_mutex); + mutex_lock(&spu_mutex); if (!list_empty(&spu_list)) { spu = list_entry(spu_list.next, struct spu, list); list_del_init(&spu->list); @@ -391,7 +391,7 @@ struct spu *spu_alloc(void) pr_debug("No SPU left\n"); spu = NULL; } - up(&spu_mutex); + mutex_unlock(&spu_mutex); if (spu) spu_init_channels(spu); @@ -402,9 +402,9 @@ EXPORT_SYMBOL_GPL(spu_alloc); void spu_free(struct spu *spu) { - down(&spu_mutex); + mutex_lock(&spu_mutex); list_add_tail(&spu->list, 
&spu_list); - up(&spu_mutex); + mutex_unlock(&spu_mutex); } EXPORT_SYMBOL_GPL(spu_free); @@ -633,14 +633,14 @@ static int __init create_spu(struct device_node *spe) spu->wbox_callback = NULL; spu->stop_callback = NULL; - down(&spu_mutex); + mutex_lock(&spu_mutex); spu->number = number++; ret = spu_request_irqs(spu); if (ret) goto out_unmap; list_add(&spu->list, &spu_list); - up(&spu_mutex); + mutex_unlock(&spu_mutex); pr_debug(KERN_DEBUG "Using SPE %s %02x %p %p %p %p %d\n", spu->name, spu->isrc, spu->local_store, @@ -648,7 +648,7 @@ static int __init create_spu(struct device_node *spe) goto out; out_unmap: - up(&spu_mutex); + mutex_unlock(&spu_mutex); spu_unmap(spu); out_free: kfree(spu); @@ -668,10 +668,10 @@ static void destroy_spu(struct spu *spu) static void cleanup_spu_base(void) { struct spu *spu, *tmp; - down(&spu_mutex); + mutex_lock(&spu_mutex); list_for_each_entry_safe(spu, tmp, &spu_list, list) destroy_spu(spu); - up(&spu_mutex); + mutex_unlock(&spu_mutex); } module_exit(cleanup_spu_base); diff --git a/arch/powerpc/platforms/cell/spufs/inode.c b/arch/powerpc/platforms/cell/spufs/inode.c index b3962c3a0348..5be40aa483fd 100644 --- a/arch/powerpc/platforms/cell/spufs/inode.c +++ b/arch/powerpc/platforms/cell/spufs/inode.c @@ -103,7 +103,7 @@ spufs_setattr(struct dentry *dentry, struct iattr *attr) static int spufs_new_file(struct super_block *sb, struct dentry *dentry, - struct file_operations *fops, int mode, + const struct file_operations *fops, int mode, struct spu_context *ctx) { static struct inode_operations spufs_file_iops = { diff --git a/arch/powerpc/platforms/chrp/time.c b/arch/powerpc/platforms/chrp/time.c index 12c6f689b1aa..7d7889026936 100644 --- a/arch/powerpc/platforms/chrp/time.c +++ b/arch/powerpc/platforms/chrp/time.c @@ -120,33 +120,15 @@ int chrp_set_rtc_time(struct rtc_time *tmarg) void chrp_get_rtc_time(struct rtc_time *tm) { unsigned int year, mon, day, hour, min, sec; - int uip, i; - /* The Linux interpretation of the CMOS clock register contents: - * When the Update-In-Progress (UIP) flag goes from 1 to 0, the - * RTC registers show the second which has precisely just started. - * Let's hope other operating systems interpret the RTC the same way. - */ - - /* Since the UIP flag is set for about 2.2 ms and the clock - * is typically written with a precision of 1 jiffy, trying - * to obtain a precision better than a few milliseconds is - * an illusion. Only consistency is interesting, this also - * allows to use the routine for /dev/rtc without a potential - * 1 second kernel busy loop triggered by any reader of /dev/rtc. 
- */ - - for ( i = 0; i<1000000; i++) { - uip = chrp_cmos_clock_read(RTC_FREQ_SELECT); + do { sec = chrp_cmos_clock_read(RTC_SECONDS); min = chrp_cmos_clock_read(RTC_MINUTES); hour = chrp_cmos_clock_read(RTC_HOURS); day = chrp_cmos_clock_read(RTC_DAY_OF_MONTH); mon = chrp_cmos_clock_read(RTC_MONTH); year = chrp_cmos_clock_read(RTC_YEAR); - uip |= chrp_cmos_clock_read(RTC_FREQ_SELECT); - if ((uip & RTC_UIP)==0) break; - } + } while (sec != chrp_cmos_clock_read(RTC_SECONDS)); if (!(chrp_cmos_clock_read(RTC_CONTROL) & RTC_DM_BINARY) || RTC_ALWAYS_BCD) { BCD_TO_BIN(sec); diff --git a/arch/powerpc/platforms/maple/time.c b/arch/powerpc/platforms/maple/time.c index 5e6981d17379..b9a2b3d4bf33 100644 --- a/arch/powerpc/platforms/maple/time.c +++ b/arch/powerpc/platforms/maple/time.c @@ -60,34 +60,14 @@ static void maple_clock_write(unsigned long val, int addr) void maple_get_rtc_time(struct rtc_time *tm) { - int uip, i; - - /* The Linux interpretation of the CMOS clock register contents: - * When the Update-In-Progress (UIP) flag goes from 1 to 0, the - * RTC registers show the second which has precisely just started. - * Let's hope other operating systems interpret the RTC the same way. - */ - - /* Since the UIP flag is set for about 2.2 ms and the clock - * is typically written with a precision of 1 jiffy, trying - * to obtain a precision better than a few milliseconds is - * an illusion. Only consistency is interesting, this also - * allows to use the routine for /dev/rtc without a potential - * 1 second kernel busy loop triggered by any reader of /dev/rtc. - */ - - for (i = 0; i<1000000; i++) { - uip = maple_clock_read(RTC_FREQ_SELECT); + do { tm->tm_sec = maple_clock_read(RTC_SECONDS); tm->tm_min = maple_clock_read(RTC_MINUTES); tm->tm_hour = maple_clock_read(RTC_HOURS); tm->tm_mday = maple_clock_read(RTC_DAY_OF_MONTH); tm->tm_mon = maple_clock_read(RTC_MONTH); tm->tm_year = maple_clock_read(RTC_YEAR); - uip |= maple_clock_read(RTC_FREQ_SELECT); - if ((uip & RTC_UIP)==0) - break; - } + } while (tm->tm_sec != maple_clock_read(RTC_SECONDS)); if (!(maple_clock_read(RTC_CONTROL) & RTC_DM_BINARY) || RTC_ALWAYS_BCD) { diff --git a/arch/powerpc/platforms/powermac/cpufreq_64.c b/arch/powerpc/platforms/powermac/cpufreq_64.c index a415e8d2f7af..b57e465a1b71 100644 --- a/arch/powerpc/platforms/powermac/cpufreq_64.c +++ b/arch/powerpc/platforms/powermac/cpufreq_64.c @@ -21,6 +21,7 @@ #include <linux/cpufreq.h> #include <linux/init.h> #include <linux/completion.h> +#include <linux/mutex.h> #include <asm/prom.h> #include <asm/machdep.h> #include <asm/irq.h> @@ -90,7 +91,7 @@ static void (*g5_switch_volt)(int speed_mode); static int (*g5_switch_freq)(int speed_mode); static int (*g5_query_freq)(void); -static DECLARE_MUTEX(g5_switch_mutex); +static DEFINE_MUTEX(g5_switch_mutex); static struct smu_sdbp_fvt *g5_fvt_table; /* table of op. 
points */ @@ -327,7 +328,7 @@ static int g5_cpufreq_target(struct cpufreq_policy *policy, if (g5_pmode_cur == newstate) return 0; - down(&g5_switch_mutex); + mutex_lock(&g5_switch_mutex); freqs.old = g5_cpu_freqs[g5_pmode_cur].frequency; freqs.new = g5_cpu_freqs[newstate].frequency; @@ -337,7 +338,7 @@ static int g5_cpufreq_target(struct cpufreq_policy *policy, rc = g5_switch_freq(newstate); cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE); - up(&g5_switch_mutex); + mutex_unlock(&g5_switch_mutex); return rc; } diff --git a/arch/powerpc/platforms/pseries/reconfig.c b/arch/powerpc/platforms/pseries/reconfig.c index 86cfa6ecdcf3..5ad90676567a 100644 --- a/arch/powerpc/platforms/pseries/reconfig.c +++ b/arch/powerpc/platforms/pseries/reconfig.c @@ -94,16 +94,16 @@ static struct device_node *derive_parent(const char *path) return parent; } -static struct notifier_block *pSeries_reconfig_chain; +static BLOCKING_NOTIFIER_HEAD(pSeries_reconfig_chain); int pSeries_reconfig_notifier_register(struct notifier_block *nb) { - return notifier_chain_register(&pSeries_reconfig_chain, nb); + return blocking_notifier_chain_register(&pSeries_reconfig_chain, nb); } void pSeries_reconfig_notifier_unregister(struct notifier_block *nb) { - notifier_chain_unregister(&pSeries_reconfig_chain, nb); + blocking_notifier_chain_unregister(&pSeries_reconfig_chain, nb); } static int pSeries_reconfig_add_node(const char *path, struct property *proplist) @@ -131,7 +131,7 @@ static int pSeries_reconfig_add_node(const char *path, struct property *proplist goto out_err; } - err = notifier_call_chain(&pSeries_reconfig_chain, + err = blocking_notifier_call_chain(&pSeries_reconfig_chain, PSERIES_RECONFIG_ADD, np); if (err == NOTIFY_BAD) { printk(KERN_ERR "Failed to add device node %s\n", path); @@ -171,7 +171,7 @@ static int pSeries_reconfig_remove_node(struct device_node *np) remove_node_proc_entries(np); - notifier_call_chain(&pSeries_reconfig_chain, + blocking_notifier_call_chain(&pSeries_reconfig_chain, PSERIES_RECONFIG_REMOVE, np); of_detach_node(np); diff --git a/arch/ppc/Kconfig b/arch/ppc/Kconfig index 54a0a9bb12dd..3a3e302b4ea2 100644 --- a/arch/ppc/Kconfig +++ b/arch/ppc/Kconfig @@ -19,6 +19,10 @@ config RWSEM_XCHGADD_ALGORITHM bool default y +config GENERIC_HWEIGHT + bool + default y + config GENERIC_CALIBRATE_DELAY bool default y diff --git a/arch/ppc/kernel/ppc_htab.c b/arch/ppc/kernel/ppc_htab.c index 2f5c7650274f..9b84bffdefce 100644 --- a/arch/ppc/kernel/ppc_htab.c +++ b/arch/ppc/kernel/ppc_htab.c @@ -52,7 +52,7 @@ static int ppc_htab_open(struct inode *inode, struct file *file) return single_open(file, ppc_htab_show, NULL); } -struct file_operations ppc_htab_operations = { +const struct file_operations ppc_htab_operations = { .open = ppc_htab_open, .read = seq_read, .llseek = seq_lseek, diff --git a/arch/ppc/platforms/chrp_time.c b/arch/ppc/platforms/chrp_time.c index c8627770af13..51e06ad66168 100644 --- a/arch/ppc/platforms/chrp_time.c +++ b/arch/ppc/platforms/chrp_time.c @@ -119,44 +119,28 @@ int chrp_set_rtc_time(unsigned long nowtime) unsigned long chrp_get_rtc_time(void) { unsigned int year, mon, day, hour, min, sec; - int uip, i; - /* The Linux interpretation of the CMOS clock register contents: - * When the Update-In-Progress (UIP) flag goes from 1 to 0, the - * RTC registers show the second which has precisely just started. - * Let's hope other operating systems interpret the RTC the same way. 
- */ - - /* Since the UIP flag is set for about 2.2 ms and the clock - * is typically written with a precision of 1 jiffy, trying - * to obtain a precision better than a few milliseconds is - * an illusion. Only consistency is interesting, this also - * allows to use the routine for /dev/rtc without a potential - * 1 second kernel busy loop triggered by any reader of /dev/rtc. - */ - - for ( i = 0; i<1000000; i++) { - uip = chrp_cmos_clock_read(RTC_FREQ_SELECT); + do { sec = chrp_cmos_clock_read(RTC_SECONDS); min = chrp_cmos_clock_read(RTC_MINUTES); hour = chrp_cmos_clock_read(RTC_HOURS); day = chrp_cmos_clock_read(RTC_DAY_OF_MONTH); mon = chrp_cmos_clock_read(RTC_MONTH); year = chrp_cmos_clock_read(RTC_YEAR); - uip |= chrp_cmos_clock_read(RTC_FREQ_SELECT); - if ((uip & RTC_UIP)==0) break; + } while (sec != chrp_cmos_clock_read(RTC_SECONDS)); + + if (!(chrp_cmos_clock_read(RTC_CONTROL) & RTC_DM_BINARY) + || RTC_ALWAYS_BCD) { + BCD_TO_BIN(sec); + BCD_TO_BIN(min); + BCD_TO_BIN(hour); + BCD_TO_BIN(day); + BCD_TO_BIN(mon); + BCD_TO_BIN(year); } - if (!(chrp_cmos_clock_read(RTC_CONTROL) & RTC_DM_BINARY) || RTC_ALWAYS_BCD) - { - BCD_TO_BIN(sec); - BCD_TO_BIN(min); - BCD_TO_BIN(hour); - BCD_TO_BIN(day); - BCD_TO_BIN(mon); - BCD_TO_BIN(year); - } - if ((year += 1900) < 1970) + year += 1900; + if (year < 1970) year += 100; return mktime(year, mon, day, hour, min, sec); } diff --git a/arch/ppc/platforms/prep_setup.c b/arch/ppc/platforms/prep_setup.c index a0fc628ffb1e..d95c05d9824d 100644 --- a/arch/ppc/platforms/prep_setup.c +++ b/arch/ppc/platforms/prep_setup.c @@ -736,7 +736,7 @@ ibm_statusled_progress(char *s, unsigned short hex) hex = 0xfff; if (!notifier_installed) { ++notifier_installed; - notifier_chain_register(&panic_notifier_list, + atomic_notifier_chain_register(&panic_notifier_list, &ibm_statusled_block); } } diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig index 2b7364ed23bc..01c5c082f970 100644 --- a/arch/s390/Kconfig +++ b/arch/s390/Kconfig @@ -14,6 +14,10 @@ config RWSEM_XCHGADD_ALGORITHM bool default y +config GENERIC_HWEIGHT + bool + default y + config GENERIC_CALIBRATE_DELAY bool default y diff --git a/arch/s390/crypto/crypt_s390_query.c b/arch/s390/crypto/crypt_s390_query.c index def02bdc44a4..54fb11d7fadd 100644 --- a/arch/s390/crypto/crypt_s390_query.c +++ b/arch/s390/crypto/crypt_s390_query.c @@ -55,7 +55,7 @@ static void query_available_functions(void) printk(KERN_INFO "KMC_AES_256: %d\n", crypt_s390_func_available(KMC_AES_256_ENCRYPT)); - /* query available KIMD fucntions */ + /* query available KIMD functions */ printk(KERN_INFO "KIMD_QUERY: %d\n", crypt_s390_func_available(KIMD_QUERY)); printk(KERN_INFO "KIMD_SHA_1: %d\n", diff --git a/arch/s390/kernel/compat_linux.c b/arch/s390/kernel/compat_linux.c index cc058dc3bc8b..5e14de37c17b 100644 --- a/arch/s390/kernel/compat_linux.c +++ b/arch/s390/kernel/compat_linux.c @@ -26,7 +26,6 @@ #include <linux/resource.h> #include <linux/times.h> #include <linux/utsname.h> -#include <linux/timex.h> #include <linux/smp.h> #include <linux/smp_lock.h> #include <linux/sem.h> @@ -705,79 +704,6 @@ asmlinkage long sys32_sendfile64(int out_fd, int in_fd, return ret; } -/* Handle adjtimex compatibility. 
*/ - -struct timex32 { - u32 modes; - s32 offset, freq, maxerror, esterror; - s32 status, constant, precision, tolerance; - struct compat_timeval time; - s32 tick; - s32 ppsfreq, jitter, shift, stabil; - s32 jitcnt, calcnt, errcnt, stbcnt; - s32 :32; s32 :32; s32 :32; s32 :32; - s32 :32; s32 :32; s32 :32; s32 :32; - s32 :32; s32 :32; s32 :32; s32 :32; -}; - -extern int do_adjtimex(struct timex *); - -asmlinkage long sys32_adjtimex(struct timex32 __user *utp) -{ - struct timex txc; - int ret; - - memset(&txc, 0, sizeof(struct timex)); - - if(get_user(txc.modes, &utp->modes) || - __get_user(txc.offset, &utp->offset) || - __get_user(txc.freq, &utp->freq) || - __get_user(txc.maxerror, &utp->maxerror) || - __get_user(txc.esterror, &utp->esterror) || - __get_user(txc.status, &utp->status) || - __get_user(txc.constant, &utp->constant) || - __get_user(txc.precision, &utp->precision) || - __get_user(txc.tolerance, &utp->tolerance) || - __get_user(txc.time.tv_sec, &utp->time.tv_sec) || - __get_user(txc.time.tv_usec, &utp->time.tv_usec) || - __get_user(txc.tick, &utp->tick) || - __get_user(txc.ppsfreq, &utp->ppsfreq) || - __get_user(txc.jitter, &utp->jitter) || - __get_user(txc.shift, &utp->shift) || - __get_user(txc.stabil, &utp->stabil) || - __get_user(txc.jitcnt, &utp->jitcnt) || - __get_user(txc.calcnt, &utp->calcnt) || - __get_user(txc.errcnt, &utp->errcnt) || - __get_user(txc.stbcnt, &utp->stbcnt)) - return -EFAULT; - - ret = do_adjtimex(&txc); - - if(put_user(txc.modes, &utp->modes) || - __put_user(txc.offset, &utp->offset) || - __put_user(txc.freq, &utp->freq) || - __put_user(txc.maxerror, &utp->maxerror) || - __put_user(txc.esterror, &utp->esterror) || - __put_user(txc.status, &utp->status) || - __put_user(txc.constant, &utp->constant) || - __put_user(txc.precision, &utp->precision) || - __put_user(txc.tolerance, &utp->tolerance) || - __put_user(txc.time.tv_sec, &utp->time.tv_sec) || - __put_user(txc.time.tv_usec, &utp->time.tv_usec) || - __put_user(txc.tick, &utp->tick) || - __put_user(txc.ppsfreq, &utp->ppsfreq) || - __put_user(txc.jitter, &utp->jitter) || - __put_user(txc.shift, &utp->shift) || - __put_user(txc.stabil, &utp->stabil) || - __put_user(txc.jitcnt, &utp->jitcnt) || - __put_user(txc.calcnt, &utp->calcnt) || - __put_user(txc.errcnt, &utp->errcnt) || - __put_user(txc.stbcnt, &utp->stbcnt)) - ret = -EFAULT; - - return ret; -} - #ifdef CONFIG_SYSCTL struct __sysctl_args32 { u32 name; diff --git a/arch/s390/kernel/compat_wrapper.S b/arch/s390/kernel/compat_wrapper.S index 50e80138e7ad..199da68bd7be 100644 --- a/arch/s390/kernel/compat_wrapper.S +++ b/arch/s390/kernel/compat_wrapper.S @@ -551,10 +551,10 @@ sys32_newuname_wrapper: llgtr %r2,%r2 # struct new_utsname * jg s390x_newuname # branch to system call - .globl sys32_adjtimex_wrapper -sys32_adjtimex_wrapper: - llgtr %r2,%r2 # struct timex_emu31 * - jg sys32_adjtimex # branch to system call + .globl compat_sys_adjtimex_wrapper +compat_sys_adjtimex_wrapper: + llgtr %r2,%r2 # struct compat_timex * + jg compat_sys_adjtimex # branch to system call .globl sys32_mprotect_wrapper sys32_mprotect_wrapper: diff --git a/arch/s390/kernel/process.c b/arch/s390/kernel/process.c index 99182a415fe7..4a0f5a1551ea 100644 --- a/arch/s390/kernel/process.c +++ b/arch/s390/kernel/process.c @@ -76,17 +76,17 @@ unsigned long thread_saved_pc(struct task_struct *tsk) /* * Need to know about CPUs going idle? 
*/ -static struct notifier_block *idle_chain; +static ATOMIC_NOTIFIER_HEAD(idle_chain); int register_idle_notifier(struct notifier_block *nb) { - return notifier_chain_register(&idle_chain, nb); + return atomic_notifier_chain_register(&idle_chain, nb); } EXPORT_SYMBOL(register_idle_notifier); int unregister_idle_notifier(struct notifier_block *nb) { - return notifier_chain_unregister(&idle_chain, nb); + return atomic_notifier_chain_unregister(&idle_chain, nb); } EXPORT_SYMBOL(unregister_idle_notifier); @@ -95,7 +95,7 @@ void do_monitor_call(struct pt_regs *regs, long interruption_code) /* disable monitor call class 0 */ __ctl_clear_bit(8, 15); - notifier_call_chain(&idle_chain, CPU_NOT_IDLE, + atomic_notifier_call_chain(&idle_chain, CPU_NOT_IDLE, (void *)(long) smp_processor_id()); } @@ -116,7 +116,8 @@ static void default_idle(void) return; } - rc = notifier_call_chain(&idle_chain, CPU_IDLE, (void *)(long) cpu); + rc = atomic_notifier_call_chain(&idle_chain, + CPU_IDLE, (void *)(long) cpu); if (rc != NOTIFY_OK && rc != NOTIFY_DONE) BUG(); if (rc != NOTIFY_OK) { diff --git a/arch/s390/kernel/syscalls.S b/arch/s390/kernel/syscalls.S index 7c88d85c3597..2f56654da821 100644 --- a/arch/s390/kernel/syscalls.S +++ b/arch/s390/kernel/syscalls.S @@ -132,7 +132,7 @@ SYSCALL(sys_clone_glue,sys_clone_glue,sys32_clone_glue) /* 120 */ SYSCALL(sys_setdomainname,sys_setdomainname,sys32_setdomainname_wrapper) SYSCALL(sys_newuname,s390x_newuname,sys32_newuname_wrapper) NI_SYSCALL /* modify_ldt for i386 */ -SYSCALL(sys_adjtimex,sys_adjtimex,sys32_adjtimex_wrapper) +SYSCALL(sys_adjtimex,sys_adjtimex,compat_sys_adjtimex_wrapper) SYSCALL(sys_mprotect,sys_mprotect,sys32_mprotect_wrapper) /* 125 */ SYSCALL(sys_sigprocmask,sys_sigprocmask,compat_sys_sigprocmask_wrapper) NI_SYSCALL /* old "create module" */ diff --git a/arch/sh/Kconfig b/arch/sh/Kconfig index e9b275d90737..58583f459471 100644 --- a/arch/sh/Kconfig +++ b/arch/sh/Kconfig @@ -21,6 +21,14 @@ config RWSEM_GENERIC_SPINLOCK config RWSEM_XCHGADD_ALGORITHM bool +config GENERIC_FIND_NEXT_BIT + bool + default y + +config GENERIC_HWEIGHT + bool + default y + config GENERIC_HARDIRQS bool default y diff --git a/arch/sh/boards/mpc1211/rtc.c b/arch/sh/boards/mpc1211/rtc.c index 4d100f048072..a76c655dceee 100644 --- a/arch/sh/boards/mpc1211/rtc.c +++ b/arch/sh/boards/mpc1211/rtc.c @@ -9,36 +9,16 @@ #include <linux/kernel.h> #include <linux/sched.h> #include <linux/time.h> +#include <linux/bcd.h> #include <linux/mc146818rtc.h> -#ifndef BCD_TO_BIN -#define BCD_TO_BIN(val) ((val)=((val)&15) + ((val)>>4)*10) -#endif - -#ifndef BIN_TO_BCD -#define BIN_TO_BCD(val) ((val)=(((val)/10)<<4) + (val)%10) -#endif - -/* arc/i386/kernel/time.c */ unsigned long get_cmos_time(void) { unsigned int year, mon, day, hour, min, sec; - int i; spin_lock(&rtc_lock); - /* The Linux interpretation of the CMOS clock register contents: - * When the Update-In-Progress (UIP) flag goes from 1 to 0, the - * RTC registers show the second which has precisely just started. - * Let's hope other operating systems interpret the RTC the same way. - */ - /* read RTC exactly on falling edge of update flag */ - for (i = 0 ; i < 1000000 ; i++) /* may take up to 1 second... */ - if (CMOS_READ(RTC_FREQ_SELECT) & RTC_UIP) - break; - for (i = 0 ; i < 1000000 ; i++) /* must try at least 2.228 ms */ - if (!(CMOS_READ(RTC_FREQ_SELECT) & RTC_UIP)) - break; - do { /* Isn't this overkill ? 
UIP above should guarantee consistency */ + + do { sec = CMOS_READ(RTC_SECONDS); min = CMOS_READ(RTC_MINUTES); hour = CMOS_READ(RTC_HOURS); @@ -46,18 +26,22 @@ unsigned long get_cmos_time(void) mon = CMOS_READ(RTC_MONTH); year = CMOS_READ(RTC_YEAR); } while (sec != CMOS_READ(RTC_SECONDS)); - if (!(CMOS_READ(RTC_CONTROL) & RTC_DM_BINARY) || RTC_ALWAYS_BCD) - { - BCD_TO_BIN(sec); - BCD_TO_BIN(min); - BCD_TO_BIN(hour); - BCD_TO_BIN(day); - BCD_TO_BIN(mon); - BCD_TO_BIN(year); - } + + if (!(CMOS_READ(RTC_CONTROL) & RTC_DM_BINARY) || RTC_ALWAYS_BCD) { + BCD_TO_BIN(sec); + BCD_TO_BIN(min); + BCD_TO_BIN(hour); + BCD_TO_BIN(day); + BCD_TO_BIN(mon); + BCD_TO_BIN(year); + } + spin_unlock(&rtc_lock); - if ((year += 1900) < 1970) + + year += 1900; + if (year < 1970) year += 100; + return mktime(year, mon, day, hour, min, sec); } diff --git a/arch/sh/boards/sh03/rtc.c b/arch/sh/boards/sh03/rtc.c index cbeca7037ba5..d609863cfe53 100644 --- a/arch/sh/boards/sh03/rtc.c +++ b/arch/sh/boards/sh03/rtc.c @@ -9,6 +9,7 @@ #include <linux/kernel.h> #include <linux/sched.h> #include <linux/time.h> +#include <linux/bcd.h> #include <asm/io.h> #include <linux/rtc.h> #include <linux/spinlock.h> @@ -33,14 +34,6 @@ #define RTC_BUSY 1 #define RTC_STOP 2 -#ifndef BCD_TO_BIN -#define BCD_TO_BIN(val) ((val)=((val)&15) + ((val)>>4)*10) -#endif - -#ifndef BIN_TO_BCD -#define BIN_TO_BCD(val) ((val)=(((val)/10)<<4) + (val)%10) -#endif - extern void (*rtc_get_time)(struct timespec *); extern int (*rtc_set_time)(const time_t); extern spinlock_t rtc_lock; @@ -48,13 +41,9 @@ extern spinlock_t rtc_lock; unsigned long get_cmos_time(void) { unsigned int year, mon, day, hour, min, sec; - int i; spin_lock(&rtc_lock); again: - for (i = 0 ; i < 1000000 ; i++) /* may take up to 1 second... */ - if (!(ctrl_inb(RTC_CTL) & RTC_BUSY)) - break; do { sec = (ctrl_inb(RTC_SEC1) & 0xf) + (ctrl_inb(RTC_SEC10) & 0x7) * 10; min = (ctrl_inb(RTC_MIN1) & 0xf) + (ctrl_inb(RTC_MIN10) & 0xf) * 10; diff --git a/arch/sh/kernel/cpu/rtc.c b/arch/sh/kernel/cpu/rtc.c index f8361f5e788b..4304cf75cfa2 100644 --- a/arch/sh/kernel/cpu/rtc.c +++ b/arch/sh/kernel/cpu/rtc.c @@ -9,18 +9,10 @@ #include <linux/kernel.h> #include <linux/sched.h> #include <linux/time.h> - +#include <linux/bcd.h> #include <asm/io.h> #include <asm/rtc.h> -#ifndef BCD_TO_BIN -#define BCD_TO_BIN(val) ((val)=((val)&15) + ((val)>>4)*10) -#endif - -#ifndef BIN_TO_BCD -#define BIN_TO_BCD(val) ((val)=(((val)/10)<<4) + (val)%10) -#endif - void sh_rtc_gettimeofday(struct timespec *ts) { unsigned int sec128, sec, sec2, min, hr, wk, day, mon, yr, yr100, cf_bit; diff --git a/arch/sh/kernel/setup.c b/arch/sh/kernel/setup.c index c0e79843f580..7ee4ca203616 100644 --- a/arch/sh/kernel/setup.c +++ b/arch/sh/kernel/setup.c @@ -20,6 +20,7 @@ #include <linux/root_dev.h> #include <linux/utsname.h> #include <linux/cpu.h> +#include <linux/pfn.h> #include <asm/uaccess.h> #include <asm/io.h> #include <asm/sections.h> @@ -275,10 +276,6 @@ void __init setup_arch(char **cmdline_p) sh_mv_setup(cmdline_p); -#define PFN_UP(x) (((x) + PAGE_SIZE-1) >> PAGE_SHIFT) -#define PFN_DOWN(x) ((x) >> PAGE_SHIFT) -#define PFN_PHYS(x) ((x) << PAGE_SHIFT) - /* * Find the highest page frame number we have available */ diff --git a/arch/sh64/Kconfig b/arch/sh64/Kconfig index 07b172deb872..58c678e06667 100644 --- a/arch/sh64/Kconfig +++ b/arch/sh64/Kconfig @@ -21,6 +21,14 @@ config RWSEM_GENERIC_SPINLOCK bool default y +config GENERIC_FIND_NEXT_BIT + bool + default y + +config GENERIC_HWEIGHT + bool + default y + config 
GENERIC_CALIBRATE_DELAY bool default y diff --git a/arch/sh64/kernel/setup.c b/arch/sh64/kernel/setup.c index c7a7b816a30f..d2711c9c9d13 100644 --- a/arch/sh64/kernel/setup.c +++ b/arch/sh64/kernel/setup.c @@ -48,6 +48,7 @@ #include <linux/root_dev.h> #include <linux/cpu.h> #include <linux/initrd.h> +#include <linux/pfn.h> #include <asm/processor.h> #include <asm/page.h> #include <asm/pgtable.h> diff --git a/arch/sh64/kernel/time.c b/arch/sh64/kernel/time.c index 0773c9f389f3..6b8f4d22abc6 100644 --- a/arch/sh64/kernel/time.c +++ b/arch/sh64/kernel/time.c @@ -30,6 +30,7 @@ #include <linux/profile.h> #include <linux/smp.h> #include <linux/module.h> +#include <linux/bcd.h> #include <asm/registers.h> /* required by inline __asm__ stmt. */ @@ -105,14 +106,6 @@ #define RCR1 rtc_base+0x38 #define RCR2 rtc_base+0x3c -#ifndef BCD_TO_BIN -#define BCD_TO_BIN(val) ((val)=((val)&15) + ((val)>>4)*10) -#endif - -#ifndef BIN_TO_BCD -#define BIN_TO_BCD(val) ((val)=(((val)/10)<<4) + (val)%10) -#endif - #define TICK_SIZE (tick_nsec / 1000) extern unsigned long wall_jiffies; diff --git a/arch/sparc/Kconfig b/arch/sparc/Kconfig index 7c58fc1a39c4..9431e967aa45 100644 --- a/arch/sparc/Kconfig +++ b/arch/sparc/Kconfig @@ -150,6 +150,14 @@ config RWSEM_GENERIC_SPINLOCK config RWSEM_XCHGADD_ALGORITHM bool +config GENERIC_FIND_NEXT_BIT + bool + default y + +config GENERIC_HWEIGHT + bool + default y + config GENERIC_CALIBRATE_DELAY bool default y diff --git a/arch/sparc64/Kconfig b/arch/sparc64/Kconfig index 267afddf63cf..d1e2fc566486 100644 --- a/arch/sparc64/Kconfig +++ b/arch/sparc64/Kconfig @@ -162,6 +162,14 @@ config RWSEM_XCHGADD_ALGORITHM bool default y +config GENERIC_FIND_NEXT_BIT + bool + default y + +config GENERIC_HWEIGHT + bool + default y if !ULTRA_HAS_POPULATION_COUNT + config GENERIC_CALIBRATE_DELAY bool default y diff --git a/arch/sparc64/defconfig b/arch/sparc64/defconfig index f819a9663a8d..900fb0b940d8 100644 --- a/arch/sparc64/defconfig +++ b/arch/sparc64/defconfig @@ -1,7 +1,7 @@ # # Automatically generated make config: don't edit # Linux kernel version: 2.6.16 -# Mon Mar 20 01:23:21 2006 +# Sun Mar 26 14:58:11 2006 # CONFIG_SPARC=y CONFIG_SPARC64=y @@ -38,6 +38,7 @@ CONFIG_POSIX_MQUEUE=y CONFIG_SYSCTL=y # CONFIG_AUDIT is not set # CONFIG_IKCONFIG is not set +CONFIG_RELAY=y CONFIG_INITRAMFS_SOURCE="" CONFIG_UID16=y CONFIG_CC_OPTIMIZE_FOR_SIZE=y @@ -53,10 +54,6 @@ CONFIG_BASE_FULL=y CONFIG_FUTEX=y CONFIG_EPOLL=y CONFIG_SHMEM=y -CONFIG_CC_ALIGN_FUNCTIONS=0 -CONFIG_CC_ALIGN_LABELS=0 -CONFIG_CC_ALIGN_LOOPS=0 -CONFIG_CC_ALIGN_JUMPS=0 CONFIG_SLAB=y # CONFIG_TINY_SHMEM is not set CONFIG_BASE_SMALL=0 @@ -68,7 +65,6 @@ CONFIG_BASE_SMALL=0 CONFIG_MODULES=y CONFIG_MODULE_UNLOAD=y CONFIG_MODULE_FORCE_UNLOAD=y -CONFIG_OBSOLETE_MODPARM=y CONFIG_MODVERSIONS=y CONFIG_MODULE_SRCVERSION_ALL=y CONFIG_KMOD=y @@ -76,6 +72,7 @@ CONFIG_KMOD=y # # Block layer # +CONFIG_BLK_DEV_IO_TRACE=y # # IO Schedulers @@ -111,6 +108,8 @@ CONFIG_CPU_FREQ_GOV_CONSERVATIVE=m CONFIG_US3_FREQ=m CONFIG_US2E_FREQ=m CONFIG_RWSEM_XCHGADD_ALGORITHM=y +CONFIG_GENERIC_FIND_NEXT_BIT=y +CONFIG_GENERIC_HWEIGHT=y CONFIG_GENERIC_CALIBRATE_DELAY=y CONFIG_HUGETLB_PAGE_SIZE_4MB=y # CONFIG_HUGETLB_PAGE_SIZE_512K is not set @@ -128,7 +127,6 @@ CONFIG_HAVE_MEMORY_PRESENT=y CONFIG_SPARSEMEM_EXTREME=y CONFIG_MEMORY_HOTPLUG=y CONFIG_SPLIT_PTLOCK_CPUS=4 -CONFIG_MIGRATION=y CONFIG_GENERIC_ISA_DMA=y CONFIG_SBUS=y CONFIG_SBUSCHAR=y @@ -136,7 +134,6 @@ CONFIG_SUN_AUXIO=y CONFIG_SUN_IO=y CONFIG_PCI=y CONFIG_PCI_DOMAINS=y -# CONFIG_PCI_LEGACY_PROC is not set # 
CONFIG_PCI_DEBUG is not set CONFIG_SUN_OPENPROMFS=m CONFIG_SPARC32_COMPAT=y @@ -201,6 +198,8 @@ CONFIG_TCP_CONG_VEGAS=m CONFIG_TCP_CONG_SCALABLE=m CONFIG_IPV6=m CONFIG_IPV6_PRIVACY=y +CONFIG_IPV6_ROUTER_PREF=y +CONFIG_IPV6_ROUTE_INFO=y CONFIG_INET6_AH=m CONFIG_INET6_ESP=m CONFIG_INET6_IPCOMP=m @@ -213,10 +212,12 @@ CONFIG_IPV6_TUNNEL=m # CONFIG_IP_DCCP=m CONFIG_INET_DCCP_DIAG=m +CONFIG_IP_DCCP_ACKVEC=y # # DCCP CCIDs Configuration (EXPERIMENTAL) # +CONFIG_IP_DCCP_CCID2=m CONFIG_IP_DCCP_CCID3=m CONFIG_IP_DCCP_TFRC_LIB=m @@ -224,7 +225,6 @@ CONFIG_IP_DCCP_TFRC_LIB=m # DCCP Kernel Hacking # # CONFIG_IP_DCCP_DEBUG is not set -# CONFIG_IP_DCCP_UNLOAD_HACK is not set # # SCTP Configuration (EXPERIMENTAL) @@ -309,6 +309,7 @@ CONFIG_BLK_DEV_NBD=m CONFIG_BLK_DEV_UB=m # CONFIG_BLK_DEV_RAM is not set CONFIG_BLK_DEV_RAM_COUNT=16 +# CONFIG_BLK_DEV_INITRD is not set CONFIG_CDROM_PKTCDVD=m CONFIG_CDROM_PKTCDVD_BUFFERS=8 CONFIG_CDROM_PKTCDVD_WCACHE=y @@ -722,7 +723,6 @@ CONFIG_I2C_ALGOBIT=y # CONFIG_I2C_PARPORT_LIGHT is not set # CONFIG_I2C_PROSAVAGE is not set # CONFIG_I2C_SAVAGE4 is not set -# CONFIG_SCx200_ACB is not set # CONFIG_I2C_SIS5595 is not set # CONFIG_I2C_SIS630 is not set # CONFIG_I2C_SIS96X is not set @@ -808,10 +808,6 @@ CONFIG_HWMON=y # # -# Multimedia Capabilities Port drivers -# - -# # Multimedia devices # # CONFIG_VIDEO_DEV is not set @@ -820,6 +816,7 @@ CONFIG_HWMON=y # Digital Video Broadcasting Devices # # CONFIG_DVB is not set +# CONFIG_USB_DABUSB is not set # # Graphics support @@ -901,10 +898,12 @@ CONFIG_SND_SEQ_DUMMY=m CONFIG_SND_OSSEMUL=y CONFIG_SND_MIXER_OSS=m CONFIG_SND_PCM_OSS=m +CONFIG_SND_PCM_OSS_PLUGINS=y CONFIG_SND_SEQUENCER_OSS=y # CONFIG_SND_RTCTIMER is not set # CONFIG_SND_DYNAMIC_MINORS is not set CONFIG_SND_SUPPORT_OLD_API=y +CONFIG_SND_VERBOSE_PROCFS=y # CONFIG_SND_VERBOSE_PRINTK is not set # CONFIG_SND_DEBUG is not set @@ -987,6 +986,7 @@ CONFIG_SND_SUN_CS4231=m # CONFIG_USB_ARCH_HAS_HCD=y CONFIG_USB_ARCH_HAS_OHCI=y +CONFIG_USB_ARCH_HAS_EHCI=y CONFIG_USB=y # CONFIG_USB_DEBUG is not set @@ -1014,7 +1014,6 @@ CONFIG_USB_UHCI_HCD=m # # USB Device Class drivers # -# CONFIG_OBSOLETE_OSS_USB_DRIVER is not set # CONFIG_USB_ACM is not set # CONFIG_USB_PRINTER is not set @@ -1058,15 +1057,6 @@ CONFIG_USB_HIDDEV=y # CONFIG_USB_MICROTEK is not set # -# USB Multimedia devices -# -# CONFIG_USB_DABUSB is not set - -# -# Video4Linux support is needed for USB Multimedia device support -# - -# # USB Network Adapters # # CONFIG_USB_CATC is not set @@ -1194,7 +1184,6 @@ CONFIG_TMPFS=y CONFIG_HUGETLBFS=y CONFIG_HUGETLB_PAGE=y CONFIG_RAMFS=y -CONFIG_RELAYFS_FS=m # CONFIG_CONFIGFS_FS is not set # diff --git a/arch/sparc64/kernel/kprobes.c b/arch/sparc64/kernel/kprobes.c index b9a9ce70e55c..ffc7309e9f22 100644 --- a/arch/sparc64/kernel/kprobes.c +++ b/arch/sparc64/kernel/kprobes.c @@ -6,9 +6,11 @@ #include <linux/config.h> #include <linux/kernel.h> #include <linux/kprobes.h> +#include <linux/module.h> #include <asm/kdebug.h> #include <asm/signal.h> #include <asm/cacheflush.h> +#include <asm/uaccess.h> /* We do not have hardware single-stepping on sparc64. 
* So we implement software single-stepping with breakpoint @@ -302,16 +304,68 @@ static inline int kprobe_fault_handler(struct pt_regs *regs, int trapnr) { struct kprobe *cur = kprobe_running(); struct kprobe_ctlblk *kcb = get_kprobe_ctlblk(); + const struct exception_table_entry *entry; + + switch(kcb->kprobe_status) { + case KPROBE_HIT_SS: + case KPROBE_REENTER: + /* + * We are here because the instruction being single + * stepped caused a page fault. We reset the current + * kprobe and the tpc points back to the probe address + * and allow the page fault handler to continue as a + * normal page fault. + */ + regs->tpc = (unsigned long)cur->addr; + regs->tnpc = kcb->kprobe_orig_tnpc; + regs->tstate = ((regs->tstate & ~TSTATE_PIL) | + kcb->kprobe_orig_tstate_pil); + if (kcb->kprobe_status == KPROBE_REENTER) + restore_previous_kprobe(kcb); + else + reset_current_kprobe(); + preempt_enable_no_resched(); + break; + case KPROBE_HIT_ACTIVE: + case KPROBE_HIT_SSDONE: + /* + * We increment the nmissed count for accounting, + * we can also use npre/npostfault count for accouting + * these specific fault cases. + */ + kprobes_inc_nmissed_count(cur); + + /* + * We come here because instructions in the pre/post + * handler caused the page_fault, this could happen + * if handler tries to access user space by + * copy_from_user(), get_user() etc. Let the + * user-specified handler try to fix it first. + */ + if (cur->fault_handler && cur->fault_handler(cur, regs, trapnr)) + return 1; - if (cur->fault_handler && cur->fault_handler(cur, regs, trapnr)) - return 1; + /* + * In case the user-specified fault handler returned + * zero, try to fix up. + */ - if (kcb->kprobe_status & KPROBE_HIT_SS) { - resume_execution(cur, regs, kcb); + entry = search_exception_tables(regs->tpc); + if (entry) { + regs->tpc = entry->fixup; + regs->tnpc = regs->tpc + 4; + return 1; + } - reset_current_kprobe(); - preempt_enable_no_resched(); + /* + * fixup_exception() could not handle it, + * Let do_page_fault() fix it. 
+ */ + break; + default: + break; } + return 0; } @@ -324,6 +378,9 @@ int __kprobes kprobe_exceptions_notify(struct notifier_block *self, struct die_args *args = (struct die_args *)data; int ret = NOTIFY_DONE; + if (args->regs && user_mode(args->regs)) + return ret; + switch (val) { case DIE_DEBUG: if (kprobe_handler(args->regs)) diff --git a/arch/sparc64/kernel/sparc64_ksyms.c b/arch/sparc64/kernel/sparc64_ksyms.c index 9914a17651b4..f5e8db1de76b 100644 --- a/arch/sparc64/kernel/sparc64_ksyms.c +++ b/arch/sparc64/kernel/sparc64_ksyms.c @@ -175,11 +175,6 @@ EXPORT_SYMBOL(set_bit); EXPORT_SYMBOL(clear_bit); EXPORT_SYMBOL(change_bit); -/* Bit searching */ -EXPORT_SYMBOL(find_next_bit); -EXPORT_SYMBOL(find_next_zero_bit); -EXPORT_SYMBOL(find_next_zero_le_bit); - EXPORT_SYMBOL(ivector_table); EXPORT_SYMBOL(enable_irq); EXPORT_SYMBOL(disable_irq); @@ -279,18 +274,9 @@ EXPORT_SYMBOL(__prom_getsibling); /* sparc library symbols */ EXPORT_SYMBOL(strlen); -EXPORT_SYMBOL(strnlen); EXPORT_SYMBOL(__strlen_user); EXPORT_SYMBOL(__strnlen_user); -EXPORT_SYMBOL(strcpy); -EXPORT_SYMBOL(strncpy); -EXPORT_SYMBOL(strcat); -EXPORT_SYMBOL(strncat); -EXPORT_SYMBOL(strcmp); -EXPORT_SYMBOL(strchr); -EXPORT_SYMBOL(strrchr); EXPORT_SYMBOL(strpbrk); -EXPORT_SYMBOL(strstr); #ifdef CONFIG_SOLARIS_EMUL_MODULE EXPORT_SYMBOL(linux_sparc_syscall); @@ -324,7 +310,6 @@ EXPORT_SYMBOL(__memscan_zero); EXPORT_SYMBOL(__memscan_generic); EXPORT_SYMBOL(__memcmp); EXPORT_SYMBOL(__memset); -EXPORT_SYMBOL(memchr); EXPORT_SYMBOL(csum_partial); EXPORT_SYMBOL(csum_partial_copy_nocheck); diff --git a/arch/sparc64/kernel/sys_sparc32.c b/arch/sparc64/kernel/sys_sparc32.c index 0e41df024489..2e906bad56fa 100644 --- a/arch/sparc64/kernel/sys_sparc32.c +++ b/arch/sparc64/kernel/sys_sparc32.c @@ -19,7 +19,6 @@ #include <linux/resource.h> #include <linux/times.h> #include <linux/utsname.h> -#include <linux/timex.h> #include <linux/smp.h> #include <linux/smp_lock.h> #include <linux/sem.h> @@ -945,79 +944,6 @@ asmlinkage long compat_sys_sendfile64(int out_fd, int in_fd, return ret; } -/* Handle adjtimex compatibility. 
*/ - -struct timex32 { - u32 modes; - s32 offset, freq, maxerror, esterror; - s32 status, constant, precision, tolerance; - struct compat_timeval time; - s32 tick; - s32 ppsfreq, jitter, shift, stabil; - s32 jitcnt, calcnt, errcnt, stbcnt; - s32 :32; s32 :32; s32 :32; s32 :32; - s32 :32; s32 :32; s32 :32; s32 :32; - s32 :32; s32 :32; s32 :32; s32 :32; -}; - -extern int do_adjtimex(struct timex *); - -asmlinkage long sys32_adjtimex(struct timex32 __user *utp) -{ - struct timex txc; - int ret; - - memset(&txc, 0, sizeof(struct timex)); - - if (get_user(txc.modes, &utp->modes) || - __get_user(txc.offset, &utp->offset) || - __get_user(txc.freq, &utp->freq) || - __get_user(txc.maxerror, &utp->maxerror) || - __get_user(txc.esterror, &utp->esterror) || - __get_user(txc.status, &utp->status) || - __get_user(txc.constant, &utp->constant) || - __get_user(txc.precision, &utp->precision) || - __get_user(txc.tolerance, &utp->tolerance) || - __get_user(txc.time.tv_sec, &utp->time.tv_sec) || - __get_user(txc.time.tv_usec, &utp->time.tv_usec) || - __get_user(txc.tick, &utp->tick) || - __get_user(txc.ppsfreq, &utp->ppsfreq) || - __get_user(txc.jitter, &utp->jitter) || - __get_user(txc.shift, &utp->shift) || - __get_user(txc.stabil, &utp->stabil) || - __get_user(txc.jitcnt, &utp->jitcnt) || - __get_user(txc.calcnt, &utp->calcnt) || - __get_user(txc.errcnt, &utp->errcnt) || - __get_user(txc.stbcnt, &utp->stbcnt)) - return -EFAULT; - - ret = do_adjtimex(&txc); - - if (put_user(txc.modes, &utp->modes) || - __put_user(txc.offset, &utp->offset) || - __put_user(txc.freq, &utp->freq) || - __put_user(txc.maxerror, &utp->maxerror) || - __put_user(txc.esterror, &utp->esterror) || - __put_user(txc.status, &utp->status) || - __put_user(txc.constant, &utp->constant) || - __put_user(txc.precision, &utp->precision) || - __put_user(txc.tolerance, &utp->tolerance) || - __put_user(txc.time.tv_sec, &utp->time.tv_sec) || - __put_user(txc.time.tv_usec, &utp->time.tv_usec) || - __put_user(txc.tick, &utp->tick) || - __put_user(txc.ppsfreq, &utp->ppsfreq) || - __put_user(txc.jitter, &utp->jitter) || - __put_user(txc.shift, &utp->shift) || - __put_user(txc.stabil, &utp->stabil) || - __put_user(txc.jitcnt, &utp->jitcnt) || - __put_user(txc.calcnt, &utp->calcnt) || - __put_user(txc.errcnt, &utp->errcnt) || - __put_user(txc.stbcnt, &utp->stbcnt)) - ret = -EFAULT; - - return ret; -} - /* This is just a version for 32-bit applications which does * not force O_LARGEFILE on. 
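
The hand-rolled sys32_adjtimex wrapper removed above is superseded by the generic compat_sys_adjtimex, which the systbls.S hunk that follows wires into the 32-bit syscall table. Conceptually both do the same work: widen each 32-bit field into a native struct timex, run do_adjtimex(), and narrow the results back. A rough sketch of that widening (struct names are illustrative and the field list is abbreviated):

    #include <stdint.h>

    struct timex32_sketch { int32_t modes, offset, freq, maxerror; /* ... */ };
    struct timex_sketch   { unsigned int modes; long offset, freq, maxerror; /* ... */ };

    static void widen_timex(struct timex_sketch *k, const struct timex32_sketch *u)
    {
            k->modes    = u->modes;     /* each s32 widens to the native type */
            k->offset   = u->offset;
            k->freq     = u->freq;
            k->maxerror = u->maxerror;
            /* ...remaining fields are copied the same way, do_adjtimex() runs
             * on the 64-bit struct, and the results are narrowed back out. */
    }
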
*/ diff --git a/arch/sparc64/kernel/systbls.S b/arch/sparc64/kernel/systbls.S index c3adb7ac167d..3b250f2318fd 100644 --- a/arch/sparc64/kernel/systbls.S +++ b/arch/sparc64/kernel/systbls.S @@ -63,7 +63,7 @@ sys_call_table32: /*200*/ .word sys32_ssetmask, sys_sigsuspend, compat_sys_newlstat, sys_uselib, compat_sys_old_readdir .word sys32_readahead, sys32_socketcall, sys32_syslog, sys32_lookup_dcookie, sys32_fadvise64 /*210*/ .word sys32_fadvise64_64, sys32_tgkill, sys32_waitpid, sys_swapoff, sys32_sysinfo - .word sys32_ipc, sys32_sigreturn, sys_clone, sys32_ioprio_get, sys32_adjtimex + .word sys32_ipc, sys32_sigreturn, sys_clone, sys32_ioprio_get, compat_sys_adjtimex /*220*/ .word sys32_sigprocmask, sys_ni_syscall, sys32_delete_module, sys_ni_syscall, sys32_getpgid .word sys32_bdflush, sys32_sysfs, sys_nis_syscall, sys32_setfsuid16, sys32_setfsgid16 /*230*/ .word sys32_select, compat_sys_time, sys_nis_syscall, compat_sys_stime, compat_sys_statfs64 diff --git a/arch/sparc64/kernel/time.c b/arch/sparc64/kernel/time.c index 7d61f1bfd3d3..e55b5c6ece02 100644 --- a/arch/sparc64/kernel/time.c +++ b/arch/sparc64/kernel/time.c @@ -641,23 +641,8 @@ static void __init set_system_time(void) mon = MSTK_REG_MONTH(mregs); year = MSTK_CVT_YEAR( MSTK_REG_YEAR(mregs) ); } else { - int i; - /* Dallas 12887 RTC chip. */ - /* Stolen from arch/i386/kernel/time.c, see there for - * credits and descriptive comments. - */ - for (i = 0; i < 1000000; i++) { - if (CMOS_READ(RTC_FREQ_SELECT) & RTC_UIP) - break; - udelay(10); - } - for (i = 0; i < 1000000; i++) { - if (!(CMOS_READ(RTC_FREQ_SELECT) & RTC_UIP)) - break; - udelay(10); - } do { sec = CMOS_READ(RTC_SECONDS); min = CMOS_READ(RTC_MINUTES); @@ -666,6 +651,7 @@ static void __init set_system_time(void) mon = CMOS_READ(RTC_MONTH); year = CMOS_READ(RTC_YEAR); } while (sec != CMOS_READ(RTC_SECONDS)); + if (!(CMOS_READ(RTC_CONTROL) & RTC_DM_BINARY) || RTC_ALWAYS_BCD) { BCD_TO_BIN(sec); BCD_TO_BIN(min); diff --git a/arch/sparc64/kernel/traps.c b/arch/sparc64/kernel/traps.c index df612e4f75f9..ff090bb9734b 100644 --- a/arch/sparc64/kernel/traps.c +++ b/arch/sparc64/kernel/traps.c @@ -43,18 +43,19 @@ #include <linux/kmod.h> #endif -struct notifier_block *sparc64die_chain; -static DEFINE_SPINLOCK(die_notifier_lock); +ATOMIC_NOTIFIER_HEAD(sparc64die_chain); int register_die_notifier(struct notifier_block *nb) { - int err = 0; - unsigned long flags; - spin_lock_irqsave(&die_notifier_lock, flags); - err = notifier_chain_register(&sparc64die_chain, nb); - spin_unlock_irqrestore(&die_notifier_lock, flags); - return err; + return atomic_notifier_chain_register(&sparc64die_chain, nb); } +EXPORT_SYMBOL(register_die_notifier); + +int unregister_die_notifier(struct notifier_block *nb) +{ + return atomic_notifier_chain_unregister(&sparc64die_chain, nb); +} +EXPORT_SYMBOL(unregister_die_notifier); /* When an irrecoverable trap occurs at tl > 0, the trap entry * code logs the trap state registers at every level in the trap diff --git a/arch/sparc64/lib/Makefile b/arch/sparc64/lib/Makefile index 8812ded19f01..4a725d8985f1 100644 --- a/arch/sparc64/lib/Makefile +++ b/arch/sparc64/lib/Makefile @@ -14,6 +14,6 @@ lib-y := PeeCeeI.o copy_page.o clear_page.o strlen.o strncmp.o \ NGmemcpy.o NGcopy_from_user.o NGcopy_to_user.o NGpatch.o \ NGpage.o NGbzero.o \ copy_in_user.o user_fixup.o memmove.o \ - mcount.o ipcsum.o rwsem.o xor.o find_bit.o delay.o + mcount.o ipcsum.o rwsem.o xor.o delay.o obj-y += iomap.o diff --git a/arch/sparc64/lib/find_bit.c b/arch/sparc64/lib/find_bit.c deleted file 
mode 100644 index 6059557067b4..000000000000 --- a/arch/sparc64/lib/find_bit.c +++ /dev/null @@ -1,127 +0,0 @@ -#include <linux/bitops.h> - -/** - * find_next_bit - find the next set bit in a memory region - * @addr: The address to base the search on - * @offset: The bitnumber to start searching at - * @size: The maximum size to search - */ -unsigned long find_next_bit(const unsigned long *addr, unsigned long size, - unsigned long offset) -{ - const unsigned long *p = addr + (offset >> 6); - unsigned long result = offset & ~63UL; - unsigned long tmp; - - if (offset >= size) - return size; - size -= result; - offset &= 63UL; - if (offset) { - tmp = *(p++); - tmp &= (~0UL << offset); - if (size < 64) - goto found_first; - if (tmp) - goto found_middle; - size -= 64; - result += 64; - } - while (size & ~63UL) { - if ((tmp = *(p++))) - goto found_middle; - result += 64; - size -= 64; - } - if (!size) - return result; - tmp = *p; - -found_first: - tmp &= (~0UL >> (64 - size)); - if (tmp == 0UL) /* Are any bits set? */ - return result + size; /* Nope. */ -found_middle: - return result + __ffs(tmp); -} - -/* find_next_zero_bit() finds the first zero bit in a bit string of length - * 'size' bits, starting the search at bit 'offset'. This is largely based - * on Linus's ALPHA routines, which are pretty portable BTW. - */ - -unsigned long find_next_zero_bit(const unsigned long *addr, - unsigned long size, unsigned long offset) -{ - const unsigned long *p = addr + (offset >> 6); - unsigned long result = offset & ~63UL; - unsigned long tmp; - - if (offset >= size) - return size; - size -= result; - offset &= 63UL; - if (offset) { - tmp = *(p++); - tmp |= ~0UL >> (64-offset); - if (size < 64) - goto found_first; - if (~tmp) - goto found_middle; - size -= 64; - result += 64; - } - while (size & ~63UL) { - if (~(tmp = *(p++))) - goto found_middle; - result += 64; - size -= 64; - } - if (!size) - return result; - tmp = *p; - -found_first: - tmp |= ~0UL << size; - if (tmp == ~0UL) /* Are any bits zero? */ - return result + size; /* Nope. */ -found_middle: - return result + ffz(tmp); -} - -unsigned long find_next_zero_le_bit(unsigned long *addr, unsigned long size, unsigned long offset) -{ - unsigned long *p = addr + (offset >> 6); - unsigned long result = offset & ~63UL; - unsigned long tmp; - - if (offset >= size) - return size; - size -= result; - offset &= 63UL; - if(offset) { - tmp = __swab64p(p++); - tmp |= (~0UL >> (64-offset)); - if(size < 64) - goto found_first; - if(~tmp) - goto found_middle; - size -= 64; - result += 64; - } - while(size & ~63) { - if(~(tmp = __swab64p(p++))) - goto found_middle; - result += 64; - size -= 64; - } - if(!size) - return result; - tmp = __swab64p(p); -found_first: - tmp |= (~0UL << size); - if (tmp == ~0UL) /* Are any bits zero? */ - return result + size; /* Nope. 
*/ -found_middle: - return result + ffz(tmp); -} diff --git a/arch/sparc64/mm/fault.c b/arch/sparc64/mm/fault.c index d21ff3230c02..0db2f7d9fab5 100644 --- a/arch/sparc64/mm/fault.c +++ b/arch/sparc64/mm/fault.c @@ -413,12 +413,12 @@ good_area: #ifdef CONFIG_HUGETLB_PAGE mm_rss -= (mm->context.huge_pte_count * (HPAGE_SIZE / PAGE_SIZE)); #endif - if (unlikely(mm_rss >= + if (unlikely(mm_rss > mm->context.tsb_block[MM_TSB_BASE].tsb_rss_limit)) tsb_grow(mm, MM_TSB_BASE, mm_rss); #ifdef CONFIG_HUGETLB_PAGE mm_rss = mm->context.huge_pte_count; - if (unlikely(mm_rss >= + if (unlikely(mm_rss > mm->context.tsb_block[MM_TSB_HUGE].tsb_rss_limit)) tsb_grow(mm, MM_TSB_HUGE, mm_rss); #endif diff --git a/arch/um/Kconfig.i386 b/arch/um/Kconfig.i386 index ef79ed25aecd..85e6a55b3b59 100644 --- a/arch/um/Kconfig.i386 +++ b/arch/um/Kconfig.i386 @@ -52,3 +52,8 @@ config ARCH_HAS_SC_SIGNALS config ARCH_REUSE_HOST_VSYSCALL_AREA bool default y + +config GENERIC_HWEIGHT + bool + default y + diff --git a/arch/um/Kconfig.x86_64 b/arch/um/Kconfig.x86_64 index aae19bc4b06a..f60e9e506424 100644 --- a/arch/um/Kconfig.x86_64 +++ b/arch/um/Kconfig.x86_64 @@ -46,3 +46,8 @@ config ARCH_REUSE_HOST_VSYSCALL_AREA config SMP_BROKEN bool default y + +config GENERIC_HWEIGHT + bool + default y + diff --git a/arch/um/drivers/mconsole_kern.c b/arch/um/drivers/mconsole_kern.c index 54388d10bcf9..1488816588ea 100644 --- a/arch/um/drivers/mconsole_kern.c +++ b/arch/um/drivers/mconsole_kern.c @@ -762,7 +762,8 @@ static struct notifier_block panic_exit_notifier = { static int add_notifier(void) { - notifier_chain_register(&panic_notifier_list, &panic_exit_notifier); + atomic_notifier_chain_register(&panic_notifier_list, + &panic_exit_notifier); return(0); } diff --git a/arch/um/drivers/ubd_kern.c b/arch/um/drivers/ubd_kern.c index fa617e0719ab..0336575d2448 100644 --- a/arch/um/drivers/ubd_kern.c +++ b/arch/um/drivers/ubd_kern.c @@ -1,4 +1,4 @@ -/* +/* * Copyright (C) 2000 Jeff Dike (jdike@karaya.com) * Licensed under the GPL */ @@ -71,7 +71,7 @@ struct io_thread_req { int error; }; -extern int open_ubd_file(char *file, struct openflags *openflags, +extern int open_ubd_file(char *file, struct openflags *openflags, int shared, char **backing_file_out, int *bitmap_offset_out, unsigned long *bitmap_len_out, int *data_offset_out, int *create_cow_out); @@ -137,7 +137,7 @@ static int fake_major = MAJOR_NR; static struct gendisk *ubd_gendisk[MAX_DEV]; static struct gendisk *fake_gendisk[MAX_DEV]; - + #ifdef CONFIG_BLK_DEV_UBD_SYNC #define OPEN_FLAGS ((struct openflags) { .r = 1, .w = 1, .s = 1, .c = 0, \ .cl = 1 }) @@ -168,6 +168,7 @@ struct ubd { __u64 size; struct openflags boot_openflags; struct openflags openflags; + int shared; int no_cow; struct cow cow; struct platform_device pdev; @@ -189,6 +190,7 @@ struct ubd { .boot_openflags = OPEN_FLAGS, \ .openflags = OPEN_FLAGS, \ .no_cow = 0, \ + .shared = 0, \ .cow = DEFAULT_COW, \ } @@ -305,7 +307,7 @@ static int ubd_setup_common(char *str, int *index_out) } major = simple_strtoul(str, &end, 0); if((*end != '\0') || (end == str)){ - printk(KERN_ERR + printk(KERN_ERR "ubd_setup : didn't parse major number\n"); return(1); } @@ -316,7 +318,7 @@ static int ubd_setup_common(char *str, int *index_out) printk(KERN_ERR "Can't assign a fake major twice\n"); goto out1; } - + fake_major = major; printk(KERN_INFO "Setting extra ubd major number to %d\n", @@ -351,7 +353,7 @@ static int ubd_setup_common(char *str, int *index_out) if (index_out) *index_out = n; - for (i = 0; i < 4; i++) { + for (i = 0; i < 
sizeof("rscd="); i++) { switch (*str) { case 'r': flags.w = 0; @@ -362,11 +364,14 @@ static int ubd_setup_common(char *str, int *index_out) case 'd': dev->no_cow = 1; break; + case 'c': + dev->shared = 1; + break; case '=': str++; goto break_loop; default: - printk(KERN_ERR "ubd_setup : Expected '=' or flag letter (r,s or d)\n"); + printk(KERN_ERR "ubd_setup : Expected '=' or flag letter (r, s, c, or d)\n"); goto out; } str++; @@ -515,7 +520,7 @@ static void ubd_handler(void) spin_unlock(&ubd_io_lock); return; } - + ubd_finish(rq, req.error); reactivate_fd(thread_fd, UBD_IRQ); do_ubd_request(ubd_queue); @@ -532,7 +537,7 @@ static int io_pid = -1; void kill_io_thread(void) { - if(io_pid != -1) + if(io_pid != -1) os_kill_process(io_pid, 1); } @@ -567,14 +572,15 @@ static int ubd_open_dev(struct ubd *dev) create_cow = 0; create_ptr = (dev->cow.file != NULL) ? &create_cow : NULL; back_ptr = dev->no_cow ? NULL : &dev->cow.file; - dev->fd = open_ubd_file(dev->file, &dev->openflags, back_ptr, - &dev->cow.bitmap_offset, &dev->cow.bitmap_len, - &dev->cow.data_offset, create_ptr); + dev->fd = open_ubd_file(dev->file, &dev->openflags, dev->shared, + back_ptr, &dev->cow.bitmap_offset, + &dev->cow.bitmap_len, &dev->cow.data_offset, + create_ptr); if((dev->fd == -ENOENT) && create_cow){ - dev->fd = create_cow_file(dev->file, dev->cow.file, + dev->fd = create_cow_file(dev->file, dev->cow.file, dev->openflags, 1 << 9, PAGE_SIZE, - &dev->cow.bitmap_offset, + &dev->cow.bitmap_offset, &dev->cow.bitmap_len, &dev->cow.data_offset); if(dev->fd >= 0){ @@ -598,16 +604,16 @@ static int ubd_open_dev(struct ubd *dev) } flush_tlb_kernel_vm(); - err = read_cow_bitmap(dev->fd, dev->cow.bitmap, - dev->cow.bitmap_offset, + err = read_cow_bitmap(dev->fd, dev->cow.bitmap, + dev->cow.bitmap_offset, dev->cow.bitmap_len); if(err < 0) goto error; flags = dev->openflags; flags.w = 0; - err = open_ubd_file(dev->cow.file, &flags, NULL, NULL, NULL, - NULL, NULL); + err = open_ubd_file(dev->cow.file, &flags, dev->shared, NULL, + NULL, NULL, NULL, NULL); if(err < 0) goto error; dev->cow.fd = err; } @@ -685,11 +691,11 @@ static int ubd_add(int n) dev->size = ROUND_BLOCK(dev->size); err = ubd_new_disk(MAJOR_NR, dev->size, n, &ubd_gendisk[n]); - if(err) + if(err) goto out_close; - + if(fake_major != MAJOR_NR) - ubd_new_disk(fake_major, dev->size, n, + ubd_new_disk(fake_major, dev->size, n, &fake_gendisk[n]); /* perhaps this should also be under the "if (fake_major)" above */ @@ -854,7 +860,7 @@ int ubd_init(void) return -1; } platform_driver_register(&ubd_driver); - for (i = 0; i < MAX_DEV; i++) + for (i = 0; i < MAX_DEV; i++) ubd_add(i); return 0; } @@ -872,16 +878,16 @@ int ubd_driver_init(void){ * enough. So use anyway the io thread. 
*/ } stack = alloc_stack(0, 0); - io_pid = start_io_thread(stack + PAGE_SIZE - sizeof(void *), + io_pid = start_io_thread(stack + PAGE_SIZE - sizeof(void *), &thread_fd); if(io_pid < 0){ - printk(KERN_ERR + printk(KERN_ERR "ubd : Failed to start I/O thread (errno = %d) - " "falling back to synchronous I/O\n", -io_pid); io_pid = -1; return(0); } - err = um_request_irq(UBD_IRQ, thread_fd, IRQ_READ, ubd_intr, + err = um_request_irq(UBD_IRQ, thread_fd, IRQ_READ, ubd_intr, SA_INTERRUPT, "ubd", ubd_dev); if(err != 0) printk(KERN_ERR "um_request_irq failed - errno = %d\n", -err); @@ -978,7 +984,7 @@ static void cowify_req(struct io_thread_req *req, unsigned long *bitmap, if(req->op == UBD_READ) { for(i = 0; i < req->length >> 9; i++){ if(ubd_test_bit(sector + i, (unsigned char *) bitmap)) - ubd_set_bit(i, (unsigned char *) + ubd_set_bit(i, (unsigned char *) &req->sector_mask); } } @@ -999,7 +1005,7 @@ static int prepare_request(struct request *req, struct io_thread_req *io_req) /* This should be impossible now */ if((rq_data_dir(req) == WRITE) && !dev->openflags.w){ - printk("Write attempted on readonly ubd device %s\n", + printk("Write attempted on readonly ubd device %s\n", disk->disk_name); end_request(req, 0); return(1); @@ -1182,7 +1188,7 @@ int read_cow_bitmap(int fd, void *buf, int offset, int len) return(0); } -int open_ubd_file(char *file, struct openflags *openflags, +int open_ubd_file(char *file, struct openflags *openflags, int shared, char **backing_file_out, int *bitmap_offset_out, unsigned long *bitmap_len_out, int *data_offset_out, int *create_cow_out) @@ -1206,10 +1212,14 @@ int open_ubd_file(char *file, struct openflags *openflags, return fd; } - err = os_lock_file(fd, openflags->w); - if(err < 0){ - printk("Failed to lock '%s', err = %d\n", file, -err); - goto out_close; + if(shared) + printk("Not locking \"%s\" on the host\n", file); + else { + err = os_lock_file(fd, openflags->w); + if(err < 0){ + printk("Failed to lock '%s', err = %d\n", file, -err); + goto out_close; + } } /* Succesful return case! 
*/ @@ -1260,7 +1270,7 @@ int create_cow_file(char *cow_file, char *backing_file, struct openflags flags, int err, fd; flags.c = 1; - fd = open_ubd_file(cow_file, &flags, NULL, NULL, NULL, NULL, NULL); + fd = open_ubd_file(cow_file, &flags, 0, NULL, NULL, NULL, NULL, NULL); if(fd < 0){ err = fd; printk("Open of COW file '%s' failed, errno = %d\n", cow_file, diff --git a/arch/um/include/irq_user.h b/arch/um/include/irq_user.h index b61deb8b362a..69a93c804f0e 100644 --- a/arch/um/include/irq_user.h +++ b/arch/um/include/irq_user.h @@ -1,4 +1,4 @@ -/* +/* * Copyright (C) 2001, 2002 Jeff Dike (jdike@karaya.com) * Licensed under the GPL */ @@ -6,6 +6,17 @@ #ifndef __IRQ_USER_H__ #define __IRQ_USER_H__ +struct irq_fd { + struct irq_fd *next; + void *id; + int fd; + int type; + int irq; + int pid; + int events; + int current_events; +}; + enum { IRQ_READ, IRQ_WRITE }; extern void sigio_handler(int sig, union uml_pt_regs *regs); @@ -16,8 +27,6 @@ extern void reactivate_fd(int fd, int irqnum); extern void deactivate_fd(int fd, int irqnum); extern int deactivate_all_fds(void); extern void forward_interrupts(int pid); -extern void init_irq_signals(int on_sigstack); -extern void forward_ipi(int fd, int pid); extern int activate_ipi(int fd, int pid); extern unsigned long irq_lock(void); extern void irq_unlock(unsigned long flags); diff --git a/arch/um/include/kern.h b/arch/um/include/kern.h index 7d223beccbc0..4ce3fc650e57 100644 --- a/arch/um/include/kern.h +++ b/arch/um/include/kern.h @@ -29,7 +29,7 @@ extern int getuid(void); extern int getgid(void); extern int pause(void); extern int write(int, const void *, int); -extern int exit(int); +extern void exit(int); extern int close(int); extern int read(unsigned int, char *, int); extern int pipe(int *); diff --git a/arch/um/include/misc_constants.h b/arch/um/include/misc_constants.h new file mode 100644 index 000000000000..989bc08de36e --- /dev/null +++ b/arch/um/include/misc_constants.h @@ -0,0 +1,6 @@ +#ifndef __MISC_CONSTANT_H_ +#define __MISC_CONSTANT_H_ + +#include <user_constants.h> + +#endif diff --git a/arch/um/include/os.h b/arch/um/include/os.h index 2a1c64d8d0bf..d3d1bc6074ef 100644 --- a/arch/um/include/os.h +++ b/arch/um/include/os.h @@ -12,6 +12,7 @@ #include "sysdep/ptrace.h" #include "kern_util.h" #include "skas/mm_id.h" +#include "irq_user.h" #define OS_TYPE_FILE 1 #define OS_TYPE_DIR 2 @@ -121,6 +122,7 @@ static inline struct openflags of_cloexec(struct openflags flags) return(flags); } +/* file.c */ extern int os_stat_file(const char *file_name, struct uml_stat *buf); extern int os_stat_fd(const int fd, struct uml_stat *buf); extern int os_access(const char *file, int mode); @@ -156,10 +158,20 @@ extern int os_connect_socket(char *name); extern int os_file_type(char *file); extern int os_file_mode(char *file, struct openflags *mode_out); extern int os_lock_file(int fd, int excl); +extern void os_flush_stdout(void); +extern int os_stat_filesystem(char *path, long *bsize_out, + long long *blocks_out, long long *bfree_out, + long long *bavail_out, long long *files_out, + long long *ffree_out, void *fsid_out, + int fsid_size, long *namelen_out, + long *spare_out); +extern int os_change_dir(char *dir); +extern int os_fchange_dir(int fd); /* start_up.c */ extern void os_early_checks(void); extern int can_do_skas(void); +extern void os_check_bugs(void); /* Make sure they are clear when running in TT mode. 
Required by * SEGV_MAYBE_FIXABLE */ @@ -198,6 +210,8 @@ extern void os_flush_stdout(void); /* tt.c * for tt mode only (will be deleted in future...) */ +extern void forward_ipi(int fd, int pid); +extern void kill_child_dead(int pid); extern void stop(void); extern int wait_for_stop(int pid, int sig, int cont_type, void *relay); extern int protect_memory(unsigned long addr, unsigned long len, @@ -294,4 +308,26 @@ extern void initial_thread_cb_skas(void (*proc)(void *), extern void halt_skas(void); extern void reboot_skas(void); +/* irq.c */ +extern int os_waiting_for_events(struct irq_fd *active_fds); +extern int os_isatty(int fd); +extern int os_create_pollfd(int fd, int events, void *tmp_pfd, int size_tmpfds); +extern void os_free_irq_by_cb(int (*test)(struct irq_fd *, void *), void *arg, + struct irq_fd *active_fds, struct irq_fd ***last_irq_ptr2); +extern void os_free_irq_later(struct irq_fd *active_fds, + int irq, void *dev_id); +extern int os_get_pollfd(int i); +extern void os_set_pollfd(int i, int fd); +extern void os_set_ioignore(void); +extern void init_irq_signals(int on_sigstack); + +/* sigio.c */ +extern void write_sigio_workaround(void); +extern int add_sigio_fd(int fd, int read); +extern int ignore_sigio_fd(int fd); + +/* skas/trap */ +extern void sig_handler_common_skas(int sig, void *sc_ptr); +extern void user_signal(int sig, union uml_pt_regs *regs, int pid); + #endif diff --git a/arch/um/include/sigio.h b/arch/um/include/sigio.h index 37d76e29a147..fe99ea163c2e 100644 --- a/arch/um/include/sigio.h +++ b/arch/um/include/sigio.h @@ -8,9 +8,6 @@ extern int write_sigio_irq(int fd); extern int register_sigio_fd(int fd); -extern int read_sigio_fd(int fd); -extern int add_sigio_fd(int fd, int read); -extern int ignore_sigio_fd(int fd); extern void sigio_lock(void); extern void sigio_unlock(void); diff --git a/arch/um/include/skas/mode-skas.h b/arch/um/include/skas/mode-skas.h index 260065cfeef1..8bc6916bbbb1 100644 --- a/arch/um/include/skas/mode-skas.h +++ b/arch/um/include/skas/mode-skas.h @@ -13,7 +13,6 @@ extern unsigned long exec_fp_regs[]; extern unsigned long exec_fpx_regs[]; extern int have_fpx_regs; -extern void sig_handler_common_skas(int sig, void *sc_ptr); extern void kill_off_processes_skas(void); #endif diff --git a/arch/um/include/skas/skas.h b/arch/um/include/skas/skas.h index 86357282d681..853b26f148c5 100644 --- a/arch/um/include/skas/skas.h +++ b/arch/um/include/skas/skas.h @@ -17,7 +17,6 @@ extern int user_thread(unsigned long stack, int flags); extern void new_thread_proc(void *stack, void (*handler)(int sig)); extern void new_thread_handler(int sig); extern void handle_syscall(union uml_pt_regs *regs); -extern void user_signal(int sig, union uml_pt_regs *regs, int pid); extern int new_mm(unsigned long stack); extern void get_skas_faultinfo(int pid, struct faultinfo * fi); extern long execute_syscall_skas(void *r); diff --git a/arch/um/include/user_util.h b/arch/um/include/user_util.h index a6f1f176cf84..992a7e1e0fca 100644 --- a/arch/um/include/user_util.h +++ b/arch/um/include/user_util.h @@ -58,7 +58,6 @@ extern int attach(int pid); extern void kill_child_dead(int pid); extern int cont(int pid); extern void check_sigio(void); -extern void write_sigio_workaround(void); extern void arch_check_bugs(void); extern int cpu_feature(char *what, char *buf, int len); extern int arch_handle_signal(int sig, union uml_pt_regs *regs); diff --git a/arch/um/kernel/Makefile b/arch/um/kernel/Makefile index 693018ba80f1..fe08971b64cf 100644 --- a/arch/um/kernel/Makefile +++ 
b/arch/um/kernel/Makefile @@ -7,23 +7,20 @@ extra-y := vmlinux.lds clean-files := obj-y = config.o exec_kern.o exitcode.o \ - init_task.o irq.o irq_user.o ksyms.o mem.o physmem.o \ - process_kern.o ptrace.o reboot.o resource.o sigio_user.o sigio_kern.o \ + init_task.o irq.o ksyms.o mem.o physmem.o \ + process_kern.o ptrace.o reboot.o resource.o sigio_kern.o \ signal_kern.o smp.o syscall_kern.o sysrq.o \ time_kern.o tlb.o trap_kern.o uaccess.o um_arch.o umid.o obj-$(CONFIG_BLK_DEV_INITRD) += initrd.o obj-$(CONFIG_GPROF) += gprof_syms.o obj-$(CONFIG_GCOV) += gmon_syms.o -obj-$(CONFIG_TTY_LOG) += tty_log.o obj-$(CONFIG_SYSCALL_DEBUG) += syscall.o obj-$(CONFIG_MODE_TT) += tt/ obj-$(CONFIG_MODE_SKAS) += skas/ -user-objs-$(CONFIG_TTY_LOG) += tty_log.o - -USER_OBJS := $(user-objs-y) config.o tty_log.o +USER_OBJS := config.o include arch/um/scripts/Makefile.rules diff --git a/arch/um/kernel/exec_kern.c b/arch/um/kernel/exec_kern.c index c264e1c05ab3..1ca84319317d 100644 --- a/arch/um/kernel/exec_kern.c +++ b/arch/um/kernel/exec_kern.c @@ -30,8 +30,6 @@ void start_thread(struct pt_regs *regs, unsigned long eip, unsigned long esp) CHOOSE_MODE_PROC(start_thread_tt, start_thread_skas, regs, eip, esp); } -extern void log_exec(char **argv, void *tty); - static long execve1(char *file, char __user * __user *argv, char __user *__user *env) { diff --git a/arch/um/kernel/irq.c b/arch/um/kernel/irq.c index bbf94bf2921e..c39ea3abeda4 100644 --- a/arch/um/kernel/irq.c +++ b/arch/um/kernel/irq.c @@ -31,6 +31,8 @@ #include "irq_user.h" #include "irq_kern.h" #include "os.h" +#include "sigio.h" +#include "misc_constants.h" /* * Generic, controller-independent functions: @@ -77,6 +79,298 @@ skip: return 0; } +struct irq_fd *active_fds = NULL; +static struct irq_fd **last_irq_ptr = &active_fds; + +extern void free_irqs(void); + +void sigio_handler(int sig, union uml_pt_regs *regs) +{ + struct irq_fd *irq_fd; + int n; + + if(smp_sigio_handler()) return; + while(1){ + n = os_waiting_for_events(active_fds); + if (n <= 0) { + if(n == -EINTR) continue; + else break; + } + + for(irq_fd = active_fds; irq_fd != NULL; irq_fd = irq_fd->next){ + if(irq_fd->current_events != 0){ + irq_fd->current_events = 0; + do_IRQ(irq_fd->irq, regs); + } + } + } + + free_irqs(); +} + +static void maybe_sigio_broken(int fd, int type) +{ + if(os_isatty(fd)){ + if((type == IRQ_WRITE) && !pty_output_sigio){ + write_sigio_workaround(); + add_sigio_fd(fd, 0); + } + else if((type == IRQ_READ) && !pty_close_sigio){ + write_sigio_workaround(); + add_sigio_fd(fd, 1); + } + } +} + + +int activate_fd(int irq, int fd, int type, void *dev_id) +{ + struct pollfd *tmp_pfd; + struct irq_fd *new_fd, *irq_fd; + unsigned long flags; + int pid, events, err, n; + + pid = os_getpid(); + err = os_set_fd_async(fd, pid); + if(err < 0) + goto out; + + new_fd = um_kmalloc(sizeof(*new_fd)); + err = -ENOMEM; + if(new_fd == NULL) + goto out; + + if(type == IRQ_READ) events = UM_POLLIN | UM_POLLPRI; + else events = UM_POLLOUT; + *new_fd = ((struct irq_fd) { .next = NULL, + .id = dev_id, + .fd = fd, + .type = type, + .irq = irq, + .pid = pid, + .events = events, + .current_events = 0 } ); + + /* Critical section - locked by a spinlock because this stuff can + * be changed from interrupt handlers. The stuff above is done + * outside the lock because it allocates memory. + */ + + /* Actually, it only looks like it can be called from interrupt + * context. 
The culprit is reactivate_fd, which calls + * maybe_sigio_broken, which calls write_sigio_workaround, + * which calls activate_fd. However, write_sigio_workaround should + * only be called once, at boot time. That would make it clear that + * this is called only from process context, and can be locked with + * a semaphore. + */ + flags = irq_lock(); + for(irq_fd = active_fds; irq_fd != NULL; irq_fd = irq_fd->next){ + if((irq_fd->fd == fd) && (irq_fd->type == type)){ + printk("Registering fd %d twice\n", fd); + printk("Irqs : %d, %d\n", irq_fd->irq, irq); + printk("Ids : 0x%p, 0x%p\n", irq_fd->id, dev_id); + goto out_unlock; + } + } + + /*-------------*/ + if(type == IRQ_WRITE) + fd = -1; + + tmp_pfd = NULL; + n = 0; + + while(1){ + n = os_create_pollfd(fd, events, tmp_pfd, n); + if (n == 0) + break; + + /* n > 0 + * It means we couldn't put new pollfd to current pollfds + * and tmp_fds is NULL or too small for new pollfds array. + * Needed size is equal to n as minimum. + * + * Here we have to drop the lock in order to call + * kmalloc, which might sleep. + * If something else came in and changed the pollfds array + * so we will not be able to put new pollfd struct to pollfds + * then we free the buffer tmp_fds and try again. + */ + irq_unlock(flags); + if (tmp_pfd != NULL) { + kfree(tmp_pfd); + tmp_pfd = NULL; + } + + tmp_pfd = um_kmalloc(n); + if (tmp_pfd == NULL) + goto out_kfree; + + flags = irq_lock(); + } + /*-------------*/ + + *last_irq_ptr = new_fd; + last_irq_ptr = &new_fd->next; + + irq_unlock(flags); + + /* This calls activate_fd, so it has to be outside the critical + * section. + */ + maybe_sigio_broken(fd, type); + + return(0); + + out_unlock: + irq_unlock(flags); + out_kfree: + kfree(new_fd); + out: + return(err); +} + +static void free_irq_by_cb(int (*test)(struct irq_fd *, void *), void *arg) +{ + unsigned long flags; + + flags = irq_lock(); + os_free_irq_by_cb(test, arg, active_fds, &last_irq_ptr); + irq_unlock(flags); +} + +struct irq_and_dev { + int irq; + void *dev; +}; + +static int same_irq_and_dev(struct irq_fd *irq, void *d) +{ + struct irq_and_dev *data = d; + + return((irq->irq == data->irq) && (irq->id == data->dev)); +} + +void free_irq_by_irq_and_dev(unsigned int irq, void *dev) +{ + struct irq_and_dev data = ((struct irq_and_dev) { .irq = irq, + .dev = dev }); + + free_irq_by_cb(same_irq_and_dev, &data); +} + +static int same_fd(struct irq_fd *irq, void *fd) +{ + return(irq->fd == *((int *) fd)); +} + +void free_irq_by_fd(int fd) +{ + free_irq_by_cb(same_fd, &fd); +} + +static struct irq_fd *find_irq_by_fd(int fd, int irqnum, int *index_out) +{ + struct irq_fd *irq; + int i = 0; + int fdi; + + for(irq=active_fds; irq != NULL; irq = irq->next){ + if((irq->fd == fd) && (irq->irq == irqnum)) break; + i++; + } + if(irq == NULL){ + printk("find_irq_by_fd doesn't have descriptor %d\n", fd); + goto out; + } + fdi = os_get_pollfd(i); + if((fdi != -1) && (fdi != fd)){ + printk("find_irq_by_fd - mismatch between active_fds and " + "pollfds, fd %d vs %d, need %d\n", irq->fd, + fdi, fd); + irq = NULL; + goto out; + } + *index_out = i; + out: + return(irq); +} + +void reactivate_fd(int fd, int irqnum) +{ + struct irq_fd *irq; + unsigned long flags; + int i; + + flags = irq_lock(); + irq = find_irq_by_fd(fd, irqnum, &i); + if(irq == NULL){ + irq_unlock(flags); + return; + } + os_set_pollfd(i, irq->fd); + irq_unlock(flags); + + /* This calls activate_fd, so it has to be outside the critical + * section. 
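
The comments above spell out the contract between activate_fd() and os_create_pollfd(): the helper returns 0 when the new pollfd fits, or the byte count it needs when the array must grow, and the caller drops irq_lock() to allocate (kmalloc may sleep) and then retries, because the array can change size while the lock is dropped. Below is a stand-alone user-space model of that grow-outside-the-lock loop, with pthreads and malloc standing in for irq_lock() and um_kmalloc(); it counts elements rather than bytes and the names are illustrative:

    #include <pthread.h>
    #include <stdlib.h>
    #include <string.h>

    static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
    static int *slots;                  /* protected by lock */
    static int used, capacity;          /* protected by lock */

    /* Returns 0 on success, or the element count the caller must
     * allocate (outside the lock) before retrying. */
    static int try_add(int value, int *scratch, int scratch_len)
    {
            if (used == capacity) {
                    if (scratch_len < capacity + 1)
                            return capacity + 1;
                    if (slots) {
                            memcpy(scratch, slots, used * sizeof(*slots));
                            free(slots);
                    }
                    slots = scratch;
                    capacity++;
            } else {
                    free(scratch);      /* raced: someone else already grew the array */
            }
            slots[used++] = value;
            return 0;
    }

    static int add_value(int value)
    {
            int *scratch = NULL;
            int need = 0;

            pthread_mutex_lock(&lock);
            while ((need = try_add(value, scratch, need)) != 0) {
                    /* Drop the lock to allocate: the allocation may block,
                     * and the array may change size meanwhile, so retry. */
                    pthread_mutex_unlock(&lock);
                    free(scratch);      /* buffer sized for a lost race, if any */
                    scratch = malloc(need * sizeof(*slots));
                    if (!scratch)
                            return -1;
                    pthread_mutex_lock(&lock);
            }
            pthread_mutex_unlock(&lock);
            return 0;
    }
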
+ */ + maybe_sigio_broken(fd, irq->type); +} + +void deactivate_fd(int fd, int irqnum) +{ + struct irq_fd *irq; + unsigned long flags; + int i; + + flags = irq_lock(); + irq = find_irq_by_fd(fd, irqnum, &i); + if(irq == NULL) + goto out; + os_set_pollfd(i, -1); + out: + irq_unlock(flags); +} + +int deactivate_all_fds(void) +{ + struct irq_fd *irq; + int err; + + for(irq=active_fds;irq != NULL;irq = irq->next){ + err = os_clear_fd_async(irq->fd); + if(err) + return(err); + } + /* If there is a signal already queued, after unblocking ignore it */ + os_set_ioignore(); + + return(0); +} + +void forward_interrupts(int pid) +{ + struct irq_fd *irq; + unsigned long flags; + int err; + + flags = irq_lock(); + for(irq=active_fds;irq != NULL;irq = irq->next){ + err = os_set_owner(irq->fd, pid); + if(err < 0){ + /* XXX Just remove the irq rather than + * print out an infinite stream of these + */ + printk("Failed to forward %d to pid %d, err = %d\n", + irq->fd, pid, -err); + } + + irq->pid = pid; + } + irq_unlock(flags); +} + /* * do_IRQ handles all normal device IRQ's (the special * SMP cross-CPU interrupts have their own specific diff --git a/arch/um/kernel/irq_user.c b/arch/um/kernel/irq_user.c deleted file mode 100644 index 0e32f5f4a887..000000000000 --- a/arch/um/kernel/irq_user.c +++ /dev/null @@ -1,412 +0,0 @@ -/* - * Copyright (C) 2000 Jeff Dike (jdike@karaya.com) - * Licensed under the GPL - */ - -#include <stdlib.h> -#include <unistd.h> -#include <errno.h> -#include <signal.h> -#include <string.h> -#include <sys/poll.h> -#include <sys/types.h> -#include <sys/time.h> -#include "user_util.h" -#include "kern_util.h" -#include "user.h" -#include "process.h" -#include "sigio.h" -#include "irq_user.h" -#include "os.h" - -struct irq_fd { - struct irq_fd *next; - void *id; - int fd; - int type; - int irq; - int pid; - int events; - int current_events; -}; - -static struct irq_fd *active_fds = NULL; -static struct irq_fd **last_irq_ptr = &active_fds; - -static struct pollfd *pollfds = NULL; -static int pollfds_num = 0; -static int pollfds_size = 0; - -extern int io_count, intr_count; - -extern void free_irqs(void); - -void sigio_handler(int sig, union uml_pt_regs *regs) -{ - struct irq_fd *irq_fd; - int i, n; - - if(smp_sigio_handler()) return; - while(1){ - n = poll(pollfds, pollfds_num, 0); - if(n < 0){ - if(errno == EINTR) continue; - printk("sigio_handler : poll returned %d, " - "errno = %d\n", n, errno); - break; - } - if(n == 0) break; - - irq_fd = active_fds; - for(i = 0; i < pollfds_num; i++){ - if(pollfds[i].revents != 0){ - irq_fd->current_events = pollfds[i].revents; - pollfds[i].fd = -1; - } - irq_fd = irq_fd->next; - } - - for(irq_fd = active_fds; irq_fd != NULL; irq_fd = irq_fd->next){ - if(irq_fd->current_events != 0){ - irq_fd->current_events = 0; - do_IRQ(irq_fd->irq, regs); - } - } - } - - free_irqs(); -} - -int activate_ipi(int fd, int pid) -{ - return(os_set_fd_async(fd, pid)); -} - -static void maybe_sigio_broken(int fd, int type) -{ - if(isatty(fd)){ - if((type == IRQ_WRITE) && !pty_output_sigio){ - write_sigio_workaround(); - add_sigio_fd(fd, 0); - } - else if((type == IRQ_READ) && !pty_close_sigio){ - write_sigio_workaround(); - add_sigio_fd(fd, 1); - } - } -} - -int activate_fd(int irq, int fd, int type, void *dev_id) -{ - struct pollfd *tmp_pfd; - struct irq_fd *new_fd, *irq_fd; - unsigned long flags; - int pid, events, err, n, size; - - pid = os_getpid(); - err = os_set_fd_async(fd, pid); - if(err < 0) - goto out; - - new_fd = um_kmalloc(sizeof(*new_fd)); - err = -ENOMEM; 
- if(new_fd == NULL) - goto out; - - if(type == IRQ_READ) events = POLLIN | POLLPRI; - else events = POLLOUT; - *new_fd = ((struct irq_fd) { .next = NULL, - .id = dev_id, - .fd = fd, - .type = type, - .irq = irq, - .pid = pid, - .events = events, - .current_events = 0 } ); - - /* Critical section - locked by a spinlock because this stuff can - * be changed from interrupt handlers. The stuff above is done - * outside the lock because it allocates memory. - */ - - /* Actually, it only looks like it can be called from interrupt - * context. The culprit is reactivate_fd, which calls - * maybe_sigio_broken, which calls write_sigio_workaround, - * which calls activate_fd. However, write_sigio_workaround should - * only be called once, at boot time. That would make it clear that - * this is called only from process context, and can be locked with - * a semaphore. - */ - flags = irq_lock(); - for(irq_fd = active_fds; irq_fd != NULL; irq_fd = irq_fd->next){ - if((irq_fd->fd == fd) && (irq_fd->type == type)){ - printk("Registering fd %d twice\n", fd); - printk("Irqs : %d, %d\n", irq_fd->irq, irq); - printk("Ids : 0x%x, 0x%x\n", irq_fd->id, dev_id); - goto out_unlock; - } - } - - n = pollfds_num; - if(n == pollfds_size){ - while(1){ - /* Here we have to drop the lock in order to call - * kmalloc, which might sleep. If something else - * came in and changed the pollfds array, we free - * the buffer and try again. - */ - irq_unlock(flags); - size = (pollfds_num + 1) * sizeof(pollfds[0]); - tmp_pfd = um_kmalloc(size); - flags = irq_lock(); - if(tmp_pfd == NULL) - goto out_unlock; - if(n == pollfds_size) - break; - kfree(tmp_pfd); - } - if(pollfds != NULL){ - memcpy(tmp_pfd, pollfds, - sizeof(pollfds[0]) * pollfds_size); - kfree(pollfds); - } - pollfds = tmp_pfd; - pollfds_size++; - } - - if(type == IRQ_WRITE) - fd = -1; - - pollfds[pollfds_num] = ((struct pollfd) { .fd = fd, - .events = events, - .revents = 0 }); - pollfds_num++; - - *last_irq_ptr = new_fd; - last_irq_ptr = &new_fd->next; - - irq_unlock(flags); - - /* This calls activate_fd, so it has to be outside the critical - * section. - */ - maybe_sigio_broken(fd, type); - - return(0); - - out_unlock: - irq_unlock(flags); - kfree(new_fd); - out: - return(err); -} - -static void free_irq_by_cb(int (*test)(struct irq_fd *, void *), void *arg) -{ - struct irq_fd **prev; - unsigned long flags; - int i = 0; - - flags = irq_lock(); - prev = &active_fds; - while(*prev != NULL){ - if((*test)(*prev, arg)){ - struct irq_fd *old_fd = *prev; - if((pollfds[i].fd != -1) && - (pollfds[i].fd != (*prev)->fd)){ - printk("free_irq_by_cb - mismatch between " - "active_fds and pollfds, fd %d vs %d\n", - (*prev)->fd, pollfds[i].fd); - goto out; - } - - pollfds_num--; - - /* This moves the *whole* array after pollfds[i] (though - * it doesn't spot as such)! 
*/ - - memmove(&pollfds[i], &pollfds[i + 1], - (pollfds_num - i) * sizeof(pollfds[0])); - - if(last_irq_ptr == &old_fd->next) - last_irq_ptr = prev; - *prev = (*prev)->next; - if(old_fd->type == IRQ_WRITE) - ignore_sigio_fd(old_fd->fd); - kfree(old_fd); - continue; - } - prev = &(*prev)->next; - i++; - } - out: - irq_unlock(flags); -} - -struct irq_and_dev { - int irq; - void *dev; -}; - -static int same_irq_and_dev(struct irq_fd *irq, void *d) -{ - struct irq_and_dev *data = d; - - return((irq->irq == data->irq) && (irq->id == data->dev)); -} - -void free_irq_by_irq_and_dev(unsigned int irq, void *dev) -{ - struct irq_and_dev data = ((struct irq_and_dev) { .irq = irq, - .dev = dev }); - - free_irq_by_cb(same_irq_and_dev, &data); -} - -static int same_fd(struct irq_fd *irq, void *fd) -{ - return(irq->fd == *((int *) fd)); -} - -void free_irq_by_fd(int fd) -{ - free_irq_by_cb(same_fd, &fd); -} - -static struct irq_fd *find_irq_by_fd(int fd, int irqnum, int *index_out) -{ - struct irq_fd *irq; - int i = 0; - - for(irq=active_fds; irq != NULL; irq = irq->next){ - if((irq->fd == fd) && (irq->irq == irqnum)) break; - i++; - } - if(irq == NULL){ - printk("find_irq_by_fd doesn't have descriptor %d\n", fd); - goto out; - } - if((pollfds[i].fd != -1) && (pollfds[i].fd != fd)){ - printk("find_irq_by_fd - mismatch between active_fds and " - "pollfds, fd %d vs %d, need %d\n", irq->fd, - pollfds[i].fd, fd); - irq = NULL; - goto out; - } - *index_out = i; - out: - return(irq); -} - -void reactivate_fd(int fd, int irqnum) -{ - struct irq_fd *irq; - unsigned long flags; - int i; - - flags = irq_lock(); - irq = find_irq_by_fd(fd, irqnum, &i); - if(irq == NULL){ - irq_unlock(flags); - return; - } - - pollfds[i].fd = irq->fd; - - irq_unlock(flags); - - /* This calls activate_fd, so it has to be outside the critical - * section. - */ - maybe_sigio_broken(fd, irq->type); -} - -void deactivate_fd(int fd, int irqnum) -{ - struct irq_fd *irq; - unsigned long flags; - int i; - - flags = irq_lock(); - irq = find_irq_by_fd(fd, irqnum, &i); - if(irq == NULL) - goto out; - pollfds[i].fd = -1; - out: - irq_unlock(flags); -} - -int deactivate_all_fds(void) -{ - struct irq_fd *irq; - int err; - - for(irq=active_fds;irq != NULL;irq = irq->next){ - err = os_clear_fd_async(irq->fd); - if(err) - return(err); - } - /* If there is a signal already queued, after unblocking ignore it */ - set_handler(SIGIO, SIG_IGN, 0, -1); - - return(0); -} - -void forward_ipi(int fd, int pid) -{ - int err; - - err = os_set_owner(fd, pid); - if(err < 0) - printk("forward_ipi: set_owner failed, fd = %d, me = %d, " - "target = %d, err = %d\n", fd, os_getpid(), pid, -err); -} - -void forward_interrupts(int pid) -{ - struct irq_fd *irq; - unsigned long flags; - int err; - - flags = irq_lock(); - for(irq=active_fds;irq != NULL;irq = irq->next){ - err = os_set_owner(irq->fd, pid); - if(err < 0){ - /* XXX Just remove the irq rather than - * print out an infinite stream of these - */ - printk("Failed to forward %d to pid %d, err = %d\n", - irq->fd, pid, -err); - } - - irq->pid = pid; - } - irq_unlock(flags); -} - -void init_irq_signals(int on_sigstack) -{ - __sighandler_t h; - int flags; - - flags = on_sigstack ? 
SA_ONSTACK : 0; - if(timer_irq_inited) h = (__sighandler_t) alarm_handler; - else h = boot_timer_handler; - - set_handler(SIGVTALRM, h, flags | SA_RESTART, - SIGUSR1, SIGIO, SIGWINCH, SIGALRM, -1); - set_handler(SIGIO, (__sighandler_t) sig_handler, flags | SA_RESTART, - SIGUSR1, SIGIO, SIGWINCH, SIGALRM, SIGVTALRM, -1); - signal(SIGWINCH, SIG_IGN); -} - -/* - * Overrides for Emacs so that we follow Linus's tabbing style. - * Emacs will notice this stuff at the end of the file and automatically - * adjust the settings for this buffer only. This must remain at the end - * of the file. - * --------------------------------------------------------------------------- - * Local variables: - * c-file-style: "linux" - * End: - */ diff --git a/arch/um/kernel/physmem.c b/arch/um/kernel/physmem.c index 0e65340eee33..0500800df1c1 100644 --- a/arch/um/kernel/physmem.c +++ b/arch/um/kernel/physmem.c @@ -9,6 +9,7 @@ #include "linux/vmalloc.h" #include "linux/bootmem.h" #include "linux/module.h" +#include "linux/pfn.h" #include "asm/types.h" #include "asm/pgtable.h" #include "kern_util.h" @@ -316,8 +317,6 @@ void map_memory(unsigned long virt, unsigned long phys, unsigned long len, } } -#define PFN_UP(x) (((x) + PAGE_SIZE-1) >> PAGE_SHIFT) - extern int __syscall_stub_start, __binary_start; void setup_physmem(unsigned long start, unsigned long reserve_end, diff --git a/arch/um/kernel/sigio_kern.c b/arch/um/kernel/sigio_kern.c index 229988463c4c..1c1300fb1e95 100644 --- a/arch/um/kernel/sigio_kern.c +++ b/arch/um/kernel/sigio_kern.c @@ -1,4 +1,4 @@ -/* +/* * Copyright (C) 2002 - 2003 Jeff Dike (jdike@addtoit.com) * Licensed under the GPL */ @@ -12,13 +12,16 @@ #include "sigio.h" #include "irq_user.h" #include "irq_kern.h" +#include "os.h" /* Protected by sigio_lock() called from write_sigio_workaround */ static int sigio_irq_fd = -1; static irqreturn_t sigio_interrupt(int irq, void *data, struct pt_regs *unused) { - read_sigio_fd(sigio_irq_fd); + char c; + + os_read_file(sigio_irq_fd, &c, sizeof(c)); reactivate_fd(sigio_irq_fd, SIGIO_WRITE_IRQ); return(IRQ_HANDLED); } @@ -51,6 +54,9 @@ void sigio_unlock(void) spin_unlock(&sigio_spinlock); } +extern void sigio_cleanup(void); +__uml_exitcall(sigio_cleanup); + /* * Overrides for Emacs so that we follow Linus's tabbing style. 
* Emacs will notice this stuff at the end of the file and automatically diff --git a/arch/um/kernel/smp.c b/arch/um/kernel/smp.c index 72113b0a96e7..c8d8d0ac1a7f 100644 --- a/arch/um/kernel/smp.c +++ b/arch/um/kernel/smp.c @@ -1,4 +1,4 @@ -/* +/* * Copyright (C) 2000 - 2003 Jeff Dike (jdike@addtoit.com) * Licensed under the GPL */ @@ -77,9 +77,9 @@ static int idle_proc(void *cpup) if(err < 0) panic("CPU#%d failed to create IPI pipe, err = %d", cpu, -err); - activate_ipi(cpu_data[cpu].ipi_pipe[0], + os_set_fd_async(cpu_data[cpu].ipi_pipe[0], current->thread.mode.tt.extern_pid); - + wmb(); if (cpu_test_and_set(cpu, cpu_callin_map)) { printk("huh, CPU#%d already present??\n", cpu); @@ -106,7 +106,7 @@ static struct task_struct *idle_thread(int cpu) panic("copy_process failed in idle_thread, error = %ld", PTR_ERR(new_task)); - cpu_tasks[cpu] = ((struct cpu_task) + cpu_tasks[cpu] = ((struct cpu_task) { .pid = new_task->thread.mode.tt.extern_pid, .task = new_task } ); idle_threads[cpu] = new_task; @@ -134,12 +134,12 @@ void smp_prepare_cpus(unsigned int maxcpus) if(err < 0) panic("CPU#0 failed to create IPI pipe, errno = %d", -err); - activate_ipi(cpu_data[me].ipi_pipe[0], + os_set_fd_async(cpu_data[me].ipi_pipe[0], current->thread.mode.tt.extern_pid); for(cpu = 1; cpu < ncpus; cpu++){ printk("Booting processor %d...\n", cpu); - + idle = idle_thread(cpu); init_idle(idle, cpu); @@ -223,7 +223,7 @@ void smp_call_function_slave(int cpu) atomic_inc(&scf_finished); } -int smp_call_function(void (*_func)(void *info), void *_info, int nonatomic, +int smp_call_function(void (*_func)(void *info), void *_info, int nonatomic, int wait) { int cpus = num_online_cpus() - 1; diff --git a/arch/um/kernel/um_arch.c b/arch/um/kernel/um_arch.c index 80c9c18aae94..7d51dd7201c3 100644 --- a/arch/um/kernel/um_arch.c +++ b/arch/um/kernel/um_arch.c @@ -421,7 +421,7 @@ int linux_main(int argc, char **argv) #ifndef CONFIG_HIGHMEM highmem = 0; printf("CONFIG_HIGHMEM not enabled - physical memory shrunk " - "to %lu bytes\n", physmem_size); + "to %Lu bytes\n", physmem_size); #endif } @@ -433,8 +433,8 @@ int linux_main(int argc, char **argv) setup_physmem(uml_physmem, uml_reserved, physmem_size, highmem); if(init_maps(physmem_size, iomem_size, highmem)){ - printf("Failed to allocate mem_map for %lu bytes of physical " - "memory and %lu bytes of highmem\n", physmem_size, + printf("Failed to allocate mem_map for %Lu bytes of physical " + "memory and %Lu bytes of highmem\n", physmem_size, highmem); exit(1); } @@ -477,7 +477,8 @@ static struct notifier_block panic_exit_notifier = { void __init setup_arch(char **cmdline_p) { - notifier_chain_register(&panic_notifier_list, &panic_exit_notifier); + atomic_notifier_chain_register(&panic_notifier_list, + &panic_exit_notifier); paging_init(); strlcpy(saved_command_line, command_line, COMMAND_LINE_SIZE); *cmdline_p = command_line; @@ -487,8 +488,7 @@ void __init setup_arch(char **cmdline_p) void __init check_bugs(void) { arch_check_bugs(); - check_sigio(); - check_devanon(); + os_check_bugs(); } void apply_alternatives(struct alt_instr *start, struct alt_instr *end) diff --git a/arch/um/os-Linux/Makefile b/arch/um/os-Linux/Makefile index 08a4e628b24c..1659386b42bb 100644 --- a/arch/um/os-Linux/Makefile +++ b/arch/um/os-Linux/Makefile @@ -3,14 +3,17 @@ # Licensed under the GPL # -obj-y = aio.o elf_aux.o file.o helper.o main.o mem.o process.o signal.o \ - start_up.o time.o trap.o tt.o tty.o uaccess.o umid.o user_syms.o \ - util.o drivers/ sys-$(SUBARCH)/ +obj-y = aio.o elf_aux.o file.o 
helper.o irq.o main.o mem.o process.o sigio.o \ + signal.o start_up.o time.o trap.o tt.o tty.o uaccess.o umid.o \ + user_syms.o util.o drivers/ sys-$(SUBARCH)/ obj-$(CONFIG_MODE_SKAS) += skas/ +obj-$(CONFIG_TTY_LOG) += tty_log.o +user-objs-$(CONFIG_TTY_LOG) += tty_log.o -USER_OBJS := aio.o elf_aux.o file.o helper.o main.o mem.o process.o signal.o \ - start_up.o time.o trap.o tt.o tty.o uaccess.o umid.o util.o +USER_OBJS := $(user-objs-y) aio.o elf_aux.o file.o helper.o irq.o main.o mem.o \ + process.o sigio.o signal.o start_up.o time.o trap.o tt.o tty.o \ + uaccess.o umid.o util.o elf_aux.o: $(ARCH_DIR)/kernel-offsets.h CFLAGS_elf_aux.o += -I$(objtree)/arch/um diff --git a/arch/um/os-Linux/irq.c b/arch/um/os-Linux/irq.c new file mode 100644 index 000000000000..e599be423da1 --- /dev/null +++ b/arch/um/os-Linux/irq.c @@ -0,0 +1,162 @@ +/* + * Copyright (C) 2000, 2001, 2002 Jeff Dike (jdike@karaya.com) + * Licensed under the GPL + */ + +#include <stdlib.h> +#include <unistd.h> +#include <errno.h> +#include <signal.h> +#include <string.h> +#include <sys/poll.h> +#include <sys/types.h> +#include <sys/time.h> +#include "user_util.h" +#include "kern_util.h" +#include "user.h" +#include "process.h" +#include "sigio.h" +#include "irq_user.h" +#include "os.h" + +static struct pollfd *pollfds = NULL; +static int pollfds_num = 0; +static int pollfds_size = 0; + +int os_waiting_for_events(struct irq_fd *active_fds) +{ + struct irq_fd *irq_fd; + int i, n, err; + + n = poll(pollfds, pollfds_num, 0); + if(n < 0){ + err = -errno; + if(errno != EINTR) + printk("sigio_handler: os_waiting_for_events:" + " poll returned %d, errno = %d\n", n, errno); + return err; + } + + if(n == 0) + return 0; + + irq_fd = active_fds; + + for(i = 0; i < pollfds_num; i++){ + if(pollfds[i].revents != 0){ + irq_fd->current_events = pollfds[i].revents; + pollfds[i].fd = -1; + } + irq_fd = irq_fd->next; + } + return n; +} + +int os_isatty(int fd) +{ + return(isatty(fd)); +} + +int os_create_pollfd(int fd, int events, void *tmp_pfd, int size_tmpfds) +{ + if (pollfds_num == pollfds_size) { + if (size_tmpfds <= pollfds_size * sizeof(pollfds[0])) { + /* return min size needed for new pollfds area */ + return((pollfds_size + 1) * sizeof(pollfds[0])); + } + + if(pollfds != NULL){ + memcpy(tmp_pfd, pollfds, + sizeof(pollfds[0]) * pollfds_size); + /* remove old pollfds */ + kfree(pollfds); + } + pollfds = tmp_pfd; + pollfds_size++; + } else { + /* remove not used tmp_pfd */ + if (tmp_pfd != NULL) + kfree(tmp_pfd); + } + + pollfds[pollfds_num] = ((struct pollfd) { .fd = fd, + .events = events, + .revents = 0 }); + pollfds_num++; + + return(0); +} + +void os_free_irq_by_cb(int (*test)(struct irq_fd *, void *), void *arg, + struct irq_fd *active_fds, struct irq_fd ***last_irq_ptr2) +{ + struct irq_fd **prev; + int i = 0; + + prev = &active_fds; + while(*prev != NULL){ + if((*test)(*prev, arg)){ + struct irq_fd *old_fd = *prev; + if((pollfds[i].fd != -1) && + (pollfds[i].fd != (*prev)->fd)){ + printk("os_free_irq_by_cb - mismatch between " + "active_fds and pollfds, fd %d vs %d\n", + (*prev)->fd, pollfds[i].fd); + goto out; + } + + pollfds_num--; + + /* This moves the *whole* array after pollfds[i] + * (though it doesn't spot as such)! 
+ */ + + memmove(&pollfds[i], &pollfds[i + 1], + (pollfds_num - i) * sizeof(pollfds[0])); + if(*last_irq_ptr2 == &old_fd->next) + *last_irq_ptr2 = prev; + + *prev = (*prev)->next; + if(old_fd->type == IRQ_WRITE) + ignore_sigio_fd(old_fd->fd); + kfree(old_fd); + continue; + } + prev = &(*prev)->next; + i++; + } + out: + return; +} + + +int os_get_pollfd(int i) +{ + return(pollfds[i].fd); +} + +void os_set_pollfd(int i, int fd) +{ + pollfds[i].fd = fd; +} + +void os_set_ioignore(void) +{ + set_handler(SIGIO, SIG_IGN, 0, -1); +} + +void init_irq_signals(int on_sigstack) +{ + __sighandler_t h; + int flags; + + flags = on_sigstack ? SA_ONSTACK : 0; + if(timer_irq_inited) h = (__sighandler_t) alarm_handler; + else h = boot_timer_handler; + + set_handler(SIGVTALRM, h, flags | SA_RESTART, + SIGUSR1, SIGIO, SIGWINCH, SIGALRM, -1); + set_handler(SIGIO, (__sighandler_t) sig_handler, flags | SA_RESTART, + SIGUSR1, SIGIO, SIGWINCH, SIGALRM, SIGVTALRM, -1); + signal(SIGWINCH, SIG_IGN); +} diff --git a/arch/um/kernel/sigio_user.c b/arch/um/os-Linux/sigio.c index f7b18e157d35..9ba942947146 100644 --- a/arch/um/kernel/sigio_user.c +++ b/arch/um/os-Linux/sigio.c @@ -1,4 +1,4 @@ -/* +/* * Copyright (C) 2002 Jeff Dike (jdike@karaya.com) * Licensed under the GPL */ @@ -20,128 +20,7 @@ #include "sigio.h" #include "os.h" -/* Changed during early boot */ -int pty_output_sigio = 0; -int pty_close_sigio = 0; - -/* Used as a flag during SIGIO testing early in boot */ -static volatile int got_sigio = 0; - -void __init handler(int sig) -{ - got_sigio = 1; -} - -struct openpty_arg { - int master; - int slave; - int err; -}; - -static void openpty_cb(void *arg) -{ - struct openpty_arg *info = arg; - - info->err = 0; - if(openpty(&info->master, &info->slave, NULL, NULL, NULL)) - info->err = -errno; -} - -void __init check_one_sigio(void (*proc)(int, int)) -{ - struct sigaction old, new; - struct openpty_arg pty = { .master = -1, .slave = -1 }; - int master, slave, err; - - initial_thread_cb(openpty_cb, &pty); - if(pty.err){ - printk("openpty failed, errno = %d\n", -pty.err); - return; - } - - master = pty.master; - slave = pty.slave; - - if((master == -1) || (slave == -1)){ - printk("openpty failed to allocate a pty\n"); - return; - } - - /* Not now, but complain so we now where we failed. 
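
The boot-time pty SIGIO probes being deleted from sigio_user.c here reappear nearly verbatim in os-Linux/start_up.c further down. They boil down to: open a pty pair, request SIGIO on the master, fill it until EAGAIN, then drain the slave and see whether the signal arrives (pty_output_sigio). A stand-alone host-side sketch of that probe, with error handling trimmed and the raw-mode setup reduced to cfmakeraw(); link with -lutil for openpty():

    #include <errno.h>
    #include <fcntl.h>
    #include <pty.h>
    #include <signal.h>
    #include <stdio.h>
    #include <string.h>
    #include <termios.h>
    #include <unistd.h>

    static volatile sig_atomic_t got_sigio;
    static void on_sigio(int sig) { got_sigio = 1; }

    int main(void)
    {
            int master, slave;
            char buf[512];
            struct termios tt;

            if (openpty(&master, &slave, NULL, NULL, NULL) < 0)
                    return 1;

            tcgetattr(master, &tt);
            cfmakeraw(&tt);                         /* no echo, no line buffering */
            tcsetattr(master, TCSADRAIN, &tt);

            signal(SIGIO, on_sigio);
            fcntl(master, F_SETOWN, getpid());
            fcntl(master, F_SETFL, fcntl(master, F_GETFL) | O_ASYNC | O_NONBLOCK);
            fcntl(slave, F_SETFL, fcntl(slave, F_GETFL) | O_NONBLOCK);

            memset(buf, 0, sizeof(buf));
            while (write(master, buf, sizeof(buf)) > 0)
                    ;                               /* fill until EAGAIN */
            while (read(slave, buf, sizeof(buf)) > 0 && !got_sigio)
                    ;                               /* drain; SIGIO should fire */

            printf("output SIGIO %s\n", got_sigio ? "works" : "needs workaround");
            return 0;
    }
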
*/ - err = raw(master); - if (err < 0) - panic("check_sigio : __raw failed, errno = %d\n", -err); - - err = os_sigio_async(master, slave); - if(err < 0) - panic("tty_fds : sigio_async failed, err = %d\n", -err); - - if(sigaction(SIGIO, NULL, &old) < 0) - panic("check_sigio : sigaction 1 failed, errno = %d\n", errno); - new = old; - new.sa_handler = handler; - if(sigaction(SIGIO, &new, NULL) < 0) - panic("check_sigio : sigaction 2 failed, errno = %d\n", errno); - - got_sigio = 0; - (*proc)(master, slave); - - os_close_file(master); - os_close_file(slave); - - if(sigaction(SIGIO, &old, NULL) < 0) - panic("check_sigio : sigaction 3 failed, errno = %d\n", errno); -} - -static void tty_output(int master, int slave) -{ - int n; - char buf[512]; - - printk("Checking that host ptys support output SIGIO..."); - - memset(buf, 0, sizeof(buf)); - - while(os_write_file(master, buf, sizeof(buf)) > 0) ; - if(errno != EAGAIN) - panic("check_sigio : write failed, errno = %d\n", errno); - while(((n = os_read_file(slave, buf, sizeof(buf))) > 0) && !got_sigio) ; - - if (got_sigio) { - printk("Yes\n"); - pty_output_sigio = 1; - } else if (n == -EAGAIN) { - printk("No, enabling workaround\n"); - } else { - panic("check_sigio : read failed, err = %d\n", n); - } -} - -static void tty_close(int master, int slave) -{ - printk("Checking that host ptys support SIGIO on close..."); - - os_close_file(slave); - if(got_sigio){ - printk("Yes\n"); - pty_close_sigio = 1; - } - else printk("No, enabling workaround\n"); -} - -void __init check_sigio(void) -{ - if((os_access("/dev/ptmx", OS_ACC_R_OK) < 0) && - (os_access("/dev/ptyp0", OS_ACC_R_OK) < 0)){ - printk("No pseudo-terminals available - skipping pty SIGIO " - "check\n"); - return; - } - check_one_sigio(tty_output); - check_one_sigio(tty_close); -} - -/* Protected by sigio_lock(), also used by sigio_cleanup, which is an +/* Protected by sigio_lock(), also used by sigio_cleanup, which is an * exitcall. */ static int write_sigio_pid = -1; @@ -150,8 +29,10 @@ static int write_sigio_pid = -1; * the descriptors closed after it is killed. So, it can't see them change. * On the UML side, they are changed under the sigio_lock. 
*/ -static int write_sigio_fds[2] = { -1, -1 }; -static int sigio_private[2] = { -1, -1 }; +#define SIGIO_FDS_INIT {-1, -1} + +static int write_sigio_fds[2] = SIGIO_FDS_INIT; +static int sigio_private[2] = SIGIO_FDS_INIT; struct pollfds { struct pollfd *poll; @@ -264,13 +145,13 @@ static void update_thread(void) return; fail: /* Critical section start */ - if(write_sigio_pid != -1) + if(write_sigio_pid != -1) os_kill_process(write_sigio_pid, 1); write_sigio_pid = -1; - os_close_file(sigio_private[0]); - os_close_file(sigio_private[1]); - os_close_file(write_sigio_fds[0]); - os_close_file(write_sigio_fds[1]); + close(sigio_private[0]); + close(sigio_private[1]); + close(write_sigio_fds[0]); + close(write_sigio_fds[1]); /* Critical section end */ set_signals(flags); } @@ -281,13 +162,13 @@ int add_sigio_fd(int fd, int read) sigio_lock(); for(i = 0; i < current_poll.used; i++){ - if(current_poll.poll[i].fd == fd) + if(current_poll.poll[i].fd == fd) goto out; } n = current_poll.used + 1; err = need_poll(n); - if(err) + if(err) goto out; for(i = 0; i < current_poll.used; i++) @@ -316,7 +197,7 @@ int ignore_sigio_fd(int fd) } if(i == current_poll.used) goto out; - + err = need_poll(current_poll.used - 1); if(err) goto out; @@ -337,7 +218,7 @@ int ignore_sigio_fd(int fd) return(err); } -static struct pollfd* setup_initial_poll(int fd) +static struct pollfd *setup_initial_poll(int fd) { struct pollfd *p; @@ -377,7 +258,7 @@ void write_sigio_workaround(void) } err = os_pipe(l_sigio_private, 1, 1); if(err < 0){ - printk("write_sigio_workaround - os_pipe 1 failed, " + printk("write_sigio_workaround - os_pipe 2 failed, " "err = %d\n", -err); goto out_close1; } @@ -391,76 +272,52 @@ void write_sigio_workaround(void) /* Did we race? Don't try to optimize this, please, it's not so likely * to happen, and no more than once at the boot. */ if(write_sigio_pid != -1) - goto out_unlock; + goto out_free; - write_sigio_pid = run_helper_thread(write_sigio_thread, NULL, - CLONE_FILES | CLONE_VM, &stack, 0); - - if (write_sigio_pid < 0) - goto out_clear; + current_poll = ((struct pollfds) { .poll = p, + .used = 1, + .size = 1 }); if (write_sigio_irq(l_write_sigio_fds[0])) - goto out_kill; + goto out_clear_poll; - /* Success, finally. */ memcpy(write_sigio_fds, l_write_sigio_fds, sizeof(l_write_sigio_fds)); memcpy(sigio_private, l_sigio_private, sizeof(l_sigio_private)); - current_poll = ((struct pollfds) { .poll = p, - .used = 1, - .size = 1 }); + write_sigio_pid = run_helper_thread(write_sigio_thread, NULL, + CLONE_FILES | CLONE_VM, &stack, 0); - sigio_unlock(); - return; + if (write_sigio_pid < 0) + goto out_clear; - out_kill: - l_write_sigio_pid = write_sigio_pid; - write_sigio_pid = -1; sigio_unlock(); - /* Going to call waitpid, avoid holding the lock. 
*/ - os_kill_process(l_write_sigio_pid, 1); - goto out_free; + return; - out_clear: +out_clear: write_sigio_pid = -1; - out_unlock: - sigio_unlock(); - out_free: + write_sigio_fds[0] = -1; + write_sigio_fds[1] = -1; + sigio_private[0] = -1; + sigio_private[1] = -1; +out_clear_poll: + current_poll = ((struct pollfds) { .poll = NULL, + .size = 0, + .used = 0 }); +out_free: kfree(p); - out_close2: - os_close_file(l_sigio_private[0]); - os_close_file(l_sigio_private[1]); - out_close1: - os_close_file(l_write_sigio_fds[0]); - os_close_file(l_write_sigio_fds[1]); - return; -} - -int read_sigio_fd(int fd) -{ - int n; - char c; - - n = os_read_file(fd, &c, sizeof(c)); - if(n != sizeof(c)){ - if(n < 0) { - printk("read_sigio_fd - read failed, err = %d\n", -n); - return(n); - } - else { - printk("read_sigio_fd - short read, bytes = %d\n", n); - return(-EIO); - } - } - return(n); + sigio_unlock(); +out_close2: + close(l_sigio_private[0]); + close(l_sigio_private[1]); +out_close1: + close(l_write_sigio_fds[0]); + close(l_write_sigio_fds[1]); } -static void sigio_cleanup(void) +void sigio_cleanup(void) { - if (write_sigio_pid != -1) { + if(write_sigio_pid != -1){ os_kill_process(write_sigio_pid, 1); write_sigio_pid = -1; } } - -__uml_exitcall(sigio_cleanup); diff --git a/arch/um/os-Linux/start_up.c b/arch/um/os-Linux/start_up.c index 829d6b0d8b02..32753131f8d8 100644 --- a/arch/um/os-Linux/start_up.c +++ b/arch/um/os-Linux/start_up.c @@ -3,6 +3,7 @@ * Licensed under the GPL */ +#include <pty.h> #include <stdio.h> #include <stddef.h> #include <stdarg.h> @@ -539,3 +540,130 @@ int __init parse_iomem(char *str, int *add) return(1); } + +/* Changed during early boot */ +int pty_output_sigio = 0; +int pty_close_sigio = 0; + +/* Used as a flag during SIGIO testing early in boot */ +static volatile int got_sigio = 0; + +static void __init handler(int sig) +{ + got_sigio = 1; +} + +struct openpty_arg { + int master; + int slave; + int err; +}; + +static void openpty_cb(void *arg) +{ + struct openpty_arg *info = arg; + + info->err = 0; + if(openpty(&info->master, &info->slave, NULL, NULL, NULL)) + info->err = -errno; +} + +static void __init check_one_sigio(void (*proc)(int, int)) +{ + struct sigaction old, new; + struct openpty_arg pty = { .master = -1, .slave = -1 }; + int master, slave, err; + + initial_thread_cb(openpty_cb, &pty); + if(pty.err){ + printk("openpty failed, errno = %d\n", -pty.err); + return; + } + + master = pty.master; + slave = pty.slave; + + if((master == -1) || (slave == -1)){ + printk("openpty failed to allocate a pty\n"); + return; + } + + /* Not now, but complain so we now where we failed. 
*/ + err = raw(master); + if (err < 0) + panic("check_sigio : __raw failed, errno = %d\n", -err); + + err = os_sigio_async(master, slave); + if(err < 0) + panic("tty_fds : sigio_async failed, err = %d\n", -err); + + if(sigaction(SIGIO, NULL, &old) < 0) + panic("check_sigio : sigaction 1 failed, errno = %d\n", errno); + new = old; + new.sa_handler = handler; + if(sigaction(SIGIO, &new, NULL) < 0) + panic("check_sigio : sigaction 2 failed, errno = %d\n", errno); + + got_sigio = 0; + (*proc)(master, slave); + + close(master); + close(slave); + + if(sigaction(SIGIO, &old, NULL) < 0) + panic("check_sigio : sigaction 3 failed, errno = %d\n", errno); +} + +static void tty_output(int master, int slave) +{ + int n; + char buf[512]; + + printk("Checking that host ptys support output SIGIO..."); + + memset(buf, 0, sizeof(buf)); + + while(os_write_file(master, buf, sizeof(buf)) > 0) ; + if(errno != EAGAIN) + panic("check_sigio : write failed, errno = %d\n", errno); + while(((n = os_read_file(slave, buf, sizeof(buf))) > 0) && !got_sigio) ; + + if(got_sigio){ + printk("Yes\n"); + pty_output_sigio = 1; + } + else if(n == -EAGAIN) printk("No, enabling workaround\n"); + else panic("check_sigio : read failed, err = %d\n", n); +} + +static void tty_close(int master, int slave) +{ + printk("Checking that host ptys support SIGIO on close..."); + + close(slave); + if(got_sigio){ + printk("Yes\n"); + pty_close_sigio = 1; + } + else printk("No, enabling workaround\n"); +} + +void __init check_sigio(void) +{ + if((os_access("/dev/ptmx", OS_ACC_R_OK) < 0) && + (os_access("/dev/ptyp0", OS_ACC_R_OK) < 0)){ + printk("No pseudo-terminals available - skipping pty SIGIO " + "check\n"); + return; + } + check_one_sigio(tty_output); + check_one_sigio(tty_close); +} + +void os_check_bugs(void) +{ + check_ptrace(); + check_sigio(); + check_devanon(); +} + diff --git a/arch/um/os-Linux/tt.c b/arch/um/os-Linux/tt.c index 919d19f11537..5461a065bbb9 100644 --- a/arch/um/os-Linux/tt.c +++ b/arch/um/os-Linux/tt.c @@ -110,6 +110,16 @@ int wait_for_stop(int pid, int sig, int cont_type, void *relay) } } +void forward_ipi(int fd, int pid) +{ + int err; + + err = os_set_owner(fd, pid); + if(err < 0) + printk("forward_ipi: set_owner failed, fd = %d, me = %d, " + "target = %d, err = %d\n", fd, os_getpid(), pid, -err); +} + /* *------------------------- * only for tt mode (will be deleted in future...) diff --git a/arch/um/kernel/tty_log.c b/arch/um/os-Linux/tty_log.c index 9ada656f68ce..c6ba56c1560f 100644 --- a/arch/um/kernel/tty_log.c +++ b/arch/um/os-Linux/tty_log.c @@ -1,5 +1,5 @@ -/* - * Copyright (C) 2002 Jeff Dike (jdike@karaya.com) and +/* + * Copyright (C) 2002 Jeff Dike (jdike@karaya.com) and * geoffrey hing <ghing@net.ohio-state.edu> * Licensed under the GPL */ @@ -58,7 +58,7 @@ int open_tty_log(void *tty, void *current_tty) return(tty_log_fd); } - sprintf(buf, "%s/%0u-%0u", tty_log_dir, (unsigned int) tv.tv_sec, + sprintf(buf, "%s/%0u-%0u", tty_log_dir, (unsigned int) tv.tv_sec, (unsigned int) tv.tv_usec); fd = os_open_file(buf, of_append(of_create(of_rdwr(OPENFLAGS()))), @@ -216,15 +216,3 @@ __uml_setup("tty_log_fd=", set_tty_log_fd, " tty data will be written. Preconfigure the descriptor with something\n" " like '10>tty_log tty_log_fd=10'.\n\n" ); - - -/* - * Overrides for Emacs so that we follow Linus's tabbing style. - * Emacs will notice this stuff at the end of the file and automatically - * adjust the settings for this buffer only. This must remain at the end - * of the file. 
- * --------------------------------------------------------------------------- - * Local variables: - * c-file-style: "linux" - * End: - */ diff --git a/arch/um/os-Linux/umid.c b/arch/um/os-Linux/umid.c index ecf107ae5ac8..198e59163288 100644 --- a/arch/um/os-Linux/umid.c +++ b/arch/um/os-Linux/umid.c @@ -143,8 +143,10 @@ static int not_dead_yet(char *dir) goto out_close; } - if((kill(p, 0) == 0) || (errno != ESRCH)) + if((kill(p, 0) == 0) || (errno != ESRCH)){ + printk("umid \"%s\" is already in use by pid %d\n", umid, p); return 1; + } err = actually_do_remove(dir); if(err) @@ -234,33 +236,44 @@ int __init make_umid(void) err = mkdir(tmp, 0777); if(err < 0){ err = -errno; - if(errno != EEXIST) + if(err != -EEXIST) goto err; - if(not_dead_yet(tmp) < 0) + /* 1 -> this umid is already in use + * < 0 -> we couldn't remove the umid directory + * In either case, we can't use this umid, so return -EEXIST. + */ + if(not_dead_yet(tmp) != 0) goto err; err = mkdir(tmp, 0777); } - if(err < 0){ - printk("Failed to create '%s' - err = %d\n", umid, err); - goto err_rmdir; + if(err){ + err = -errno; + printk("Failed to create '%s' - err = %d\n", umid, -errno); + goto err; } umid_setup = 1; create_pid_file(); - return 0; - - err_rmdir: - rmdir(tmp); + err = 0; err: return err; } static int __init make_umid_init(void) { + if(!make_umid()) + return 0; + + /* If initializing with the given umid failed, then try again with + * a random one. + */ + printk("Failed to initialize umid \"%s\", trying with a random umid\n", + umid); + *umid = '\0'; make_umid(); return 0; diff --git a/arch/um/sys-i386/ptrace.c b/arch/um/sys-i386/ptrace.c index e839ce65ad28..8032a105949a 100644 --- a/arch/um/sys-i386/ptrace.c +++ b/arch/um/sys-i386/ptrace.c @@ -6,6 +6,7 @@ #include <linux/config.h> #include <linux/compiler.h> #include "linux/sched.h" +#include "linux/mm.h" #include "asm/elf.h" #include "asm/ptrace.h" #include "asm/uaccess.h" @@ -26,9 +27,17 @@ int is_syscall(unsigned long addr) n = copy_from_user(&instr, (void __user *) addr, sizeof(instr)); if(n){ - printk("is_syscall : failed to read instruction from 0x%lx\n", - addr); - return(0); + /* access_process_vm() grants access to vsyscall and stub, + * while copy_from_user doesn't. Maybe access_process_vm is + * slow, but that doesn't matter, since it will be called only + * in case of singlestepping, if copy_from_user failed. 
+ */ + n = access_process_vm(current, addr, &instr, sizeof(instr), 0); + if(n != sizeof(instr)) { + printk("is_syscall : failed to read instruction from " + "0x%lx\n", addr); + return(1); + } } /* int 0x80 or sysenter */ return((instr == 0x80cd) || (instr == 0x340f)); diff --git a/arch/um/sys-i386/signal.c b/arch/um/sys-i386/signal.c index 7cd1a82dc8c2..33a40f5ef0d2 100644 --- a/arch/um/sys-i386/signal.c +++ b/arch/um/sys-i386/signal.c @@ -58,7 +58,7 @@ static int copy_sc_from_user_skas(struct pt_regs *regs, } int copy_sc_to_user_skas(struct sigcontext *to, struct _fpstate *to_fp, - struct pt_regs *regs) + struct pt_regs *regs, unsigned long sp) { struct sigcontext sc; unsigned long fpregs[HOST_FP_SIZE]; @@ -72,7 +72,7 @@ int copy_sc_to_user_skas(struct sigcontext *to, struct _fpstate *to_fp, sc.edi = REGS_EDI(regs->regs.skas.regs); sc.esi = REGS_ESI(regs->regs.skas.regs); sc.ebp = REGS_EBP(regs->regs.skas.regs); - sc.esp = REGS_SP(regs->regs.skas.regs); + sc.esp = sp; sc.ebx = REGS_EBX(regs->regs.skas.regs); sc.edx = REGS_EDX(regs->regs.skas.regs); sc.ecx = REGS_ECX(regs->regs.skas.regs); @@ -132,7 +132,7 @@ int copy_sc_from_user_tt(struct sigcontext *to, struct sigcontext *from, } int copy_sc_to_user_tt(struct sigcontext *to, struct _fpstate *fp, - struct sigcontext *from, int fpsize) + struct sigcontext *from, int fpsize, unsigned long sp) { struct _fpstate *to_fp, *from_fp; int err; @@ -140,11 +140,18 @@ int copy_sc_to_user_tt(struct sigcontext *to, struct _fpstate *fp, to_fp = (fp ? fp : (struct _fpstate *) (to + 1)); from_fp = from->fpstate; err = copy_to_user(to, from, sizeof(*to)); + + /* The SP in the sigcontext is the updated one for the signal + * delivery. The sp passed in is the original, and this needs + * to be restored, so we stick it in separately. + */ + err |= copy_to_user(&SC_SP(to), sp, sizeof(sp)); + if(from_fp != NULL){ err |= copy_to_user(&to->fpstate, &to_fp, sizeof(to->fpstate)); err |= copy_to_user(to_fp, from_fp, fpsize); } - return(err); + return err; } #endif @@ -159,11 +166,11 @@ static int copy_sc_from_user(struct pt_regs *to, void __user *from) } static int copy_sc_to_user(struct sigcontext *to, struct _fpstate *fp, - struct pt_regs *from) + struct pt_regs *from, unsigned long sp) { return(CHOOSE_MODE(copy_sc_to_user_tt(to, fp, UPT_SC(&from->regs), - sizeof(*fp)), - copy_sc_to_user_skas(to, fp, from))); + sizeof(*fp), sp), + copy_sc_to_user_skas(to, fp, from, sp))); } static int copy_ucontext_to_user(struct ucontext *uc, struct _fpstate *fp, @@ -174,7 +181,7 @@ static int copy_ucontext_to_user(struct ucontext *uc, struct _fpstate *fp, err |= put_user(current->sas_ss_sp, &uc->uc_stack.ss_sp); err |= put_user(sas_ss_flags(sp), &uc->uc_stack.ss_flags); err |= put_user(current->sas_ss_size, &uc->uc_stack.ss_size); - err |= copy_sc_to_user(&uc->uc_mcontext, fp, ¤t->thread.regs); + err |= copy_sc_to_user(&uc->uc_mcontext, fp, ¤t->thread.regs, sp); err |= copy_to_user(&uc->uc_sigmask, set, sizeof(*set)); return(err); } @@ -207,6 +214,7 @@ int setup_signal_stack_sc(unsigned long stack_top, int sig, { struct sigframe __user *frame; void *restorer; + unsigned long save_sp = PT_REGS_SP(regs); int err = 0; stack_top &= -8UL; @@ -218,9 +226,19 @@ int setup_signal_stack_sc(unsigned long stack_top, int sig, if(ka->sa.sa_flags & SA_RESTORER) restorer = ka->sa.sa_restorer; + /* Update SP now because the page fault handler refuses to extend + * the stack if the faulting address is too far below the current + * SP, which frame now certainly is. 
If there's an error, the original + * value is restored on the way out. + * When writing the sigcontext to the stack, we have to write the + * original value, so that's passed to copy_sc_to_user, which does + * the right thing with it. + */ + PT_REGS_SP(regs) = (unsigned long) frame; + err |= __put_user(restorer, &frame->pretcode); err |= __put_user(sig, &frame->sig); - err |= copy_sc_to_user(&frame->sc, NULL, regs); + err |= copy_sc_to_user(&frame->sc, NULL, regs, save_sp); err |= __put_user(mask->sig[0], &frame->sc.oldmask); if (_NSIG_WORDS > 1) err |= __copy_to_user(&frame->extramask, &mask->sig[1], @@ -238,7 +256,7 @@ int setup_signal_stack_sc(unsigned long stack_top, int sig, err |= __put_user(0x80cd, (short __user *)(frame->retcode+6)); if(err) - return(err); + goto err; PT_REGS_SP(regs) = (unsigned long) frame; PT_REGS_IP(regs) = (unsigned long) ka->sa.sa_handler; @@ -248,7 +266,11 @@ int setup_signal_stack_sc(unsigned long stack_top, int sig, if ((current->ptrace & PT_DTRACE) && (current->ptrace & PT_PTRACED)) ptrace_notify(SIGTRAP); - return(0); + return 0; + +err: + PT_REGS_SP(regs) = save_sp; + return err; } int setup_signal_stack_si(unsigned long stack_top, int sig, @@ -257,6 +279,7 @@ int setup_signal_stack_si(unsigned long stack_top, int sig, { struct rt_sigframe __user *frame; void *restorer; + unsigned long save_sp = PT_REGS_SP(regs); int err = 0; stack_top &= -8UL; @@ -268,13 +291,16 @@ int setup_signal_stack_si(unsigned long stack_top, int sig, if(ka->sa.sa_flags & SA_RESTORER) restorer = ka->sa.sa_restorer; + /* See comment above about why this is here */ + PT_REGS_SP(regs) = (unsigned long) frame; + err |= __put_user(restorer, &frame->pretcode); err |= __put_user(sig, &frame->sig); err |= __put_user(&frame->info, &frame->pinfo); err |= __put_user(&frame->uc, &frame->puc); err |= copy_siginfo_to_user(&frame->info, info); err |= copy_ucontext_to_user(&frame->uc, &frame->fpstate, mask, - PT_REGS_SP(regs)); + save_sp); /* * This is movl $,%eax ; int $0x80 @@ -288,9 +314,8 @@ int setup_signal_stack_si(unsigned long stack_top, int sig, err |= __put_user(0x80cd, (short __user *)(frame->retcode+5)); if(err) - return(err); + goto err; - PT_REGS_SP(regs) = (unsigned long) frame; PT_REGS_IP(regs) = (unsigned long) ka->sa.sa_handler; PT_REGS_EAX(regs) = (unsigned long) sig; PT_REGS_EDX(regs) = (unsigned long) &frame->info; @@ -298,7 +323,11 @@ int setup_signal_stack_si(unsigned long stack_top, int sig, if ((current->ptrace & PT_DTRACE) && (current->ptrace & PT_PTRACED)) ptrace_notify(SIGTRAP); - return(0); + return 0; + +err: + PT_REGS_SP(regs) = save_sp; + return err; } long sys_sigreturn(struct pt_regs regs) diff --git a/arch/um/sys-i386/user-offsets.c b/arch/um/sys-i386/user-offsets.c index 26b68675053d..6f4ef2b7fa4a 100644 --- a/arch/um/sys-i386/user-offsets.c +++ b/arch/um/sys-i386/user-offsets.c @@ -3,12 +3,13 @@ #include <asm/ptrace.h> #include <asm/user.h> #include <linux/stddef.h> +#include <sys/poll.h> #define DEFINE(sym, val) \ - asm volatile("\n->" #sym " %0 " #val : : "i" (val)) + asm volatile("\n->" #sym " %0 " #val : : "i" (val)) #define DEFINE_LONGS(sym, val) \ - asm volatile("\n->" #sym " %0 " #val : : "i" (val/sizeof(unsigned long))) + asm volatile("\n->" #sym " %0 " #val : : "i" (val/sizeof(unsigned long))) #define OFFSET(sym, str, mem) \ DEFINE(sym, offsetof(struct str, mem)); @@ -67,4 +68,9 @@ void foo(void) DEFINE(HOST_ES, ES); DEFINE(HOST_GS, GS); DEFINE(UM_FRAME_SIZE, sizeof(struct user_regs_struct)); + + /* XXX Duplicated between i386 and x86_64 */ + 
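
(The UM_POLL* constants that follow are exported with the usual asm-offsets trick that DEFINE() implements: user-offsets.c is only ever compiled to assembly, each DEFINE() leaves a "->NAME value" marker line in the .s output, and a sed pass over those markers generates a header of plain #defines, the same mechanism Kbuild uses for asm-offsets.h; the exact post-processing varies by tree. A standalone illustration, with the file name and DEMO_* symbols invented for the example:

/* offsets-demo.c - illustration only, not part of the patch.
 * Build with "cc -S offsets-demo.c" and inspect offsets-demo.s:
 * every DEFINE() emits a "->NAME <value> <expr>" marker line that
 * a small sed/awk pass can rewrite as "#define NAME <value>".
 */
#include <sys/poll.h>
#include <stddef.h>

#define DEFINE(sym, val) \
        asm volatile("\n->" #sym " %0 " #val : : "i" (val))

struct demo {
        int a;
        long b;
};

void foo(void)
{
        DEFINE(DEMO_POLLIN, POLLIN);              /* plain constant   */
        DEFINE(DEMO_B, offsetof(struct demo, b)); /* structure offset */
}

Nothing in foo() is ever executed; the function exists only so the compiler materializes the immediates into the assembly text.)
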
DEFINE(UM_POLLIN, POLLIN); + DEFINE(UM_POLLPRI, POLLPRI); + DEFINE(UM_POLLOUT, POLLOUT); } diff --git a/arch/um/sys-x86_64/ptrace.c b/arch/um/sys-x86_64/ptrace.c index 74eee5c7c6dd..147bbf05cbc2 100644 --- a/arch/um/sys-x86_64/ptrace.c +++ b/arch/um/sys-x86_64/ptrace.c @@ -8,6 +8,7 @@ #include <asm/ptrace.h> #include <linux/sched.h> #include <linux/errno.h> +#include <linux/mm.h> #include <asm/uaccess.h> #include <asm/elf.h> @@ -136,9 +137,28 @@ void arch_switch(void) */ } +/* XXX Mostly copied from sys-i386 */ int is_syscall(unsigned long addr) { - panic("is_syscall"); + unsigned short instr; + int n; + + n = copy_from_user(&instr, (void __user *) addr, sizeof(instr)); + if(n){ + /* access_process_vm() grants access to vsyscall and stub, + * while copy_from_user doesn't. Maybe access_process_vm is + * slow, but that doesn't matter, since it will be called only + * in case of singlestepping, if copy_from_user failed. + */ + n = access_process_vm(current, addr, &instr, sizeof(instr), 0); + if(n != sizeof(instr)) { + printk("is_syscall : failed to read instruction from " + "0x%lx\n", addr); + return(1); + } + } + /* sysenter */ + return(instr == 0x050f); } int dump_fpu(struct pt_regs *regs, elf_fpregset_t *fpu ) diff --git a/arch/um/sys-x86_64/signal.c b/arch/um/sys-x86_64/signal.c index fe1d065332b1..e75c4e1838b0 100644 --- a/arch/um/sys-x86_64/signal.c +++ b/arch/um/sys-x86_64/signal.c @@ -55,7 +55,8 @@ static int copy_sc_from_user_skas(struct pt_regs *regs, } int copy_sc_to_user_skas(struct sigcontext *to, struct _fpstate *to_fp, - struct pt_regs *regs, unsigned long mask) + struct pt_regs *regs, unsigned long mask, + unsigned long sp) { struct faultinfo * fi = ¤t->thread.arch.faultinfo; int err = 0; @@ -70,7 +71,11 @@ int copy_sc_to_user_skas(struct sigcontext *to, struct _fpstate *to_fp, err |= PUTREG(regs, RDI, to, rdi); err |= PUTREG(regs, RSI, to, rsi); err |= PUTREG(regs, RBP, to, rbp); - err |= PUTREG(regs, RSP, to, rsp); + /* Must use orignal RSP, which is passed in, rather than what's in + * the pt_regs, because that's already been updated to point at the + * signal frame. + */ + err |= __put_user(sp, &to->rsp); err |= PUTREG(regs, RBX, to, rbx); err |= PUTREG(regs, RDX, to, rdx); err |= PUTREG(regs, RCX, to, rcx); @@ -102,7 +107,7 @@ int copy_sc_to_user_skas(struct sigcontext *to, struct _fpstate *to_fp, #ifdef CONFIG_MODE_TT int copy_sc_from_user_tt(struct sigcontext *to, struct sigcontext *from, - int fpsize) + int fpsize) { struct _fpstate *to_fp, *from_fp; unsigned long sigs; @@ -120,7 +125,7 @@ int copy_sc_from_user_tt(struct sigcontext *to, struct sigcontext *from, } int copy_sc_to_user_tt(struct sigcontext *to, struct _fpstate *fp, - struct sigcontext *from, int fpsize) + struct sigcontext *from, int fpsize, unsigned long sp) { struct _fpstate *to_fp, *from_fp; int err; @@ -128,11 +133,17 @@ int copy_sc_to_user_tt(struct sigcontext *to, struct _fpstate *fp, to_fp = (fp ? fp : (struct _fpstate *) (to + 1)); from_fp = from->fpstate; err = copy_to_user(to, from, sizeof(*to)); + /* The SP in the sigcontext is the updated one for the signal + * delivery. The sp passed in is the original, and this needs + * to be restored, so we stick it in separately. 
+ */ + err |= copy_to_user(&SC_SP(to), sp, sizeof(sp)); + if(from_fp != NULL){ err |= copy_to_user(&to->fpstate, &to_fp, sizeof(to->fpstate)); err |= copy_to_user(to_fp, from_fp, fpsize); } - return(err); + return err; } #endif @@ -148,11 +159,12 @@ static int copy_sc_from_user(struct pt_regs *to, void __user *from) } static int copy_sc_to_user(struct sigcontext *to, struct _fpstate *fp, - struct pt_regs *from, unsigned long mask) + struct pt_regs *from, unsigned long mask, + unsigned long sp) { return(CHOOSE_MODE(copy_sc_to_user_tt(to, fp, UPT_SC(&from->regs), - sizeof(*fp)), - copy_sc_to_user_skas(to, fp, from, mask))); + sizeof(*fp), sp), + copy_sc_to_user_skas(to, fp, from, mask, sp))); } struct rt_sigframe @@ -170,6 +182,7 @@ int setup_signal_stack_si(unsigned long stack_top, int sig, { struct rt_sigframe __user *frame; struct _fpstate __user *fp = NULL; + unsigned long save_sp = PT_REGS_RSP(regs); int err = 0; struct task_struct *me = current; @@ -193,14 +206,25 @@ int setup_signal_stack_si(unsigned long stack_top, int sig, goto out; } + /* Update SP now because the page fault handler refuses to extend + * the stack if the faulting address is too far below the current + * SP, which frame now certainly is. If there's an error, the original + * value is restored on the way out. + * When writing the sigcontext to the stack, we have to write the + * original value, so that's passed to copy_sc_to_user, which does + * the right thing with it. + */ + PT_REGS_RSP(regs) = (unsigned long) frame; + /* Create the ucontext. */ err |= __put_user(0, &frame->uc.uc_flags); err |= __put_user(0, &frame->uc.uc_link); err |= __put_user(me->sas_ss_sp, &frame->uc.uc_stack.ss_sp); - err |= __put_user(sas_ss_flags(PT_REGS_SP(regs)), + err |= __put_user(sas_ss_flags(save_sp), &frame->uc.uc_stack.ss_flags); err |= __put_user(me->sas_ss_size, &frame->uc.uc_stack.ss_size); - err |= copy_sc_to_user(&frame->uc.uc_mcontext, fp, regs, set->sig[0]); + err |= copy_sc_to_user(&frame->uc.uc_mcontext, fp, regs, set->sig[0], + save_sp); err |= __put_user(fp, &frame->uc.uc_mcontext.fpstate); if (sizeof(*set) == 16) { __put_user(set->sig[0], &frame->uc.uc_sigmask.sig[0]); @@ -217,10 +241,10 @@ int setup_signal_stack_si(unsigned long stack_top, int sig, err |= __put_user(ka->sa.sa_restorer, &frame->pretcode); else /* could use a vstub here */ - goto out; + goto restore_sp; if (err) - goto out; + goto restore_sp; /* Set up registers for signal handler */ { @@ -238,10 +262,12 @@ int setup_signal_stack_si(unsigned long stack_top, int sig, PT_REGS_RSI(regs) = (unsigned long) &frame->info; PT_REGS_RDX(regs) = (unsigned long) &frame->uc; PT_REGS_RIP(regs) = (unsigned long) ka->sa.sa_handler; - - PT_REGS_RSP(regs) = (unsigned long) frame; out: - return(err); + return err; + +restore_sp: + PT_REGS_RSP(regs) = save_sp; + return err; } long sys_rt_sigreturn(struct pt_regs *regs) diff --git a/arch/um/sys-x86_64/user-offsets.c b/arch/um/sys-x86_64/user-offsets.c index 7bd54a921cf7..899cebb57c3f 100644 --- a/arch/um/sys-x86_64/user-offsets.c +++ b/arch/um/sys-x86_64/user-offsets.c @@ -1,6 +1,7 @@ #include <stdio.h> #include <stddef.h> #include <signal.h> +#include <sys/poll.h> #define __FRAME_OFFSETS #include <asm/ptrace.h> #include <asm/types.h> @@ -88,4 +89,9 @@ void foo(void) DEFINE_LONGS(HOST_IP, RIP); DEFINE_LONGS(HOST_SP, RSP); DEFINE(UM_FRAME_SIZE, sizeof(struct user_regs_struct)); + + /* XXX Duplicated between i386 and x86_64 */ + DEFINE(UM_POLLIN, POLLIN); + DEFINE(UM_POLLPRI, POLLPRI); + DEFINE(UM_POLLOUT, POLLOUT); } diff 
--git a/arch/v850/Kconfig b/arch/v850/Kconfig index e7fc3e500342..37ec644603ab 100644 --- a/arch/v850/Kconfig +++ b/arch/v850/Kconfig @@ -16,6 +16,12 @@ config RWSEM_GENERIC_SPINLOCK config RWSEM_XCHGADD_ALGORITHM bool default n +config GENERIC_FIND_NEXT_BIT + bool + default y +config GENERIC_HWEIGHT + bool + default y config GENERIC_CALIBRATE_DELAY bool default y diff --git a/arch/x86_64/Kconfig b/arch/x86_64/Kconfig index 6420baeb8c1f..4310b4a311a5 100644 --- a/arch/x86_64/Kconfig +++ b/arch/x86_64/Kconfig @@ -45,6 +45,10 @@ config RWSEM_GENERIC_SPINLOCK config RWSEM_XCHGADD_ALGORITHM bool +config GENERIC_HWEIGHT + bool + default y + config GENERIC_CALIBRATE_DELAY bool default y @@ -246,6 +250,15 @@ config SCHED_SMT cost of slightly increased overhead in some places. If unsure say N here. +config SCHED_MC + bool "Multi-core scheduler support" + depends on SMP + default y + help + Multi-core scheduler support improves the CPU scheduler's decision + making when dealing with multi-core CPU chips at a cost of slightly + increased overhead in some places. If unsure say N here. + source "kernel/Kconfig.preempt" config NUMA @@ -321,6 +334,10 @@ config HAVE_ARCH_EARLY_PFN_TO_NID def_bool y depends on NUMA +config OUT_OF_LINE_PFN_TO_PAGE + def_bool y + depends on DISCONTIGMEM + config NR_CPUS int "Maximum number of CPUs (2-256)" range 2 255 diff --git a/arch/x86_64/Makefile b/arch/x86_64/Makefile index 0fbc0283609c..585fd4a559c8 100644 --- a/arch/x86_64/Makefile +++ b/arch/x86_64/Makefile @@ -70,7 +70,7 @@ drivers-$(CONFIG_OPROFILE) += arch/x86_64/oprofile/ boot := arch/x86_64/boot PHONY += bzImage bzlilo install archmrproper \ - fdimage fdimage144 fdimage288 archclean + fdimage fdimage144 fdimage288 isoimage archclean #Default target when executing "make" all: bzImage @@ -87,7 +87,7 @@ bzlilo: vmlinux bzdisk: vmlinux $(Q)$(MAKE) $(build)=$(boot) BOOTIMAGE=$(BOOTIMAGE) zdisk -fdimage fdimage144 fdimage288: vmlinux +fdimage fdimage144 fdimage288 isoimage: vmlinux $(Q)$(MAKE) $(build)=$(boot) BOOTIMAGE=$(BOOTIMAGE) $@ install: @@ -99,11 +99,16 @@ archclean: define archhelp echo '* bzImage - Compressed kernel image (arch/$(ARCH)/boot/bzImage)' echo ' install - Install kernel using' - echo ' (your) ~/bin/installkernel or' - echo ' (distribution) /sbin/installkernel or' - echo ' install to $$(INSTALL_PATH) and run lilo' + echo ' (your) ~/bin/installkernel or' + echo ' (distribution) /sbin/installkernel or' + echo ' install to $$(INSTALL_PATH) and run lilo' + echo ' bzdisk - Create a boot floppy in /dev/fd0' + echo ' fdimage - Create a boot floppy image' + echo ' isoimage - Create a boot CD-ROM image' endef -CLEAN_FILES += arch/$(ARCH)/boot/fdimage arch/$(ARCH)/boot/mtools.conf +CLEAN_FILES += arch/$(ARCH)/boot/fdimage \ + arch/$(ARCH)/boot/image.iso \ + arch/$(ARCH)/boot/mtools.conf diff --git a/arch/x86_64/boot/Makefile b/arch/x86_64/boot/Makefile index 29f8396ed151..43ee6c50c277 100644 --- a/arch/x86_64/boot/Makefile +++ b/arch/x86_64/boot/Makefile @@ -60,8 +60,12 @@ $(obj)/setup $(obj)/bootsect: %: %.o FORCE $(obj)/compressed/vmlinux: FORCE $(Q)$(MAKE) $(build)=$(obj)/compressed IMAGE_OFFSET=$(IMAGE_OFFSET) $@ -# Set this if you want to pass append arguments to the zdisk/fdimage kernel +# Set this if you want to pass append arguments to the zdisk/fdimage/isoimage kernel FDARGS = +# Set this if you want an initrd included with the zdisk/fdimage/isoimage kernel +FDINITRD = + +image_cmdline = default linux $(FDARGS) $(if $(FDINITRD),initrd=initrd.img,) $(obj)/mtools.conf: $(src)/mtools.conf.in sed -e 
's|@OBJ@|$(obj)|g' < $< > $@ @@ -70,8 +74,11 @@ $(obj)/mtools.conf: $(src)/mtools.conf.in zdisk: $(BOOTIMAGE) $(obj)/mtools.conf MTOOLSRC=$(obj)/mtools.conf mformat a: ; sync syslinux /dev/fd0 ; sync - echo 'default linux $(FDARGS)' | \ + echo '$(image_cmdline)' | \ MTOOLSRC=$(obj)/mtools.conf mcopy - a:syslinux.cfg + if [ -f '$(FDINITRD)' ] ; then \ + MTOOLSRC=$(obj)/mtools.conf mcopy '$(FDINITRD)' a:initrd.img ; \ + fi MTOOLSRC=$(obj)/mtools.conf mcopy $(BOOTIMAGE) a:linux ; sync # These require being root or having syslinux 2.02 or higher installed @@ -79,18 +86,39 @@ fdimage fdimage144: $(BOOTIMAGE) $(obj)/mtools.conf dd if=/dev/zero of=$(obj)/fdimage bs=1024 count=1440 MTOOLSRC=$(obj)/mtools.conf mformat v: ; sync syslinux $(obj)/fdimage ; sync - echo 'default linux $(FDARGS)' | \ + echo '$(image_cmdline)' | \ MTOOLSRC=$(obj)/mtools.conf mcopy - v:syslinux.cfg + if [ -f '$(FDINITRD)' ] ; then \ + MTOOLSRC=$(obj)/mtools.conf mcopy '$(FDINITRD)' v:initrd.img ; \ + fi MTOOLSRC=$(obj)/mtools.conf mcopy $(BOOTIMAGE) v:linux ; sync fdimage288: $(BOOTIMAGE) $(obj)/mtools.conf dd if=/dev/zero of=$(obj)/fdimage bs=1024 count=2880 MTOOLSRC=$(obj)/mtools.conf mformat w: ; sync syslinux $(obj)/fdimage ; sync - echo 'default linux $(FDARGS)' | \ + echo '$(image_cmdline)' | \ MTOOLSRC=$(obj)/mtools.conf mcopy - w:syslinux.cfg + if [ -f '$(FDINITRD)' ] ; then \ + MTOOLSRC=$(obj)/mtools.conf mcopy '$(FDINITRD)' w:initrd.img ; \ + fi MTOOLSRC=$(obj)/mtools.conf mcopy $(BOOTIMAGE) w:linux ; sync +isoimage: $(BOOTIMAGE) + -rm -rf $(obj)/isoimage + mkdir $(obj)/isoimage + cp `echo /usr/lib*/syslinux/isolinux.bin | awk '{ print $1; }'` \ + $(obj)/isoimage + cp $(BOOTIMAGE) $(obj)/isoimage/linux + echo '$(image_cmdline)' > $(obj)/isoimage/isolinux.cfg + if [ -f '$(FDINITRD)' ] ; then \ + cp '$(FDINITRD)' $(obj)/isoimage/initrd.img ; \ + fi + mkisofs -J -r -o $(obj)/image.iso -b isolinux.bin -c boot.cat \ + -no-emul-boot -boot-load-size 4 -boot-info-table \ + $(obj)/isoimage + rm -rf $(obj)/isoimage + zlilo: $(BOOTIMAGE) if [ -f $(INSTALL_PATH)/vmlinuz ]; then mv $(INSTALL_PATH)/vmlinuz $(INSTALL_PATH)/vmlinuz.old; fi if [ -f $(INSTALL_PATH)/System.map ]; then mv $(INSTALL_PATH)/System.map $(INSTALL_PATH)/System.old; fi diff --git a/arch/x86_64/ia32/ia32entry.S b/arch/x86_64/ia32/ia32entry.S index 00dee176c08e..35b2faccdc6c 100644 --- a/arch/x86_64/ia32/ia32entry.S +++ b/arch/x86_64/ia32/ia32entry.S @@ -501,7 +501,7 @@ ia32_sys_call_table: .quad sys_setdomainname .quad sys_uname .quad sys_modify_ldt - .quad sys32_adjtimex + .quad compat_sys_adjtimex .quad sys32_mprotect /* 125 */ .quad compat_sys_sigprocmask .quad quiet_ni_syscall /* create_module */ @@ -688,6 +688,8 @@ ia32_sys_call_table: .quad sys_ni_syscall /* pselect6 for now */ .quad sys_ni_syscall /* ppoll for now */ .quad sys_unshare /* 310 */ + .quad compat_sys_set_robust_list + .quad compat_sys_get_robust_list ia32_syscall_end: .rept IA32_NR_syscalls-(ia32_syscall_end-ia32_sys_call_table)/8 .quad ni_syscall diff --git a/arch/x86_64/ia32/sys_ia32.c b/arch/x86_64/ia32/sys_ia32.c index 2b2d029f477c..f182b20858e2 100644 --- a/arch/x86_64/ia32/sys_ia32.c +++ b/arch/x86_64/ia32/sys_ia32.c @@ -30,7 +30,6 @@ #include <linux/resource.h> #include <linux/times.h> #include <linux/utsname.h> -#include <linux/timex.h> #include <linux/smp.h> #include <linux/smp_lock.h> #include <linux/sem.h> @@ -767,82 +766,6 @@ sys32_sendfile(int out_fd, int in_fd, compat_off_t __user *offset, s32 count) return ret; } -/* Handle adjtimex compatibility. 
*/ - -struct timex32 { - u32 modes; - s32 offset, freq, maxerror, esterror; - s32 status, constant, precision, tolerance; - struct compat_timeval time; - s32 tick; - s32 ppsfreq, jitter, shift, stabil; - s32 jitcnt, calcnt, errcnt, stbcnt; - s32 :32; s32 :32; s32 :32; s32 :32; - s32 :32; s32 :32; s32 :32; s32 :32; - s32 :32; s32 :32; s32 :32; s32 :32; -}; - -extern int do_adjtimex(struct timex *); - -asmlinkage long -sys32_adjtimex(struct timex32 __user *utp) -{ - struct timex txc; - int ret; - - memset(&txc, 0, sizeof(struct timex)); - - if (!access_ok(VERIFY_READ, utp, sizeof(struct timex32)) || - __get_user(txc.modes, &utp->modes) || - __get_user(txc.offset, &utp->offset) || - __get_user(txc.freq, &utp->freq) || - __get_user(txc.maxerror, &utp->maxerror) || - __get_user(txc.esterror, &utp->esterror) || - __get_user(txc.status, &utp->status) || - __get_user(txc.constant, &utp->constant) || - __get_user(txc.precision, &utp->precision) || - __get_user(txc.tolerance, &utp->tolerance) || - __get_user(txc.time.tv_sec, &utp->time.tv_sec) || - __get_user(txc.time.tv_usec, &utp->time.tv_usec) || - __get_user(txc.tick, &utp->tick) || - __get_user(txc.ppsfreq, &utp->ppsfreq) || - __get_user(txc.jitter, &utp->jitter) || - __get_user(txc.shift, &utp->shift) || - __get_user(txc.stabil, &utp->stabil) || - __get_user(txc.jitcnt, &utp->jitcnt) || - __get_user(txc.calcnt, &utp->calcnt) || - __get_user(txc.errcnt, &utp->errcnt) || - __get_user(txc.stbcnt, &utp->stbcnt)) - return -EFAULT; - - ret = do_adjtimex(&txc); - - if (!access_ok(VERIFY_WRITE, utp, sizeof(struct timex32)) || - __put_user(txc.modes, &utp->modes) || - __put_user(txc.offset, &utp->offset) || - __put_user(txc.freq, &utp->freq) || - __put_user(txc.maxerror, &utp->maxerror) || - __put_user(txc.esterror, &utp->esterror) || - __put_user(txc.status, &utp->status) || - __put_user(txc.constant, &utp->constant) || - __put_user(txc.precision, &utp->precision) || - __put_user(txc.tolerance, &utp->tolerance) || - __put_user(txc.time.tv_sec, &utp->time.tv_sec) || - __put_user(txc.time.tv_usec, &utp->time.tv_usec) || - __put_user(txc.tick, &utp->tick) || - __put_user(txc.ppsfreq, &utp->ppsfreq) || - __put_user(txc.jitter, &utp->jitter) || - __put_user(txc.shift, &utp->shift) || - __put_user(txc.stabil, &utp->stabil) || - __put_user(txc.jitcnt, &utp->jitcnt) || - __put_user(txc.calcnt, &utp->calcnt) || - __put_user(txc.errcnt, &utp->errcnt) || - __put_user(txc.stbcnt, &utp->stbcnt)) - ret = -EFAULT; - - return ret; -} - asmlinkage long sys32_mmap2(unsigned long addr, unsigned long len, unsigned long prot, unsigned long flags, unsigned long fd, unsigned long pgoff) diff --git a/arch/x86_64/kernel/kprobes.c b/arch/x86_64/kernel/kprobes.c index 14f0ced613b6..accbff3fec49 100644 --- a/arch/x86_64/kernel/kprobes.c +++ b/arch/x86_64/kernel/kprobes.c @@ -37,10 +37,12 @@ #include <linux/string.h> #include <linux/slab.h> #include <linux/preempt.h> +#include <linux/module.h> #include <asm/cacheflush.h> #include <asm/pgtable.h> #include <asm/kdebug.h> +#include <asm/uaccess.h> void jprobe_return_end(void); static void __kprobes arch_copy_kprobe(struct kprobe *p); @@ -578,16 +580,62 @@ int __kprobes kprobe_fault_handler(struct pt_regs *regs, int trapnr) { struct kprobe *cur = kprobe_running(); struct kprobe_ctlblk *kcb = get_kprobe_ctlblk(); + const struct exception_table_entry *fixup; - if (cur->fault_handler && cur->fault_handler(cur, regs, trapnr)) - return 1; - - if (kcb->kprobe_status & KPROBE_HIT_SS) { - resume_execution(cur, regs, kcb); + 
switch(kcb->kprobe_status) { + case KPROBE_HIT_SS: + case KPROBE_REENTER: + /* + * We are here because the instruction being single + * stepped caused a page fault. We reset the current + * kprobe and the rip points back to the probe address + * and allow the page fault handler to continue as a + * normal page fault. + */ + regs->rip = (unsigned long)cur->addr; regs->eflags |= kcb->kprobe_old_rflags; - - reset_current_kprobe(); + if (kcb->kprobe_status == KPROBE_REENTER) + restore_previous_kprobe(kcb); + else + reset_current_kprobe(); preempt_enable_no_resched(); + break; + case KPROBE_HIT_ACTIVE: + case KPROBE_HIT_SSDONE: + /* + * We increment the nmissed count for accounting, + * we can also use npre/npostfault count for accouting + * these specific fault cases. + */ + kprobes_inc_nmissed_count(cur); + + /* + * We come here because instructions in the pre/post + * handler caused the page_fault, this could happen + * if handler tries to access user space by + * copy_from_user(), get_user() etc. Let the + * user-specified handler try to fix it first. + */ + if (cur->fault_handler && cur->fault_handler(cur, regs, trapnr)) + return 1; + + /* + * In case the user-specified fault handler returned + * zero, try to fix up. + */ + fixup = search_exception_tables(regs->rip); + if (fixup) { + regs->rip = fixup->fixup; + return 1; + } + + /* + * fixup() could not handle it, + * Let do_page_fault() fix it. + */ + break; + default: + break; } return 0; } @@ -601,6 +649,9 @@ int __kprobes kprobe_exceptions_notify(struct notifier_block *self, struct die_args *args = (struct die_args *)data; int ret = NOTIFY_DONE; + if (args->regs && user_mode(args->regs)) + return ret; + switch (val) { case DIE_INT3: if (kprobe_handler(args->regs)) diff --git a/arch/x86_64/kernel/process.c b/arch/x86_64/kernel/process.c index 81111835722d..70dd8e5c6889 100644 --- a/arch/x86_64/kernel/process.c +++ b/arch/x86_64/kernel/process.c @@ -35,8 +35,8 @@ #include <linux/ptrace.h> #include <linux/utsname.h> #include <linux/random.h> -#include <linux/kprobes.h> #include <linux/notifier.h> +#include <linux/kprobes.h> #include <asm/uaccess.h> #include <asm/pgtable.h> @@ -66,24 +66,17 @@ EXPORT_SYMBOL(boot_option_idle_override); void (*pm_idle)(void); static DEFINE_PER_CPU(unsigned int, cpu_idle_state); -static struct notifier_block *idle_notifier; -static DEFINE_SPINLOCK(idle_notifier_lock); +static ATOMIC_NOTIFIER_HEAD(idle_notifier); void idle_notifier_register(struct notifier_block *n) { - unsigned long flags; - spin_lock_irqsave(&idle_notifier_lock, flags); - notifier_chain_register(&idle_notifier, n); - spin_unlock_irqrestore(&idle_notifier_lock, flags); + atomic_notifier_chain_register(&idle_notifier, n); } EXPORT_SYMBOL_GPL(idle_notifier_register); void idle_notifier_unregister(struct notifier_block *n) { - unsigned long flags; - spin_lock_irqsave(&idle_notifier_lock, flags); - notifier_chain_unregister(&idle_notifier, n); - spin_unlock_irqrestore(&idle_notifier_lock, flags); + atomic_notifier_chain_unregister(&idle_notifier, n); } EXPORT_SYMBOL(idle_notifier_unregister); @@ -93,13 +86,13 @@ static DEFINE_PER_CPU(enum idle_state, idle_state) = CPU_NOT_IDLE; void enter_idle(void) { __get_cpu_var(idle_state) = CPU_IDLE; - notifier_call_chain(&idle_notifier, IDLE_START, NULL); + atomic_notifier_call_chain(&idle_notifier, IDLE_START, NULL); } static void __exit_idle(void) { __get_cpu_var(idle_state) = CPU_NOT_IDLE; - notifier_call_chain(&idle_notifier, IDLE_END, NULL); + atomic_notifier_call_chain(&idle_notifier, IDLE_END, NULL); 
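
(These process.c hunks, like the traps.c die_chain change further down, are part of the tree-wide move to the typed notifier API: an ATOMIC_NOTIFIER_HEAD carries its own locking, so the open-coded idle_notifier spinlock goes away and the chain may be fired from atomic context. A minimal module-style sketch of the pattern; demo_chain, demo_event and demo_nb are invented names for illustration:

#include <linux/init.h>
#include <linux/module.h>
#include <linux/notifier.h>

/* Private atomic notifier chain plus one subscriber.  The head macro
 * supplies the locking that the old open-coded list managed by hand. */
static ATOMIC_NOTIFIER_HEAD(demo_chain);

static int demo_event(struct notifier_block *nb, unsigned long action,
                      void *data)
{
        /* react to 'action' (e.g. IDLE_START / IDLE_END) */
        return NOTIFY_OK;
}

static struct notifier_block demo_nb = {
        .notifier_call = demo_event,
};

static int __init demo_init(void)
{
        atomic_notifier_chain_register(&demo_chain, &demo_nb);
        /* publishers may call this from any context, including atomic */
        atomic_notifier_call_chain(&demo_chain, 0, NULL);
        return 0;
}

static void __exit demo_exit(void)
{
        atomic_notifier_chain_unregister(&demo_chain, &demo_nb);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");
)
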
} /* Called from interrupts to signify idle end */ @@ -353,13 +346,6 @@ void exit_thread(void) struct task_struct *me = current; struct thread_struct *t = &me->thread; - /* - * Remove function-return probe instances associated with this task - * and put them back on the free list. Do not insert an exit probe for - * this function, it will be disabled by kprobe_flush_task if you do. - */ - kprobe_flush_task(me); - if (me->thread.io_bitmap_ptr) { struct tss_struct *tss = &per_cpu(init_tss, get_cpu()); diff --git a/arch/x86_64/kernel/setup.c b/arch/x86_64/kernel/setup.c index a57eec8311a7..d1f3e9272c05 100644 --- a/arch/x86_64/kernel/setup.c +++ b/arch/x86_64/kernel/setup.c @@ -962,7 +962,6 @@ static void __cpuinit detect_ht(struct cpuinfo_x86 *c) cpuid(1, &eax, &ebx, &ecx, &edx); - c->apicid = phys_pkg_id(0); if (!cpu_has(c, X86_FEATURE_HT) || cpu_has(c, X86_FEATURE_CMP_LEGACY)) return; @@ -1171,6 +1170,8 @@ void __cpuinit identify_cpu(struct cpuinfo_x86 *c) c->x86_capability[2] = cpuid_edx(0x80860001); } + c->apicid = phys_pkg_id(0); + /* * Vendor-specific initialization. In this section we * canonicalize the feature flags, meaning if there are diff --git a/arch/x86_64/kernel/smpboot.c b/arch/x86_64/kernel/smpboot.c index 66e98659d077..ea48fa638070 100644 --- a/arch/x86_64/kernel/smpboot.c +++ b/arch/x86_64/kernel/smpboot.c @@ -68,6 +68,9 @@ u8 phys_proc_id[NR_CPUS] __read_mostly = { [0 ... NR_CPUS-1] = BAD_APICID }; /* core ID of each logical CPU */ u8 cpu_core_id[NR_CPUS] __read_mostly = { [0 ... NR_CPUS-1] = BAD_APICID }; +/* Last level cache ID of each logical CPU */ +u8 cpu_llc_id[NR_CPUS] __cpuinitdata = {[0 ... NR_CPUS-1] = BAD_APICID}; + /* Bitmask of currently online CPUs */ cpumask_t cpu_online_map __read_mostly; @@ -445,6 +448,18 @@ void __cpuinit smp_callin(void) cpu_set(cpuid, cpu_callin_map); } +/* maps the cpu to the sched domain representing multi-core */ +cpumask_t cpu_coregroup_map(int cpu) +{ + struct cpuinfo_x86 *c = cpu_data + cpu; + /* + * For perf, we return last level cache shared map. 
+ * TBD: when power saving sched policy is added, we will return + * cpu_core_map when power saving policy is enabled + */ + return c->llc_shared_map; +} + /* representing cpus for which sibling maps can be computed */ static cpumask_t cpu_sibling_setup_map; @@ -463,12 +478,16 @@ static inline void set_cpu_sibling_map(int cpu) cpu_set(cpu, cpu_sibling_map[i]); cpu_set(i, cpu_core_map[cpu]); cpu_set(cpu, cpu_core_map[i]); + cpu_set(i, c[cpu].llc_shared_map); + cpu_set(cpu, c[i].llc_shared_map); } } } else { cpu_set(cpu, cpu_sibling_map[cpu]); } + cpu_set(cpu, c[cpu].llc_shared_map); + if (current_cpu_data.x86_max_cores == 1) { cpu_core_map[cpu] = cpu_sibling_map[cpu]; c[cpu].booted_cores = 1; @@ -476,6 +495,11 @@ static inline void set_cpu_sibling_map(int cpu) } for_each_cpu_mask(i, cpu_sibling_setup_map) { + if (cpu_llc_id[cpu] != BAD_APICID && + cpu_llc_id[cpu] == cpu_llc_id[i]) { + cpu_set(i, c[cpu].llc_shared_map); + cpu_set(cpu, c[i].llc_shared_map); + } if (phys_proc_id[cpu] == phys_proc_id[i]) { cpu_set(i, cpu_core_map[cpu]); cpu_set(cpu, cpu_core_map[i]); diff --git a/arch/x86_64/kernel/time.c b/arch/x86_64/kernel/time.c index 7f58fa682491..473b514b66e4 100644 --- a/arch/x86_64/kernel/time.c +++ b/arch/x86_64/kernel/time.c @@ -504,42 +504,25 @@ unsigned long long sched_clock(void) static unsigned long get_cmos_time(void) { - unsigned int timeout = 1000000, year, mon, day, hour, min, sec; - unsigned char uip = 0, this = 0; + unsigned int year, mon, day, hour, min, sec; unsigned long flags; unsigned extyear = 0; -/* - * The Linux interpretation of the CMOS clock register contents: When the - * Update-In-Progress (UIP) flag goes from 1 to 0, the RTC registers show the - * second which has precisely just started. Waiting for this can take up to 1 - * second, we timeout approximately after 2.4 seconds on a machine with - * standard 8.3 MHz ISA bus. - */ - spin_lock_irqsave(&rtc_lock, flags); - while (timeout && (!uip || this)) { - uip |= this; - this = CMOS_READ(RTC_FREQ_SELECT) & RTC_UIP; - timeout--; - } - - /* - * Here we are safe to assume the registers won't change for a whole - * second, so we just go ahead and read them. 
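
(The replacement get_cmos_time() hunk just below drops the old wait on the update-in-progress flag and instead re-reads the registers until the seconds value is unchanged, so the snapshot cannot straddle a rollover. A small userspace sketch of the same idiom; rtc_field() is an invented stand-in for CMOS_READ() under rtc_lock:

#include <stdio.h>
#include <time.h>

/* Read every field, then retry the whole snapshot if the seconds
 * value ticked over while we were reading.  localtime() plays the
 * role of the CMOS registers here. */
static int rtc_field(int which)
{
        time_t t = time(NULL);
        struct tm tm = *localtime(&t);

        switch (which) {
        case 0:  return tm.tm_sec;
        case 1:  return tm.tm_min;
        default: return tm.tm_hour;
        }
}

int main(void)
{
        int sec, min, hour;

        do {
                sec  = rtc_field(0);
                min  = rtc_field(1);
                hour = rtc_field(2);
        } while (sec != rtc_field(0));

        printf("%02d:%02d:%02d\n", hour, min, sec);
        return 0;
}
)
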
- */ - sec = CMOS_READ(RTC_SECONDS); - min = CMOS_READ(RTC_MINUTES); - hour = CMOS_READ(RTC_HOURS); - day = CMOS_READ(RTC_DAY_OF_MONTH); - mon = CMOS_READ(RTC_MONTH); - year = CMOS_READ(RTC_YEAR); - + do { + sec = CMOS_READ(RTC_SECONDS); + min = CMOS_READ(RTC_MINUTES); + hour = CMOS_READ(RTC_HOURS); + day = CMOS_READ(RTC_DAY_OF_MONTH); + mon = CMOS_READ(RTC_MONTH); + year = CMOS_READ(RTC_YEAR); #ifdef CONFIG_ACPI - if (acpi_fadt.revision >= FADT2_REVISION_ID && acpi_fadt.century) - extyear = CMOS_READ(acpi_fadt.century); + if (acpi_fadt.revision >= FADT2_REVISION_ID && + acpi_fadt.century) + extyear = CMOS_READ(acpi_fadt.century); #endif + } while (sec != CMOS_READ(RTC_SECONDS)); spin_unlock_irqrestore(&rtc_lock, flags); diff --git a/arch/x86_64/kernel/traps.c b/arch/x86_64/kernel/traps.c index 7b148309c529..edaa9fe654dc 100644 --- a/arch/x86_64/kernel/traps.c +++ b/arch/x86_64/kernel/traps.c @@ -69,20 +69,20 @@ asmlinkage void alignment_check(void); asmlinkage void machine_check(void); asmlinkage void spurious_interrupt_bug(void); -struct notifier_block *die_chain; -static DEFINE_SPINLOCK(die_notifier_lock); +ATOMIC_NOTIFIER_HEAD(die_chain); int register_die_notifier(struct notifier_block *nb) { - int err = 0; - unsigned long flags; - vmalloc_sync_all(); - spin_lock_irqsave(&die_notifier_lock, flags); - err = notifier_chain_register(&die_chain, nb); - spin_unlock_irqrestore(&die_notifier_lock, flags); - return err; + return atomic_notifier_chain_register(&die_chain, nb); +} +EXPORT_SYMBOL(register_die_notifier); + +int unregister_die_notifier(struct notifier_block *nb) +{ + return atomic_notifier_chain_unregister(&die_chain, nb); } +EXPORT_SYMBOL(unregister_die_notifier); static inline void conditional_sti(struct pt_regs *regs) { diff --git a/arch/x86_64/mm/init.c b/arch/x86_64/mm/init.c index b04415625442..e5f7f1c34462 100644 --- a/arch/x86_64/mm/init.c +++ b/arch/x86_64/mm/init.c @@ -72,7 +72,7 @@ void show_mem(void) show_free_areas(); printk(KERN_INFO "Free swap: %6ldkB\n", nr_swap_pages<<(PAGE_SHIFT-10)); - for_each_pgdat(pgdat) { + for_each_online_pgdat(pgdat) { for (i = 0; i < pgdat->node_spanned_pages; ++i) { page = pfn_to_page(pgdat->node_start_pfn + i); total++; diff --git a/arch/x86_64/mm/numa.c b/arch/x86_64/mm/numa.c index 63c72641b737..4be82d6e2b48 100644 --- a/arch/x86_64/mm/numa.c +++ b/arch/x86_64/mm/numa.c @@ -377,21 +377,6 @@ EXPORT_SYMBOL(node_data); * Should do that. 
*/ -/* Requires pfn_valid(pfn) to be true */ -struct page *pfn_to_page(unsigned long pfn) -{ - int nid = phys_to_nid(((unsigned long)(pfn)) << PAGE_SHIFT); - return (pfn - node_start_pfn(nid)) + NODE_DATA(nid)->node_mem_map; -} -EXPORT_SYMBOL(pfn_to_page); - -unsigned long page_to_pfn(struct page *page) -{ - return (long)(((page) - page_zone(page)->zone_mem_map) + - page_zone(page)->zone_start_pfn); -} -EXPORT_SYMBOL(page_to_pfn); - int pfn_valid(unsigned long pfn) { unsigned nid; diff --git a/arch/xtensa/Kconfig b/arch/xtensa/Kconfig index e90ef5db8913..dbeb3504c3c8 100644 --- a/arch/xtensa/Kconfig +++ b/arch/xtensa/Kconfig @@ -22,6 +22,14 @@ config RWSEM_XCHGADD_ALGORITHM bool default y +config GENERIC_FIND_NEXT_BIT + bool + default y + +config GENERIC_HWEIGHT + bool + default y + config GENERIC_HARDIRQS bool default y diff --git a/arch/xtensa/platform-iss/setup.c b/arch/xtensa/platform-iss/setup.c index 2e6dcbf0cc04..23790a5610e2 100644 --- a/arch/xtensa/platform-iss/setup.c +++ b/arch/xtensa/platform-iss/setup.c @@ -108,5 +108,5 @@ static struct notifier_block iss_panic_block = { void __init platform_setup(char **p_cmdline) { - notifier_chain_register(&panic_notifier_list, &iss_panic_block); + atomic_notifier_chain_register(&panic_notifier_list, &iss_panic_block); } diff --git a/block/Kconfig b/block/Kconfig index 96783645092d..5536839886ff 100644 --- a/block/Kconfig +++ b/block/Kconfig @@ -13,6 +13,7 @@ config LBD config BLK_DEV_IO_TRACE bool "Support for tracing block io actions" + depends on SYSFS select RELAY select DEBUG_FS help @@ -23,4 +24,13 @@ config BLK_DEV_IO_TRACE git://brick.kernel.dk/data/git/blktrace.git +config LSF + bool "Support for Large Single Files" + depends on X86 || (MIPS && 32BIT) || PPC32 || ARCH_S390_31 || SUPERH || UML + default n + help + When CONFIG_LBD is disabled, say Y here if you want to + handle large file(bigger than 2TB), otherwise say N. + When CONFIG_LBD is enabled, Y is set automatically. + source block/Kconfig.iosched diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c index c4a0d5d8d7f0..67d446de0227 100644 --- a/block/cfq-iosched.c +++ b/block/cfq-iosched.c @@ -26,18 +26,12 @@ static const int cfq_back_penalty = 2; /* penalty of a backwards seek */ static const int cfq_slice_sync = HZ / 10; static int cfq_slice_async = HZ / 25; static const int cfq_slice_async_rq = 2; -static int cfq_slice_idle = HZ / 100; +static int cfq_slice_idle = HZ / 70; #define CFQ_IDLE_GRACE (HZ / 10) #define CFQ_SLICE_SCALE (5) #define CFQ_KEY_ASYNC (0) -#define CFQ_KEY_ANY (0xffff) - -/* - * disable queueing at the driver/hardware level - */ -static const int cfq_max_depth = 2; static DEFINE_RWLOCK(cfq_exit_lock); @@ -102,6 +96,8 @@ static struct completion *ioc_gone; #define cfq_cfqq_sync(cfqq) \ (cfq_cfqq_class_sync(cfqq) || (cfqq)->on_dispatch[SYNC]) +#define sample_valid(samples) ((samples) > 80) + /* * Per block device queue structure */ @@ -170,7 +166,6 @@ struct cfq_data { unsigned int cfq_slice[2]; unsigned int cfq_slice_async_rq; unsigned int cfq_slice_idle; - unsigned int cfq_max_depth; struct list_head cic_list; }; @@ -343,17 +338,27 @@ static int cfq_queue_empty(request_queue_t *q) return !cfqd->busy_queues; } +static inline pid_t cfq_queue_pid(struct task_struct *task, int rw) +{ + if (rw == READ || process_sync(task)) + return task->pid; + + return CFQ_KEY_ASYNC; +} + /* * Lifted from AS - choose which of crq1 and crq2 that is best served now. * We choose the request that is closest to the head right now. 
Distance - * behind the head are penalized and only allowed to a certain extent. + * behind the head is penalized and only allowed to a certain extent. */ static struct cfq_rq * cfq_choose_req(struct cfq_data *cfqd, struct cfq_rq *crq1, struct cfq_rq *crq2) { sector_t last, s1, s2, d1 = 0, d2 = 0; - int r1_wrap = 0, r2_wrap = 0; /* requests are behind the disk head */ unsigned long back_max; +#define CFQ_RQ1_WRAP 0x01 /* request 1 wraps */ +#define CFQ_RQ2_WRAP 0x02 /* request 2 wraps */ + unsigned wrap = 0; /* bit mask: requests behind the disk head? */ if (crq1 == NULL || crq1 == crq2) return crq2; @@ -385,35 +390,47 @@ cfq_choose_req(struct cfq_data *cfqd, struct cfq_rq *crq1, struct cfq_rq *crq2) else if (s1 + back_max >= last) d1 = (last - s1) * cfqd->cfq_back_penalty; else - r1_wrap = 1; + wrap |= CFQ_RQ1_WRAP; if (s2 >= last) d2 = s2 - last; else if (s2 + back_max >= last) d2 = (last - s2) * cfqd->cfq_back_penalty; else - r2_wrap = 1; + wrap |= CFQ_RQ2_WRAP; /* Found required data */ - if (!r1_wrap && r2_wrap) - return crq1; - else if (!r2_wrap && r1_wrap) - return crq2; - else if (r1_wrap && r2_wrap) { - /* both behind the head */ - if (s1 <= s2) + + /* + * By doing switch() on the bit mask "wrap" we avoid having to + * check two variables for all permutations: --> faster! + */ + switch (wrap) { + case 0: /* common case for CFQ: crq1 and crq2 not wrapped */ + if (d1 < d2) return crq1; - else + else if (d2 < d1) return crq2; - } + else { + if (s1 >= s2) + return crq1; + else + return crq2; + } - /* Both requests in front of the head */ - if (d1 < d2) + case CFQ_RQ2_WRAP: return crq1; - else if (d2 < d1) + case CFQ_RQ1_WRAP: return crq2; - else { - if (s1 >= s2) + case (CFQ_RQ1_WRAP|CFQ_RQ2_WRAP): /* both crqs wrapped */ + default: + /* + * Since both rqs are wrapped, + * start with the one that's further behind head + * (--> only *one* back seek required), + * since back seek takes more time than forward. 
+ */ + if (s1 <= s2) return crq1; else return crq2; @@ -612,15 +629,20 @@ cfq_reposition_crq_rb(struct cfq_queue *cfqq, struct cfq_rq *crq) cfq_add_crq_rb(crq); } -static struct request *cfq_find_rq_rb(struct cfq_data *cfqd, sector_t sector) - +static struct request * +cfq_find_rq_fmerge(struct cfq_data *cfqd, struct bio *bio) { - struct cfq_queue *cfqq = cfq_find_cfq_hash(cfqd, current->pid, CFQ_KEY_ANY); + struct task_struct *tsk = current; + pid_t key = cfq_queue_pid(tsk, bio_data_dir(bio)); + struct cfq_queue *cfqq; struct rb_node *n; + sector_t sector; + cfqq = cfq_find_cfq_hash(cfqd, key, tsk->ioprio); if (!cfqq) goto out; + sector = bio->bi_sector + bio_sectors(bio); n = cfqq->sort_list.rb_node; while (n) { struct cfq_rq *crq = rb_entry_crq(n); @@ -674,7 +696,7 @@ cfq_merge(request_queue_t *q, struct request **req, struct bio *bio) goto out; } - __rq = cfq_find_rq_rb(cfqd, bio->bi_sector + bio_sectors(bio)); + __rq = cfq_find_rq_fmerge(cfqd, bio); if (__rq && elv_rq_merge_ok(__rq, bio)) { ret = ELEVATOR_FRONT_MERGE; goto out; @@ -877,6 +899,7 @@ static struct cfq_queue *cfq_set_active_queue(struct cfq_data *cfqd) static int cfq_arm_slice_timer(struct cfq_data *cfqd, struct cfq_queue *cfqq) { + struct cfq_io_context *cic; unsigned long sl; WARN_ON(!RB_EMPTY(&cfqq->sort_list)); @@ -892,13 +915,23 @@ static int cfq_arm_slice_timer(struct cfq_data *cfqd, struct cfq_queue *cfqq) /* * task has exited, don't wait */ - if (cfqd->active_cic && !cfqd->active_cic->ioc->task) + cic = cfqd->active_cic; + if (!cic || !cic->ioc->task) return 0; cfq_mark_cfqq_must_dispatch(cfqq); cfq_mark_cfqq_wait_request(cfqq); sl = min(cfqq->slice_end - 1, (unsigned long) cfqd->cfq_slice_idle); + + /* + * we don't want to idle for seeks, but we do want to allow + * fair distribution of slice time for a process doing back-to-back + * seeks. 
so allow a little bit of time for him to submit a new rq + */ + if (sample_valid(cic->seek_samples) && cic->seek_mean > 131072) + sl = 2; + mod_timer(&cfqd->idle_slice_timer, jiffies + sl); return 1; } @@ -1115,13 +1148,6 @@ cfq_dispatch_requests(request_queue_t *q, int force) if (cfqq) { int max_dispatch; - /* - * if idle window is disabled, allow queue buildup - */ - if (!cfq_cfqq_idle_window(cfqq) && - cfqd->rq_in_driver >= cfqd->cfq_max_depth) - return 0; - cfq_clear_cfqq_must_dispatch(cfqq); cfq_clear_cfqq_wait_request(cfqq); del_timer(&cfqd->idle_slice_timer); @@ -1171,13 +1197,13 @@ __cfq_find_cfq_hash(struct cfq_data *cfqd, unsigned int key, unsigned int prio, const int hashval) { struct hlist_head *hash_list = &cfqd->cfq_hash[hashval]; - struct hlist_node *entry, *next; + struct hlist_node *entry; + struct cfq_queue *__cfqq; - hlist_for_each_safe(entry, next, hash_list) { - struct cfq_queue *__cfqq = list_entry_qhash(entry); + hlist_for_each_entry(__cfqq, entry, hash_list, cfq_hash) { const unsigned short __p = IOPRIO_PRIO_VALUE(__cfqq->org_ioprio_class, __cfqq->org_ioprio); - if (__cfqq->key == key && (__p == prio || prio == CFQ_KEY_ANY)) + if (__cfqq->key == key && (__p == prio || !prio)) return __cfqq; } @@ -1190,19 +1216,19 @@ cfq_find_cfq_hash(struct cfq_data *cfqd, unsigned int key, unsigned short prio) return __cfq_find_cfq_hash(cfqd, key, prio, hash_long(key, CFQ_QHASH_SHIFT)); } -static void cfq_free_io_context(struct cfq_io_context *cic) +static void cfq_free_io_context(struct io_context *ioc) { struct cfq_io_context *__cic; - struct list_head *entry, *next; - int freed = 1; + struct rb_node *n; + int freed = 0; - list_for_each_safe(entry, next, &cic->list) { - __cic = list_entry(entry, struct cfq_io_context, list); + while ((n = rb_first(&ioc->cic_root)) != NULL) { + __cic = rb_entry(n, struct cfq_io_context, rb_node); + rb_erase(&__cic->rb_node, &ioc->cic_root); kmem_cache_free(cfq_ioc_pool, __cic); freed++; } - kmem_cache_free(cfq_ioc_pool, cic); if (atomic_sub_and_test(freed, &ioc_count) && ioc_gone) complete(ioc_gone); } @@ -1210,8 +1236,7 @@ static void cfq_free_io_context(struct cfq_io_context *cic) static void cfq_trim(struct io_context *ioc) { ioc->set_ioprio = NULL; - if (ioc->cic) - cfq_free_io_context(ioc->cic); + cfq_free_io_context(ioc); } /* @@ -1250,26 +1275,26 @@ static void cfq_exit_single_io_context(struct cfq_io_context *cic) spin_unlock(q->queue_lock); } -static void cfq_exit_io_context(struct cfq_io_context *cic) +static void cfq_exit_io_context(struct io_context *ioc) { struct cfq_io_context *__cic; - struct list_head *entry; unsigned long flags; - - local_irq_save(flags); + struct rb_node *n; /* * put the reference this task is holding to the various queues */ - read_lock(&cfq_exit_lock); - list_for_each(entry, &cic->list) { - __cic = list_entry(entry, struct cfq_io_context, list); + read_lock_irqsave(&cfq_exit_lock, flags); + + n = rb_first(&ioc->cic_root); + while (n != NULL) { + __cic = rb_entry(n, struct cfq_io_context, rb_node); + cfq_exit_single_io_context(__cic); + n = rb_next(n); } - cfq_exit_single_io_context(cic); - read_unlock(&cfq_exit_lock); - local_irq_restore(flags); + read_unlock_irqrestore(&cfq_exit_lock, flags); } static struct cfq_io_context * @@ -1278,10 +1303,10 @@ cfq_alloc_io_context(struct cfq_data *cfqd, gfp_t gfp_mask) struct cfq_io_context *cic = kmem_cache_alloc(cfq_ioc_pool, gfp_mask); if (cic) { - INIT_LIST_HEAD(&cic->list); + RB_CLEAR(&cic->rb_node); + cic->key = NULL; cic->cfqq[ASYNC] = NULL; cic->cfqq[SYNC] = NULL; 
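
(The per-process list of cfq_io_contexts is replaced here by an rbtree rooted at ioc->cic_root, so cfq_free_io_context() above drains it with the usual pop-the-leftmost-node loop: the code simply re-fetches rb_first() after each erase rather than trying to keep an iterator live across the free. A generic sketch of that idiom; struct drain_node and demo_drain() are invented for the example:

#include <linux/rbtree.h>
#include <linux/slab.h>

struct drain_node {
        struct rb_node rb;
        int key;
};

/* Drain a tree by repeatedly erasing its first (leftmost) node. */
static void demo_drain(struct rb_root *root)
{
        struct rb_node *n;

        while ((n = rb_first(root)) != NULL) {
                struct drain_node *d = rb_entry(n, struct drain_node, rb);

                rb_erase(&d->rb, root);
                kfree(d);
        }
}
)
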
- cic->key = NULL; cic->last_end_request = jiffies; cic->ttime_total = 0; cic->ttime_samples = 0; @@ -1373,15 +1398,17 @@ static inline void changed_ioprio(struct cfq_io_context *cic) static int cfq_ioc_set_ioprio(struct io_context *ioc, unsigned int ioprio) { struct cfq_io_context *cic; + struct rb_node *n; write_lock(&cfq_exit_lock); - cic = ioc->cic; - - changed_ioprio(cic); - - list_for_each_entry(cic, &cic->list, list) + n = rb_first(&ioc->cic_root); + while (n != NULL) { + cic = rb_entry(n, struct cfq_io_context, rb_node); + changed_ioprio(cic); + n = rb_next(n); + } write_unlock(&cfq_exit_lock); @@ -1445,14 +1472,67 @@ out: return cfqq; } +static struct cfq_io_context * +cfq_cic_rb_lookup(struct cfq_data *cfqd, struct io_context *ioc) +{ + struct rb_node *n = ioc->cic_root.rb_node; + struct cfq_io_context *cic; + void *key = cfqd; + + while (n) { + cic = rb_entry(n, struct cfq_io_context, rb_node); + + if (key < cic->key) + n = n->rb_left; + else if (key > cic->key) + n = n->rb_right; + else + return cic; + } + + return NULL; +} + +static inline void +cfq_cic_link(struct cfq_data *cfqd, struct io_context *ioc, + struct cfq_io_context *cic) +{ + struct rb_node **p = &ioc->cic_root.rb_node; + struct rb_node *parent = NULL; + struct cfq_io_context *__cic; + + read_lock(&cfq_exit_lock); + + cic->ioc = ioc; + cic->key = cfqd; + + ioc->set_ioprio = cfq_ioc_set_ioprio; + + while (*p) { + parent = *p; + __cic = rb_entry(parent, struct cfq_io_context, rb_node); + + if (cic->key < __cic->key) + p = &(*p)->rb_left; + else if (cic->key > __cic->key) + p = &(*p)->rb_right; + else + BUG(); + } + + rb_link_node(&cic->rb_node, parent, p); + rb_insert_color(&cic->rb_node, &ioc->cic_root); + list_add(&cic->queue_list, &cfqd->cic_list); + read_unlock(&cfq_exit_lock); +} + /* * Setup general io context and cfq io context. There can be several cfq * io contexts per general io context, if this process is doing io to more - * than one device managed by cfq. Note that caller is holding a reference to - * cfqq, so we don't need to worry about it disappearing + * than one device managed by cfq. */ static struct cfq_io_context * -cfq_get_io_context(struct cfq_data *cfqd, pid_t pid, gfp_t gfp_mask) +cfq_get_io_context(struct cfq_data *cfqd, gfp_t gfp_mask) { struct io_context *ioc = NULL; struct cfq_io_context *cic; @@ -1463,88 +1543,15 @@ cfq_get_io_context(struct cfq_data *cfqd, pid_t pid, gfp_t gfp_mask) if (!ioc) return NULL; -restart: - if ((cic = ioc->cic) == NULL) { - cic = cfq_alloc_io_context(cfqd, gfp_mask); - - if (cic == NULL) - goto err; - - /* - * manually increment generic io_context usage count, it - * cannot go away since we are already holding one ref to it - */ - cic->ioc = ioc; - cic->key = cfqd; - read_lock(&cfq_exit_lock); - ioc->set_ioprio = cfq_ioc_set_ioprio; - ioc->cic = cic; - list_add(&cic->queue_list, &cfqd->cic_list); - read_unlock(&cfq_exit_lock); - } else { - struct cfq_io_context *__cic; - - /* - * the first cic on the list is actually the head itself - */ - if (cic->key == cfqd) - goto out; - - if (unlikely(!cic->key)) { - read_lock(&cfq_exit_lock); - if (list_empty(&cic->list)) - ioc->cic = NULL; - else - ioc->cic = list_entry(cic->list.next, - struct cfq_io_context, - list); - read_unlock(&cfq_exit_lock); - kmem_cache_free(cfq_ioc_pool, cic); - atomic_dec(&ioc_count); - goto restart; - } - - /* - * cic exists, check if we already are there. 
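
(cfq_cic_rb_lookup() and cfq_cic_link() above are the standard open-coded rbtree search and insert: descend the child pointers comparing keys, then link the new node at the slot where the search fell off and let rb_insert_color() rebalance. The real code BUG()s on a duplicate key; that handling is omitted in this sketch, and struct key_node and the demo_* helpers are invented names:

#include <linux/rbtree.h>

struct key_node {
        struct rb_node rb;
        unsigned long key;
};

/* Lookup: go left or right on key comparison, as in cfq_cic_rb_lookup(). */
static struct key_node *demo_lookup(struct rb_root *root, unsigned long key)
{
        struct rb_node *n = root->rb_node;

        while (n) {
                struct key_node *d = rb_entry(n, struct key_node, rb);

                if (key < d->key)
                        n = n->rb_left;
                else if (key > d->key)
                        n = n->rb_right;
                else
                        return d;
        }
        return NULL;
}

/* Insert: remember the parent and the empty link slot while searching,
 * then link the node and let the rbtree core recolour and rebalance. */
static void demo_insert(struct rb_root *root, struct key_node *new)
{
        struct rb_node **p = &root->rb_node, *parent = NULL;

        while (*p) {
                struct key_node *d;

                parent = *p;
                d = rb_entry(parent, struct key_node, rb);
                if (new->key < d->key)
                        p = &(*p)->rb_left;
                else
                        p = &(*p)->rb_right;
        }
        rb_link_node(&new->rb, parent, p);
        rb_insert_color(&new->rb, root);
}
)
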
linear search - * should be ok here, the list will usually not be more than - * 1 or a few entries long - */ - list_for_each_entry(__cic, &cic->list, list) { - /* - * this process is already holding a reference to - * this queue, so no need to get one more - */ - if (__cic->key == cfqd) { - cic = __cic; - goto out; - } - if (unlikely(!__cic->key)) { - read_lock(&cfq_exit_lock); - list_del(&__cic->list); - read_unlock(&cfq_exit_lock); - kmem_cache_free(cfq_ioc_pool, __cic); - atomic_dec(&ioc_count); - goto restart; - } - } + cic = cfq_cic_rb_lookup(cfqd, ioc); + if (cic) + goto out; - /* - * nope, process doesn't have a cic assoicated with this - * cfqq yet. get a new one and add to list - */ - __cic = cfq_alloc_io_context(cfqd, gfp_mask); - if (__cic == NULL) - goto err; - - __cic->ioc = ioc; - __cic->key = cfqd; - read_lock(&cfq_exit_lock); - list_add(&__cic->list, &cic->list); - list_add(&__cic->queue_list, &cfqd->cic_list); - read_unlock(&cfq_exit_lock); - cic = __cic; - } + cic = cfq_alloc_io_context(cfqd, gfp_mask); + if (cic == NULL) + goto err; + cfq_cic_link(cfqd, ioc, cic); out: return cic; err: @@ -1577,7 +1584,33 @@ cfq_update_io_thinktime(struct cfq_data *cfqd, struct cfq_io_context *cic) cic->ttime_mean = (cic->ttime_total + 128) / cic->ttime_samples; } -#define sample_valid(samples) ((samples) > 80) +static void +cfq_update_io_seektime(struct cfq_data *cfqd, struct cfq_io_context *cic, + struct cfq_rq *crq) +{ + sector_t sdist; + u64 total; + + if (cic->last_request_pos < crq->request->sector) + sdist = crq->request->sector - cic->last_request_pos; + else + sdist = cic->last_request_pos - crq->request->sector; + + /* + * Don't allow the seek distance to get too large from the + * odd fragment, pagein, etc + */ + if (cic->seek_samples <= 60) /* second&third seek */ + sdist = min(sdist, (cic->seek_mean * 4) + 2*1024*1024); + else + sdist = min(sdist, (cic->seek_mean * 4) + 2*1024*64); + + cic->seek_samples = (7*cic->seek_samples + 256) / 8; + cic->seek_total = (7*cic->seek_total + (u64)256*sdist) / 8; + total = cic->seek_total + (cic->seek_samples/2); + do_div(total, cic->seek_samples); + cic->seek_mean = (sector_t)total; +} /* * Disable idle window if the process thinks too long or seeks so much that @@ -1690,9 +1723,11 @@ cfq_crq_enqueued(struct cfq_data *cfqd, struct cfq_queue *cfqq, cic = crq->io_context; cfq_update_io_thinktime(cfqd, cic); + cfq_update_io_seektime(cfqd, cic, crq); cfq_update_idle_window(cfqd, cfqq, cic); cic->last_queue = jiffies; + cic->last_request_pos = crq->request->sector + crq->request->nr_sectors; if (cfqq == cfqd->active_queue) { /* @@ -1825,14 +1860,6 @@ static void cfq_prio_boost(struct cfq_queue *cfqq) cfq_resort_rr_list(cfqq, 0); } -static inline pid_t cfq_queue_pid(struct task_struct *task, int rw) -{ - if (rw == READ || process_sync(task)) - return task->pid; - - return CFQ_KEY_ASYNC; -} - static inline int __cfq_may_queue(struct cfq_data *cfqd, struct cfq_queue *cfqq, struct task_struct *task, int rw) @@ -1965,7 +1992,7 @@ cfq_set_request(request_queue_t *q, struct request *rq, struct bio *bio, might_sleep_if(gfp_mask & __GFP_WAIT); - cic = cfq_get_io_context(cfqd, key, gfp_mask); + cic = cfq_get_io_context(cfqd, gfp_mask); spin_lock_irqsave(q->queue_lock, flags); @@ -2133,11 +2160,14 @@ static void cfq_exit_queue(elevator_t *e) request_queue_t *q = cfqd->queue; cfq_shutdown_timer_wq(cfqd); + write_lock(&cfq_exit_lock); spin_lock_irq(q->queue_lock); + if (cfqd->active_queue) __cfq_slice_expired(cfqd, cfqd->active_queue, 0); - 
while(!list_empty(&cfqd->cic_list)) { + + while (!list_empty(&cfqd->cic_list)) { struct cfq_io_context *cic = list_entry(cfqd->cic_list.next, struct cfq_io_context, queue_list); @@ -2152,6 +2182,7 @@ static void cfq_exit_queue(elevator_t *e) cic->key = NULL; list_del_init(&cic->queue_list); } + spin_unlock_irq(q->queue_lock); write_unlock(&cfq_exit_lock); @@ -2191,7 +2222,7 @@ static int cfq_init_queue(request_queue_t *q, elevator_t *e) if (!cfqd->cfq_hash) goto out_cfqhash; - cfqd->crq_pool = mempool_create(BLKDEV_MIN_RQ, mempool_alloc_slab, mempool_free_slab, crq_pool); + cfqd->crq_pool = mempool_create_slab_pool(BLKDEV_MIN_RQ, crq_pool); if (!cfqd->crq_pool) goto out_crqpool; @@ -2227,7 +2258,6 @@ static int cfq_init_queue(request_queue_t *q, elevator_t *e) cfqd->cfq_slice[1] = cfq_slice_sync; cfqd->cfq_slice_async_rq = cfq_slice_async_rq; cfqd->cfq_slice_idle = cfq_slice_idle; - cfqd->cfq_max_depth = cfq_max_depth; return 0; out_crqpool: @@ -2310,7 +2340,6 @@ SHOW_FUNCTION(cfq_slice_idle_show, cfqd->cfq_slice_idle, 1); SHOW_FUNCTION(cfq_slice_sync_show, cfqd->cfq_slice[1], 1); SHOW_FUNCTION(cfq_slice_async_show, cfqd->cfq_slice[0], 1); SHOW_FUNCTION(cfq_slice_async_rq_show, cfqd->cfq_slice_async_rq, 0); -SHOW_FUNCTION(cfq_max_depth_show, cfqd->cfq_max_depth, 0); #undef SHOW_FUNCTION #define STORE_FUNCTION(__FUNC, __PTR, MIN, MAX, __CONV) \ @@ -2339,7 +2368,6 @@ STORE_FUNCTION(cfq_slice_idle_store, &cfqd->cfq_slice_idle, 0, UINT_MAX, 1); STORE_FUNCTION(cfq_slice_sync_store, &cfqd->cfq_slice[1], 1, UINT_MAX, 1); STORE_FUNCTION(cfq_slice_async_store, &cfqd->cfq_slice[0], 1, UINT_MAX, 1); STORE_FUNCTION(cfq_slice_async_rq_store, &cfqd->cfq_slice_async_rq, 1, UINT_MAX, 0); -STORE_FUNCTION(cfq_max_depth_store, &cfqd->cfq_max_depth, 1, UINT_MAX, 0); #undef STORE_FUNCTION #define CFQ_ATTR(name) \ @@ -2356,7 +2384,6 @@ static struct elv_fs_entry cfq_attrs[] = { CFQ_ATTR(slice_async), CFQ_ATTR(slice_async_rq), CFQ_ATTR(slice_idle), - CFQ_ATTR(max_depth), __ATTR_NULL }; diff --git a/block/genhd.c b/block/genhd.c index 64510fd88621..db4c60c802d6 100644 --- a/block/genhd.c +++ b/block/genhd.c @@ -454,8 +454,8 @@ static ssize_t disk_stats_read(struct gendisk * disk, char *page) disk_round_stats(disk); preempt_enable(); return sprintf(page, - "%8u %8u %8llu %8u " - "%8u %8u %8llu %8u " + "%8lu %8lu %8llu %8u " + "%8lu %8lu %8llu %8u " "%8u %8u %8u" "\n", disk_stat_read(disk, ios[READ]), @@ -649,7 +649,7 @@ static int diskstats_show(struct seq_file *s, void *v) preempt_disable(); disk_round_stats(gp); preempt_enable(); - seq_printf(s, "%4d %4d %s %u %u %llu %u %u %u %llu %u %u %u %u\n", + seq_printf(s, "%4d %4d %s %lu %lu %llu %u %lu %lu %llu %u %u %u %u\n", gp->major, n + gp->first_minor, disk_name(gp, n, buf), disk_stat_read(gp, ios[0]), disk_stat_read(gp, merges[0]), (unsigned long long)disk_stat_read(gp, sectors[0]), diff --git a/block/ll_rw_blk.c b/block/ll_rw_blk.c index 062067fa7ead..5b26af8597f3 100644 --- a/block/ll_rw_blk.c +++ b/block/ll_rw_blk.c @@ -785,6 +785,8 @@ void blk_queue_stack_limits(request_queue_t *t, request_queue_t *b) t->max_hw_segments = min(t->max_hw_segments,b->max_hw_segments); t->max_segment_size = min(t->max_segment_size,b->max_segment_size); t->hardsect_size = max(t->hardsect_size,b->hardsect_size); + if (!test_bit(QUEUE_FLAG_CLUSTER, &b->queue_flags)) + clear_bit(QUEUE_FLAG_CLUSTER, &t->queue_flags); } EXPORT_SYMBOL(blk_queue_stack_limits); @@ -906,17 +908,15 @@ init_tag_map(request_queue_t *q, struct blk_queue_tag *tags, int depth) __FUNCTION__, depth); } - tag_index 
= kmalloc(depth * sizeof(struct request *), GFP_ATOMIC); + tag_index = kzalloc(depth * sizeof(struct request *), GFP_ATOMIC); if (!tag_index) goto fail; nr_ulongs = ALIGN(depth, BITS_PER_LONG) / BITS_PER_LONG; - tag_map = kmalloc(nr_ulongs * sizeof(unsigned long), GFP_ATOMIC); + tag_map = kzalloc(nr_ulongs * sizeof(unsigned long), GFP_ATOMIC); if (!tag_map) goto fail; - memset(tag_index, 0, depth * sizeof(struct request *)); - memset(tag_map, 0, nr_ulongs * sizeof(unsigned long)); tags->real_max_depth = depth; tags->max_depth = depth; tags->tag_index = tag_index; @@ -2479,10 +2479,12 @@ void blk_execute_rq_nowait(request_queue_t *q, struct gendisk *bd_disk, rq->rq_disk = bd_disk; rq->flags |= REQ_NOMERGE; rq->end_io = done; - elv_add_request(q, rq, where, 1); - generic_unplug_device(q); + WARN_ON(irqs_disabled()); + spin_lock_irq(q->queue_lock); + __elv_add_request(q, rq, where, 1); + __generic_unplug_device(q); + spin_unlock_irq(q->queue_lock); } - EXPORT_SYMBOL_GPL(blk_execute_rq_nowait); /** @@ -3512,7 +3514,7 @@ int __init blk_dev_init(void) iocontext_cachep = kmem_cache_create("blkdev_ioc", sizeof(struct io_context), 0, SLAB_PANIC, NULL, NULL); - for_each_cpu(i) + for_each_possible_cpu(i) INIT_LIST_HEAD(&per_cpu(blk_cpu_done, i)); open_softirq(BLOCK_SOFTIRQ, blk_done_softirq, NULL); @@ -3537,11 +3539,17 @@ void put_io_context(struct io_context *ioc) BUG_ON(atomic_read(&ioc->refcount) == 0); if (atomic_dec_and_test(&ioc->refcount)) { + struct cfq_io_context *cic; + rcu_read_lock(); if (ioc->aic && ioc->aic->dtor) ioc->aic->dtor(ioc->aic); - if (ioc->cic && ioc->cic->dtor) - ioc->cic->dtor(ioc->cic); + if (ioc->cic_root.rb_node != NULL) { + struct rb_node *n = rb_first(&ioc->cic_root); + + cic = rb_entry(n, struct cfq_io_context, rb_node); + cic->dtor(ioc); + } rcu_read_unlock(); kmem_cache_free(iocontext_cachep, ioc); @@ -3554,6 +3562,7 @@ void exit_io_context(void) { unsigned long flags; struct io_context *ioc; + struct cfq_io_context *cic; local_irq_save(flags); task_lock(current); @@ -3565,9 +3574,11 @@ void exit_io_context(void) if (ioc->aic && ioc->aic->exit) ioc->aic->exit(ioc->aic); - if (ioc->cic && ioc->cic->exit) - ioc->cic->exit(ioc->cic); - + if (ioc->cic_root.rb_node != NULL) { + cic = rb_entry(rb_first(&ioc->cic_root), struct cfq_io_context, rb_node); + cic->exit(ioc); + } + put_io_context(ioc); } @@ -3596,7 +3607,7 @@ struct io_context *current_io_context(gfp_t gfp_flags) ret->last_waited = jiffies; /* doesn't matter... 
*/ ret->nr_batch_requests = 0; /* because this is 0 */ ret->aic = NULL; - ret->cic = NULL; + ret->cic_root.rb_node = NULL; tsk->io_context = ret; } diff --git a/drivers/Kconfig b/drivers/Kconfig index bddf431bbb72..9f5c0da57c90 100644 --- a/drivers/Kconfig +++ b/drivers/Kconfig @@ -70,4 +70,6 @@ source "drivers/sn/Kconfig" source "drivers/edac/Kconfig" +source "drivers/rtc/Kconfig" + endmenu diff --git a/drivers/Makefile b/drivers/Makefile index 5c69b86db624..424955274e60 100644 --- a/drivers/Makefile +++ b/drivers/Makefile @@ -56,6 +56,7 @@ obj-$(CONFIG_USB_GADGET) += usb/gadget/ obj-$(CONFIG_GAMEPORT) += input/gameport/ obj-$(CONFIG_INPUT) += input/ obj-$(CONFIG_I2O) += message/ +obj-$(CONFIG_RTC_LIB) += rtc/ obj-$(CONFIG_I2C) += i2c/ obj-$(CONFIG_W1) += w1/ obj-$(CONFIG_HWMON) += hwmon/ diff --git a/drivers/acpi/osl.c b/drivers/acpi/osl.c index ac5bbaedac1b..13b5fd5854a8 100644 --- a/drivers/acpi/osl.c +++ b/drivers/acpi/osl.c @@ -156,12 +156,10 @@ acpi_status acpi_os_get_root_pointer(u32 flags, struct acpi_pointer *addr) { if (efi_enabled) { addr->pointer_type = ACPI_PHYSICAL_POINTER; - if (efi.acpi20) - addr->pointer.physical = - (acpi_physical_address) virt_to_phys(efi.acpi20); - else if (efi.acpi) - addr->pointer.physical = - (acpi_physical_address) virt_to_phys(efi.acpi); + if (efi.acpi20 != EFI_INVALID_TABLE_ADDR) + addr->pointer.physical = efi.acpi20; + else if (efi.acpi != EFI_INVALID_TABLE_ADDR) + addr->pointer.physical = efi.acpi; else { printk(KERN_ERR PREFIX "System description tables not found\n"); @@ -182,22 +180,14 @@ acpi_status acpi_os_map_memory(acpi_physical_address phys, acpi_size size, void __iomem ** virt) { - if (efi_enabled) { - if (EFI_MEMORY_WB & efi_mem_attributes(phys)) { - *virt = (void __iomem *)phys_to_virt(phys); - } else { - *virt = ioremap(phys, size); - } - } else { - if (phys > ULONG_MAX) { - printk(KERN_ERR PREFIX "Cannot map memory that high\n"); - return AE_BAD_PARAMETER; - } - /* - * ioremap checks to ensure this is in reserved space - */ - *virt = ioremap((unsigned long)phys, size); + if (phys > ULONG_MAX) { + printk(KERN_ERR PREFIX "Cannot map memory that high\n"); + return AE_BAD_PARAMETER; } + /* + * ioremap checks to ensure this is in reserved space + */ + *virt = ioremap((unsigned long)phys, size); if (!*virt) return AE_NO_MEMORY; @@ -409,18 +399,8 @@ acpi_os_read_memory(acpi_physical_address phys_addr, u32 * value, u32 width) { u32 dummy; void __iomem *virt_addr; - int iomem = 0; - if (efi_enabled) { - if (EFI_MEMORY_WB & efi_mem_attributes(phys_addr)) { - /* HACK ALERT! We can use readb/w/l on real memory too.. */ - virt_addr = (void __iomem *)phys_to_virt(phys_addr); - } else { - iomem = 1; - virt_addr = ioremap(phys_addr, width); - } - } else - virt_addr = (void __iomem *)phys_to_virt(phys_addr); + virt_addr = ioremap(phys_addr, width); if (!value) value = &dummy; @@ -438,10 +418,7 @@ acpi_os_read_memory(acpi_physical_address phys_addr, u32 * value, u32 width) BUG(); } - if (efi_enabled) { - if (iomem) - iounmap(virt_addr); - } + iounmap(virt_addr); return AE_OK; } @@ -450,18 +427,8 @@ acpi_status acpi_os_write_memory(acpi_physical_address phys_addr, u32 value, u32 width) { void __iomem *virt_addr; - int iomem = 0; - if (efi_enabled) { - if (EFI_MEMORY_WB & efi_mem_attributes(phys_addr)) { - /* HACK ALERT! 
We can use writeb/w/l on real memory too */ - virt_addr = (void __iomem *)phys_to_virt(phys_addr); - } else { - iomem = 1; - virt_addr = ioremap(phys_addr, width); - } - } else - virt_addr = (void __iomem *)phys_to_virt(phys_addr); + virt_addr = ioremap(phys_addr, width); switch (width) { case 8: @@ -477,8 +444,7 @@ acpi_os_write_memory(acpi_physical_address phys_addr, u32 value, u32 width) BUG(); } - if (iomem) - iounmap(virt_addr); + iounmap(virt_addr); return AE_OK; } diff --git a/drivers/acpi/processor_core.c b/drivers/acpi/processor_core.c index 99a3a28594da..713b763884a9 100644 --- a/drivers/acpi/processor_core.c +++ b/drivers/acpi/processor_core.c @@ -246,7 +246,7 @@ static int acpi_processor_errata(struct acpi_processor *pr) } /* -------------------------------------------------------------------------- - Common ACPI processor fucntions + Common ACPI processor functions -------------------------------------------------------------------------- */ /* diff --git a/drivers/acpi/tables.c b/drivers/acpi/tables.c index 31d4f3ffc265..7f37c7cc5ef1 100644 --- a/drivers/acpi/tables.c +++ b/drivers/acpi/tables.c @@ -587,7 +587,8 @@ int __init acpi_table_init(void) return -ENODEV; } - rsdp = (struct acpi_table_rsdp *)__va(rsdp_phys); + rsdp = (struct acpi_table_rsdp *)__acpi_map_table(rsdp_phys, + sizeof(struct acpi_table_rsdp)); if (!rsdp) { printk(KERN_WARNING PREFIX "Unable to map RSDP\n"); return -ENODEV; diff --git a/drivers/atm/lanai.c b/drivers/atm/lanai.c index 69f4c7ce9a63..cac09e353be8 100644 --- a/drivers/atm/lanai.c +++ b/drivers/atm/lanai.c @@ -1972,7 +1972,7 @@ static int __devinit lanai_pci_start(struct lanai_dev *lanai) "(itf %d): No suitable DMA available.\n", lanai->number); return -EBUSY; } - if (pci_set_consistent_dma_mask(pci, 0xFFFFFFFF) != 0) { + if (pci_set_consistent_dma_mask(pci, DMA_32BIT_MASK) != 0) { printk(KERN_WARNING DEV_LABEL "(itf %d): No suitable DMA available.\n", lanai->number); return -EBUSY; diff --git a/drivers/base/memory.c b/drivers/base/memory.c index 105a0d61eb1f..dd547af4681a 100644 --- a/drivers/base/memory.c +++ b/drivers/base/memory.c @@ -47,16 +47,16 @@ static struct kset_uevent_ops memory_uevent_ops = { .uevent = memory_uevent, }; -static struct notifier_block *memory_chain; +static BLOCKING_NOTIFIER_HEAD(memory_chain); int register_memory_notifier(struct notifier_block *nb) { - return notifier_chain_register(&memory_chain, nb); + return blocking_notifier_chain_register(&memory_chain, nb); } void unregister_memory_notifier(struct notifier_block *nb) { - notifier_chain_unregister(&memory_chain, nb); + blocking_notifier_chain_unregister(&memory_chain, nb); } /* @@ -140,7 +140,7 @@ static ssize_t show_mem_state(struct sys_device *dev, char *buf) static inline int memory_notify(unsigned long val, void *v) { - return notifier_call_chain(&memory_chain, val, v); + return blocking_notifier_call_chain(&memory_chain, val, v); } /* diff --git a/drivers/block/DAC960.c b/drivers/block/DAC960.c index 9bdea2a5cf0e..45bcda544880 100644 --- a/drivers/block/DAC960.c +++ b/drivers/block/DAC960.c @@ -41,6 +41,7 @@ #include <linux/timer.h> #include <linux/pci.h> #include <linux/init.h> +#include <linux/jiffies.h> #include <linux/random.h> #include <asm/io.h> #include <asm/uaccess.h> @@ -311,11 +312,10 @@ static boolean DAC960_CreateAuxiliaryStructures(DAC960_Controller_T *Controller) CommandsRemaining = CommandAllocationGroupSize; CommandGroupByteCount = CommandsRemaining * CommandAllocationLength; - AllocationPointer = kmalloc(CommandGroupByteCount, GFP_ATOMIC); + 
AllocationPointer = kzalloc(CommandGroupByteCount, GFP_ATOMIC); if (AllocationPointer == NULL) return DAC960_Failure(Controller, "AUXILIARY STRUCTURE CREATION"); - memset(AllocationPointer, 0, CommandGroupByteCount); } Command = (DAC960_Command_T *) AllocationPointer; AllocationPointer += CommandAllocationLength; @@ -2709,14 +2709,12 @@ DAC960_DetectController(struct pci_dev *PCI_Device, void __iomem *BaseAddress; int i; - Controller = (DAC960_Controller_T *) - kmalloc(sizeof(DAC960_Controller_T), GFP_ATOMIC); + Controller = kzalloc(sizeof(DAC960_Controller_T), GFP_ATOMIC); if (Controller == NULL) { DAC960_Error("Unable to allocate Controller structure for " "Controller at\n", NULL); return NULL; } - memset(Controller, 0, sizeof(DAC960_Controller_T)); Controller->ControllerNumber = DAC960_ControllerCount; DAC960_Controllers[DAC960_ControllerCount++] = Controller; Controller->Bus = PCI_Device->bus->number; @@ -3657,8 +3655,8 @@ static void DAC960_V1_ProcessCompletedCommand(DAC960_Command_T *Command) (NewEnquiry->EventLogSequenceNumber != OldEnquiry->EventLogSequenceNumber) || Controller->MonitoringTimerCount == 0 || - (jiffies - Controller->SecondaryMonitoringTime - >= DAC960_SecondaryMonitoringInterval)) + time_after_eq(jiffies, Controller->SecondaryMonitoringTime + + DAC960_SecondaryMonitoringInterval)) { Controller->V1.NeedLogicalDriveInformation = true; Controller->V1.NewEventLogSequenceNumber = @@ -5643,8 +5641,8 @@ static void DAC960_MonitoringTimerFunction(unsigned long TimerData) unsigned int StatusChangeCounter = Controller->V2.HealthStatusBuffer->StatusChangeCounter; boolean ForceMonitoringCommand = false; - if (jiffies - Controller->SecondaryMonitoringTime - > DAC960_SecondaryMonitoringInterval) + if (time_after(jiffies, Controller->SecondaryMonitoringTime + + DAC960_SecondaryMonitoringInterval)) { int LogicalDriveNumber; for (LogicalDriveNumber = 0; @@ -5672,8 +5670,8 @@ static void DAC960_MonitoringTimerFunction(unsigned long TimerData) ControllerInfo->ConsistencyChecksActive + ControllerInfo->RebuildsActive + ControllerInfo->OnlineExpansionsActive == 0 || - jiffies - Controller->PrimaryMonitoringTime - < DAC960_MonitoringTimerInterval) && + time_before(jiffies, Controller->PrimaryMonitoringTime + + DAC960_MonitoringTimerInterval)) && !ForceMonitoringCommand) { Controller->MonitoringTimer.expires = @@ -5810,8 +5808,8 @@ static void DAC960_Message(DAC960_MessageLevel_T MessageLevel, Controller->ProgressBufferLength = Length; if (Controller->EphemeralProgressMessage) { - if (jiffies - Controller->LastProgressReportTime - >= DAC960_ProgressReportingInterval) + if (time_after_eq(jiffies, Controller->LastProgressReportTime + + DAC960_ProgressReportingInterval)) { printk("%sDAC960#%d: %s", DAC960_MessageLevelMap[MessageLevel], Controller->ControllerNumber, Buffer); diff --git a/drivers/block/Kconfig b/drivers/block/Kconfig index e57ac5a43246..ae0949b3394f 100644 --- a/drivers/block/Kconfig +++ b/drivers/block/Kconfig @@ -383,8 +383,9 @@ config BLK_DEV_RAM thus say N here. config BLK_DEV_RAM_COUNT - int "Default number of RAM disks" if BLK_DEV_RAM + int "Default number of RAM disks" default "16" + depends on BLK_DEV_RAM help The default value is 16 RAM disks. Change this if you know what are doing. If you boot from a filesystem that needs to be extracted @@ -400,13 +401,16 @@ config BLK_DEV_RAM_SIZE 8192. 
config BLK_DEV_INITRD - bool "Initial RAM disk (initrd) support" + bool "Initial RAM filesystem and RAM disk (initramfs/initrd) support" help - The initial RAM disk is a RAM disk that is loaded by the boot loader - (loadlin or lilo) and that is mounted as root before the normal boot - procedure. It is typically used to load modules needed to mount the - "real" root file system, etc. See <file:Documentation/initrd.txt> - for details. + The initial RAM filesystem is a ramfs which is loaded by the + boot loader (loadlin or lilo) and that is mounted as root + before the normal boot procedure. It is typically used to + load modules needed to mount the "real" root file system, + etc. See <file:Documentation/initrd.txt> for details. + + If RAM disk support (BLK_DEV_RAM) is also included, this + also enables initial RAM disk (initrd) support. config CDROM_PKTCDVD diff --git a/drivers/block/acsi_slm.c b/drivers/block/acsi_slm.c index a5c1c8e871ec..4cb9c1336287 100644 --- a/drivers/block/acsi_slm.c +++ b/drivers/block/acsi_slm.c @@ -369,8 +369,6 @@ static ssize_t slm_read( struct file *file, char *buf, size_t count, int length; int end; - if (count < 0) - return( -EINVAL ); if (!(page = __get_free_page( GFP_KERNEL ))) return( -ENOMEM ); diff --git a/drivers/block/aoe/aoeblk.c b/drivers/block/aoe/aoeblk.c index 32fea55fac48..393b86a3dbf8 100644 --- a/drivers/block/aoe/aoeblk.c +++ b/drivers/block/aoe/aoeblk.c @@ -211,9 +211,7 @@ aoeblk_gdalloc(void *vp) return; } - d->bufpool = mempool_create(MIN_BUFS, - mempool_alloc_slab, mempool_free_slab, - buf_pool_cache); + d->bufpool = mempool_create_slab_pool(MIN_BUFS, buf_pool_cache); if (d->bufpool == NULL) { printk(KERN_ERR "aoe: aoeblk_gdalloc: cannot allocate bufpool " "for %ld.%ld\n", d->aoemajor, d->aoeminor); diff --git a/drivers/block/cciss.c b/drivers/block/cciss.c index 71ec9e664383..1b0fd31c57c3 100644 --- a/drivers/block/cciss.c +++ b/drivers/block/cciss.c @@ -996,13 +996,11 @@ static int cciss_ioctl(struct inode *inode, struct file *filep, status = -EINVAL; goto cleanup1; } - buff = (unsigned char **) kmalloc(MAXSGENTRIES * - sizeof(char *), GFP_KERNEL); + buff = kzalloc(MAXSGENTRIES * sizeof(char *), GFP_KERNEL); if (!buff) { status = -ENOMEM; goto cleanup1; } - memset(buff, 0, MAXSGENTRIES); buff_size = (int *) kmalloc(MAXSGENTRIES * sizeof(int), GFP_KERNEL); if (!buff_size) { @@ -2729,9 +2727,9 @@ static void __devinit cciss_interrupt_mode(ctlr_info_t *c, struct pci_dev *pdev, return; } } +default_int_mode: #endif /* CONFIG_PCI_MSI */ /* if we get here we're going to use the default interrupt mode */ -default_int_mode: c->intr[SIMPLE_MODE_INT] = pdev->irq; return; } @@ -2940,13 +2938,12 @@ static void cciss_getgeometry(int cntl_num) int block_size; int total_size; - ld_buff = kmalloc(sizeof(ReportLunData_struct), GFP_KERNEL); + ld_buff = kzalloc(sizeof(ReportLunData_struct), GFP_KERNEL); if (ld_buff == NULL) { printk(KERN_ERR "cciss: out of memory\n"); return; } - memset(ld_buff, 0, sizeof(ReportLunData_struct)); size_buff = kmalloc(sizeof( ReadCapdata_struct), GFP_KERNEL); if (size_buff == NULL) { @@ -3060,10 +3057,9 @@ static int alloc_cciss_hba(void) for(i=0; i< MAX_CTLR; i++) { if (!hba[i]) { ctlr_info_t *p; - p = kmalloc(sizeof(ctlr_info_t), GFP_KERNEL); + p = kzalloc(sizeof(ctlr_info_t), GFP_KERNEL); if (!p) goto Enomem; - memset(p, 0, sizeof(ctlr_info_t)); for (n = 0; n < NWD; n++) p->gendisk[n] = disk[n]; hba[i] = p; diff --git a/drivers/block/cciss_scsi.c b/drivers/block/cciss_scsi.c index 0e66e904bd8c..597c007fe81b 100644 --- 
a/drivers/block/cciss_scsi.c +++ b/drivers/block/cciss_scsi.c @@ -1027,12 +1027,11 @@ cciss_update_non_disk_devices(int cntl_num, int hostno) int i; c = (ctlr_info_t *) hba[cntl_num]; - ld_buff = kmalloc(reportlunsize, GFP_KERNEL); + ld_buff = kzalloc(reportlunsize, GFP_KERNEL); if (ld_buff == NULL) { printk(KERN_ERR "cciss: out of memory\n"); return; } - memset(ld_buff, 0, reportlunsize); inq_buff = kmalloc(OBDR_TAPE_INQ_SIZE, GFP_KERNEL); if (inq_buff == NULL) { printk(KERN_ERR "cciss: out of memory\n"); diff --git a/drivers/block/floppy.c b/drivers/block/floppy.c index 840919bba76c..bedb689b051f 100644 --- a/drivers/block/floppy.c +++ b/drivers/block/floppy.c @@ -170,6 +170,7 @@ static int print_unex = 1; #include <linux/mm.h> #include <linux/bio.h> #include <linux/string.h> +#include <linux/jiffies.h> #include <linux/fcntl.h> #include <linux/delay.h> #include <linux/mc146818rtc.h> /* CMOS defines */ @@ -250,6 +251,18 @@ static int irqdma_allocated; #include <linux/cdrom.h> /* for the compatibility eject ioctl */ #include <linux/completion.h> +/* + * Interrupt freeing also means /proc VFS work - dont do it + * from interrupt context. We push this work into keventd: + */ +static void fd_free_irq_fn(void *data) +{ + fd_free_irq(); +} + +static DECLARE_WORK(fd_free_irq_work, fd_free_irq_fn, NULL); + + static struct request *current_req; static struct request_queue *floppy_queue; static void do_fd_request(request_queue_t * q); @@ -735,7 +748,7 @@ static int disk_change(int drive) { int fdc = FDC(drive); #ifdef FLOPPY_SANITY_CHECK - if (jiffies - UDRS->select_date < UDP->select_delay) + if (time_before(jiffies, UDRS->select_date + UDP->select_delay)) DPRINT("WARNING disk change called early\n"); if (!(FDCS->dor & (0x10 << UNIT(drive))) || (FDCS->dor & 3) != UNIT(drive) || fdc != FDC(drive)) { @@ -1063,7 +1076,7 @@ static int fd_wait_for_completion(unsigned long delay, timeout_fn function) return 1; } - if ((signed)(jiffies - delay) < 0) { + if (time_before(jiffies, delay)) { del_timer(&fd_timer); fd_timer.function = function; fd_timer.expires = delay; @@ -1523,7 +1536,7 @@ static void setup_rw_floppy(void) * again just before spinup completion. Beware that * after scandrives, we must again wait for selection. 
*/ - if ((signed)(ready_date - jiffies) > DP->select_delay) { + if (time_after(ready_date, jiffies + DP->select_delay)) { ready_date -= DP->select_delay; function = (timeout_fn) floppy_start; } else @@ -3811,7 +3824,7 @@ static int check_floppy_change(struct gendisk *disk) if (UTESTF(FD_DISK_CHANGED) || UTESTF(FD_VERIFY)) return 1; - if (UDP->checkfreq < (int)(jiffies - UDRS->last_checked)) { + if (time_after(jiffies, UDRS->last_checked + UDP->checkfreq)) { if (floppy_grab_irq_and_dma()) { return 1; } @@ -4433,6 +4446,13 @@ static int floppy_grab_irq_and_dma(void) return 0; } spin_unlock_irqrestore(&floppy_usage_lock, flags); + + /* + * We might have scheduled a free_irq(), wait it to + * drain first: + */ + flush_scheduled_work(); + if (fd_request_irq()) { DPRINT("Unable to grab IRQ%d for the floppy driver\n", FLOPPY_IRQ); @@ -4522,7 +4542,7 @@ static void floppy_release_irq_and_dma(void) if (irqdma_allocated) { fd_disable_dma(); fd_free_dma(); - fd_free_irq(); + schedule_work(&fd_free_irq_work); irqdma_allocated = 0; } set_dor(0, ~0, 8); @@ -4633,6 +4653,8 @@ void cleanup_module(void) /* eject disk, if any */ fd_eject(0); + flush_scheduled_work(); /* fd_free_irq() might be pending */ + wait_for_completion(&device_release); } diff --git a/drivers/block/loop.c b/drivers/block/loop.c index 74bf0255e98f..9c3b94e8f03b 100644 --- a/drivers/block/loop.c +++ b/drivers/block/loop.c @@ -839,7 +839,9 @@ static int loop_set_fd(struct loop_device *lo, struct file *lo_file, set_blocksize(bdev, lo_blocksize); - kernel_thread(loop_thread, lo, CLONE_KERNEL); + error = kernel_thread(loop_thread, lo, CLONE_KERNEL); + if (error < 0) + goto out_putf; wait_for_completion(&lo->lo_done); return 0; diff --git a/drivers/block/paride/bpck6.c b/drivers/block/paride/bpck6.c index 08d858ad64db..41a237c5957d 100644 --- a/drivers/block/paride/bpck6.c +++ b/drivers/block/paride/bpck6.c @@ -224,10 +224,9 @@ static void bpck6_log_adapter( PIA *pi, char * scratch, int verbose ) static int bpck6_init_proto(PIA *pi) { - Interface *p = kmalloc(sizeof(Interface), GFP_KERNEL); + Interface *p = kzalloc(sizeof(Interface), GFP_KERNEL); if (p) { - memset(p, 0, sizeof(Interface)); pi->private = (unsigned long)p; return 0; } diff --git a/drivers/block/paride/pd.c b/drivers/block/paride/pd.c index 62d2464c12f2..2403721f9db1 100644 --- a/drivers/block/paride/pd.c +++ b/drivers/block/paride/pd.c @@ -151,6 +151,7 @@ enum {D_PRT, D_PRO, D_UNI, D_MOD, D_GEO, D_SBY, D_DLY, D_SLV}; #include <linux/cdrom.h> /* for the eject ioctl */ #include <linux/blkdev.h> #include <linux/blkpg.h> +#include <linux/kernel.h> #include <asm/uaccess.h> #include <linux/sched.h> #include <linux/workqueue.h> @@ -275,7 +276,7 @@ static void pd_print_error(struct pd_unit *disk, char *msg, int status) int i; printk("%s: %s: status = 0x%x =", disk->name, msg, status); - for (i = 0; i < 18; i++) + for (i = 0; i < ARRAY_SIZE(pd_errs); i++) if (status & (1 << i)) printk(" %s", pd_errs[i]); printk("\n"); diff --git a/drivers/block/paride/pg.c b/drivers/block/paride/pg.c index 6f5df0fad703..79b868254032 100644 --- a/drivers/block/paride/pg.c +++ b/drivers/block/paride/pg.c @@ -643,7 +643,8 @@ static ssize_t pg_read(struct file *filp, char __user *buf, size_t count, loff_t static int __init pg_init(void) { - int unit, err = 0; + int unit; + int err; if (disable){ err = -1; @@ -657,16 +658,17 @@ static int __init pg_init(void) goto out; } - if (register_chrdev(major, name, &pg_fops)) { + err = register_chrdev(major, name, &pg_fops); + if (err < 0) { printk("pg_init: unable 
to get major number %d\n", major); for (unit = 0; unit < PG_UNITS; unit++) { struct pg *dev = &devices[unit]; if (dev->present) pi_release(dev->pi); } - err = -1; goto out; } + major = err; /* In case the user specified `major=0' (dynamic) */ pg_class = class_create(THIS_MODULE, "pg"); if (IS_ERR(pg_class)) { err = PTR_ERR(pg_class); diff --git a/drivers/block/paride/pt.c b/drivers/block/paride/pt.c index 715ae5dc88fb..d2013d362403 100644 --- a/drivers/block/paride/pt.c +++ b/drivers/block/paride/pt.c @@ -943,7 +943,8 @@ static ssize_t pt_write(struct file *filp, const char __user *buf, size_t count, static int __init pt_init(void) { - int unit, err = 0; + int unit; + int err; if (disable) { err = -1; @@ -955,14 +956,15 @@ static int __init pt_init(void) goto out; } - if (register_chrdev(major, name, &pt_fops)) { + err = register_chrdev(major, name, &pt_fops); + if (err < 0) { printk("pt_init: unable to get major number %d\n", major); for (unit = 0; unit < PT_UNITS; unit++) if (pt[unit].present) pi_release(pt[unit].pi); - err = -1; goto out; } + major = err; pt_class = class_create(THIS_MODULE, "pt"); if (IS_ERR(pt_class)) { err = PTR_ERR(pt_class); diff --git a/drivers/block/pktcdvd.c b/drivers/block/pktcdvd.c index 1d261f985f31..a04f60693c39 100644 --- a/drivers/block/pktcdvd.c +++ b/drivers/block/pktcdvd.c @@ -230,16 +230,6 @@ static int pkt_grow_pktlist(struct pktcdvd_device *pd, int nr_packets) return 1; } -static void *pkt_rb_alloc(gfp_t gfp_mask, void *data) -{ - return kmalloc(sizeof(struct pkt_rb_node), gfp_mask); -} - -static void pkt_rb_free(void *ptr, void *data) -{ - kfree(ptr); -} - static inline struct pkt_rb_node *pkt_rbtree_next(struct pkt_rb_node *node) { struct rb_node *n = rb_next(&node->rb_node); @@ -2073,16 +2063,6 @@ static int pkt_close(struct inode *inode, struct file *file) } -static void *psd_pool_alloc(gfp_t gfp_mask, void *data) -{ - return kmalloc(sizeof(struct packet_stacked_data), gfp_mask); -} - -static void psd_pool_free(void *ptr, void *data) -{ - kfree(ptr); -} - static int pkt_end_io_read_cloned(struct bio *bio, unsigned int bytes_done, int err) { struct packet_stacked_data *psd = bio->bi_private; @@ -2475,7 +2455,8 @@ static int pkt_setup_dev(struct pkt_ctrl_command *ctrl_cmd) if (!pd) return ret; - pd->rb_pool = mempool_create(PKT_RB_POOL_SIZE, pkt_rb_alloc, pkt_rb_free, NULL); + pd->rb_pool = mempool_create_kmalloc_pool(PKT_RB_POOL_SIZE, + sizeof(struct pkt_rb_node)); if (!pd->rb_pool) goto out_mem; @@ -2639,7 +2620,8 @@ static int __init pkt_init(void) { int ret; - psd_pool = mempool_create(PSD_POOL_SIZE, psd_pool_alloc, psd_pool_free, NULL); + psd_pool = mempool_create_kmalloc_pool(PSD_POOL_SIZE, + sizeof(struct packet_stacked_data)); if (!psd_pool) return -ENOMEM; diff --git a/drivers/block/umem.c b/drivers/block/umem.c index c16e66b9c7a7..f7d4c65a7b8c 100644 --- a/drivers/block/umem.c +++ b/drivers/block/umem.c @@ -50,6 +50,7 @@ #include <linux/timer.h> #include <linux/pci.h> #include <linux/slab.h> +#include <linux/dma-mapping.h> #include <linux/fcntl.h> /* O_ACCMODE */ #include <linux/hdreg.h> /* HDIO_GETGEO */ @@ -881,8 +882,8 @@ static int __devinit mm_pci_probe(struct pci_dev *dev, const struct pci_device_i printk(KERN_INFO "Micro Memory(tm) controller #%d found at %02x:%02x (PCI Mem Module (Battery Backup))\n", card->card_number, dev->bus->number, dev->devfn); - if (pci_set_dma_mask(dev, 0xffffffffffffffffLL) && - pci_set_dma_mask(dev, 0xffffffffLL)) { + if (pci_set_dma_mask(dev, DMA_64BIT_MASK) && + pci_set_dma_mask(dev, DMA_32BIT_MASK)) { 
printk(KERN_WARNING "MM%d: NO suitable DMA found\n",num_cards); return -ENOMEM; } diff --git a/drivers/char/Kconfig b/drivers/char/Kconfig index 5980f3e886fc..73d30bf01582 100644 --- a/drivers/char/Kconfig +++ b/drivers/char/Kconfig @@ -187,6 +187,7 @@ config MOXA_SMARTIO config ISI tristate "Multi-Tech multiport card support (EXPERIMENTAL)" depends on SERIAL_NONSTANDARD + select FW_LOADER help This is a driver for the Multi-Tech cards which provide several serial ports. The driver is experimental and can currently only be @@ -695,7 +696,7 @@ config NVRAM config RTC tristate "Enhanced Real Time Clock Support" - depends on !PPC && !PARISC && !IA64 && !M68K && (!SPARC || PCI) && !FRV + depends on !PPC && !PARISC && !IA64 && !M68K && (!SPARC || PCI) && !FRV && !ARM ---help--- If you say Y here and create a character special file /dev/rtc with major number 10 and minor number 135 using mknod ("man mknod"), you diff --git a/drivers/char/agp/nvidia-agp.c b/drivers/char/agp/nvidia-agp.c index 70b8ed9cd172..4c67135c12d8 100644 --- a/drivers/char/agp/nvidia-agp.c +++ b/drivers/char/agp/nvidia-agp.c @@ -11,6 +11,7 @@ #include <linux/gfp.h> #include <linux/page-flags.h> #include <linux/mm.h> +#include <linux/jiffies.h> #include "agp.h" /* NVIDIA registers */ @@ -256,7 +257,7 @@ static void nvidia_tlbflush(struct agp_memory *mem) do { pci_read_config_dword(nvidia_private.dev_1, NVIDIA_1_WBC, &wbc_reg); - if ((signed)(end - jiffies) <= 0) { + if (time_before_eq(end, jiffies)) { printk(KERN_ERR PFX "TLB flush took more than 3 seconds.\n"); } diff --git a/drivers/char/drm/drm_fops.c b/drivers/char/drm/drm_fops.c index 641f7633878c..b7f7951c4587 100644 --- a/drivers/char/drm/drm_fops.c +++ b/drivers/char/drm/drm_fops.c @@ -175,7 +175,7 @@ int drm_stub_open(struct inode *inode, struct file *filp) drm_device_t *dev = NULL; int minor = iminor(inode); int err = -ENODEV; - struct file_operations *old_fops; + const struct file_operations *old_fops; DRM_DEBUG("\n"); diff --git a/drivers/char/drm/i810_dma.c b/drivers/char/drm/i810_dma.c index ae0aa6d7e0bb..c658dde3633b 100644 --- a/drivers/char/drm/i810_dma.c +++ b/drivers/char/drm/i810_dma.c @@ -126,7 +126,7 @@ static int i810_map_buffer(drm_buf_t * buf, struct file *filp) drm_device_t *dev = priv->head->dev; drm_i810_buf_priv_t *buf_priv = buf->dev_private; drm_i810_private_t *dev_priv = dev->dev_private; - struct file_operations *old_fops; + const struct file_operations *old_fops; int retcode = 0; if (buf_priv->currently_mapped == I810_BUF_MAPPED) diff --git a/drivers/char/drm/i830_dma.c b/drivers/char/drm/i830_dma.c index 163f2cbfe60d..b0f815d8cea8 100644 --- a/drivers/char/drm/i830_dma.c +++ b/drivers/char/drm/i830_dma.c @@ -128,7 +128,7 @@ static int i830_map_buffer(drm_buf_t * buf, struct file *filp) drm_device_t *dev = priv->head->dev; drm_i830_buf_priv_t *buf_priv = buf->dev_private; drm_i830_private_t *dev_priv = dev->dev_private; - struct file_operations *old_fops; + const struct file_operations *old_fops; unsigned long virtual; int retcode = 0; diff --git a/drivers/char/epca.c b/drivers/char/epca.c index 765c5c108bf4..9cad8501d62c 100644 --- a/drivers/char/epca.c +++ b/drivers/char/epca.c @@ -486,8 +486,7 @@ static void pc_close(struct tty_struct * tty, struct file * filp) } /* End channel is open more than once */ /* Port open only once go ahead with shutdown & reset */ - if (ch->count < 0) - BUG(); + BUG_ON(ch->count < 0); /* --------------------------------------------------------------- Let the rest of the driver know the channel is being closed. 
diff --git a/drivers/char/ftape/lowlevel/fdc-io.c b/drivers/char/ftape/lowlevel/fdc-io.c index b2e0928e8428..093fdf98b19a 100644 --- a/drivers/char/ftape/lowlevel/fdc-io.c +++ b/drivers/char/ftape/lowlevel/fdc-io.c @@ -607,7 +607,7 @@ void fdc_reset(void) fdc_mode = fdc_idle; - /* maybe the cli()/sti() pair is not necessary, BUT: + /* maybe the spin_lock_irq* pair is not necessary, BUT: * the following line MUST be here. Otherwise fdc_interrupt_wait() * won't wait. Note that fdc_reset() is called from * ftape_dumb_stop() when the fdc is busy transferring data. In this diff --git a/drivers/char/ipmi/ipmi_devintf.c b/drivers/char/ipmi/ipmi_devintf.c index 7c0684deea06..932feedda262 100644 --- a/drivers/char/ipmi/ipmi_devintf.c +++ b/drivers/char/ipmi/ipmi_devintf.c @@ -90,7 +90,7 @@ static unsigned int ipmi_poll(struct file *file, poll_table *wait) spin_lock_irqsave(&priv->recv_msg_lock, flags); - if (! list_empty(&(priv->recv_msgs))) + if (!list_empty(&(priv->recv_msgs))) mask |= (POLLIN | POLLRDNORM); spin_unlock_irqrestore(&priv->recv_msg_lock, flags); @@ -789,21 +789,53 @@ MODULE_PARM_DESC(ipmi_major, "Sets the major number of the IPMI device. By" " interface. Other values will set the major device number" " to that value."); +/* Keep track of the devices that are registered. */ +struct ipmi_reg_list { + dev_t dev; + struct list_head link; +}; +static LIST_HEAD(reg_list); +static DEFINE_MUTEX(reg_list_mutex); + static struct class *ipmi_class; -static void ipmi_new_smi(int if_num) +static void ipmi_new_smi(int if_num, struct device *device) { dev_t dev = MKDEV(ipmi_major, if_num); + struct ipmi_reg_list *entry; devfs_mk_cdev(dev, S_IFCHR | S_IRUSR | S_IWUSR, "ipmidev/%d", if_num); - class_device_create(ipmi_class, NULL, dev, NULL, "ipmi%d", if_num); + entry = kmalloc(sizeof(*entry), GFP_KERNEL); + if (!entry) { + printk(KERN_ERR "ipmi_devintf: Unable to create the" + " ipmi class device link\n"); + return; + } + entry->dev = dev; + + mutex_lock(®_list_mutex); + class_device_create(ipmi_class, NULL, dev, device, "ipmi%d", if_num); + list_add(&entry->link, ®_list); + mutex_unlock(®_list_mutex); } static void ipmi_smi_gone(int if_num) { - class_device_destroy(ipmi_class, MKDEV(ipmi_major, if_num)); + dev_t dev = MKDEV(ipmi_major, if_num); + struct ipmi_reg_list *entry; + + mutex_lock(®_list_mutex); + list_for_each_entry(entry, ®_list, link) { + if (entry->dev == dev) { + list_del(&entry->link); + kfree(entry); + break; + } + } + class_device_destroy(ipmi_class, dev); + mutex_unlock(®_list_mutex); devfs_remove("ipmidev/%d", if_num); } @@ -856,6 +888,14 @@ module_init(init_ipmi_devintf); static __exit void cleanup_ipmi(void) { + struct ipmi_reg_list *entry, *entry2; + mutex_lock(®_list_mutex); + list_for_each_entry_safe(entry, entry2, ®_list, link) { + list_del(&entry->link); + class_device_destroy(ipmi_class, entry->dev); + kfree(entry); + } + mutex_unlock(®_list_mutex); class_destroy(ipmi_class); ipmi_smi_watcher_unregister(&smi_watcher); devfs_remove(DEVICE_NAME); diff --git a/drivers/char/ipmi/ipmi_msghandler.c b/drivers/char/ipmi/ipmi_msghandler.c index abd4c5118a1b..40eb005b9d77 100644 --- a/drivers/char/ipmi/ipmi_msghandler.c +++ b/drivers/char/ipmi/ipmi_msghandler.c @@ -48,7 +48,7 @@ #define PFX "IPMI message handler: " -#define IPMI_DRIVER_VERSION "38.0" +#define IPMI_DRIVER_VERSION "39.0" static struct ipmi_recv_msg *ipmi_alloc_recv_msg(void); static int ipmi_init_msghandler(void); @@ -162,6 +162,28 @@ struct ipmi_proc_entry }; #endif +struct bmc_device +{ + struct platform_device 
*dev; + struct ipmi_device_id id; + unsigned char guid[16]; + int guid_set; + + struct kref refcount; + + /* bmc device attributes */ + struct device_attribute device_id_attr; + struct device_attribute provides_dev_sdrs_attr; + struct device_attribute revision_attr; + struct device_attribute firmware_rev_attr; + struct device_attribute version_attr; + struct device_attribute add_dev_support_attr; + struct device_attribute manufacturer_id_attr; + struct device_attribute product_id_attr; + struct device_attribute guid_attr; + struct device_attribute aux_firmware_rev_attr; +}; + #define IPMI_IPMB_NUM_SEQ 64 #define IPMI_MAX_CHANNELS 16 struct ipmi_smi @@ -178,9 +200,8 @@ struct ipmi_smi /* Used for wake ups at startup. */ wait_queue_head_t waitq; - /* The IPMI version of the BMC on the other end. */ - unsigned char version_major; - unsigned char version_minor; + struct bmc_device *bmc; + char *my_dev_name; /* This is the lower-layer's sender routine. */ struct ipmi_smi_handlers *handlers; @@ -194,6 +215,9 @@ struct ipmi_smi struct ipmi_proc_entry *proc_entries; #endif + /* Driver-model device for the system interface. */ + struct device *si_dev; + /* A table of sequence numbers for this interface. We use the sequence numbers for IPMB messages that go out of the interface to match them up with their responses. A routine @@ -312,6 +336,7 @@ struct ipmi_smi /* Events that were received with the proper format. */ unsigned int events; }; +#define to_si_intf_from_dev(device) container_of(device, struct ipmi_smi, dev) /* Used to mark an interface entry that cannot be used but is not a * free entry, either, primarily used at creation and deletion time so @@ -320,6 +345,15 @@ struct ipmi_smi #define IPMI_INVALID_INTERFACE(i) (((i) == NULL) \ || (i == IPMI_INVALID_INTERFACE_ENTRY)) +/** + * The driver model view of the IPMI messaging driver. 
+ */ +static struct device_driver ipmidriver = { + .name = "ipmi", + .bus = &platform_bus_type +}; +static DEFINE_MUTEX(ipmidriver_mutex); + #define MAX_IPMI_INTERFACES 4 static ipmi_smi_t ipmi_interfaces[MAX_IPMI_INTERFACES]; @@ -393,7 +427,7 @@ int ipmi_smi_watcher_register(struct ipmi_smi_watcher *watcher) if (IPMI_INVALID_INTERFACE(intf)) continue; spin_unlock_irqrestore(&interfaces_lock, flags); - watcher->new_smi(i); + watcher->new_smi(i, intf->si_dev); spin_lock_irqsave(&interfaces_lock, flags); } spin_unlock_irqrestore(&interfaces_lock, flags); @@ -409,14 +443,14 @@ int ipmi_smi_watcher_unregister(struct ipmi_smi_watcher *watcher) } static void -call_smi_watchers(int i) +call_smi_watchers(int i, struct device *dev) { struct ipmi_smi_watcher *w; down_read(&smi_watchers_sem); list_for_each_entry(w, &smi_watchers, link) { if (try_module_get(w->owner)) { - w->new_smi(i); + w->new_smi(i, dev); module_put(w->owner); } } @@ -844,8 +878,8 @@ void ipmi_get_version(ipmi_user_t user, unsigned char *major, unsigned char *minor) { - *major = user->intf->version_major; - *minor = user->intf->version_minor; + *major = ipmi_version_major(&user->intf->bmc->id); + *minor = ipmi_version_minor(&user->intf->bmc->id); } int ipmi_set_my_address(ipmi_user_t user, @@ -1553,7 +1587,8 @@ static int version_file_read_proc(char *page, char **start, off_t off, ipmi_smi_t intf = data; return sprintf(out, "%d.%d\n", - intf->version_major, intf->version_minor); + ipmi_version_major(&intf->bmc->id), + ipmi_version_minor(&intf->bmc->id)); } static int stat_file_read_proc(char *page, char **start, off_t off, @@ -1712,6 +1747,470 @@ static void remove_proc_entries(ipmi_smi_t smi) #endif /* CONFIG_PROC_FS */ } +static int __find_bmc_guid(struct device *dev, void *data) +{ + unsigned char *id = data; + struct bmc_device *bmc = dev_get_drvdata(dev); + return memcmp(bmc->guid, id, 16) == 0; +} + +static struct bmc_device *ipmi_find_bmc_guid(struct device_driver *drv, + unsigned char *guid) +{ + struct device *dev; + + dev = driver_find_device(drv, NULL, guid, __find_bmc_guid); + if (dev) + return dev_get_drvdata(dev); + else + return NULL; +} + +struct prod_dev_id { + unsigned int product_id; + unsigned char device_id; +}; + +static int __find_bmc_prod_dev_id(struct device *dev, void *data) +{ + struct prod_dev_id *id = data; + struct bmc_device *bmc = dev_get_drvdata(dev); + + return (bmc->id.product_id == id->product_id + && bmc->id.product_id == id->product_id + && bmc->id.device_id == id->device_id); +} + +static struct bmc_device *ipmi_find_bmc_prod_dev_id( + struct device_driver *drv, + unsigned char product_id, unsigned char device_id) +{ + struct prod_dev_id id = { + .product_id = product_id, + .device_id = device_id, + }; + struct device *dev; + + dev = driver_find_device(drv, NULL, &id, __find_bmc_prod_dev_id); + if (dev) + return dev_get_drvdata(dev); + else + return NULL; +} + +static ssize_t device_id_show(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + struct bmc_device *bmc = dev_get_drvdata(dev); + + return snprintf(buf, 10, "%u\n", bmc->id.device_id); +} + +static ssize_t provides_dev_sdrs_show(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + struct bmc_device *bmc = dev_get_drvdata(dev); + + return snprintf(buf, 10, "%u\n", + bmc->id.device_revision && 0x80 >> 7); +} + +static ssize_t revision_show(struct device *dev, struct device_attribute *attr, + char *buf) +{ + struct bmc_device *bmc = dev_get_drvdata(dev); + + return snprintf(buf, 20, "%u\n", + 
bmc->id.device_revision && 0x0F); +} + +static ssize_t firmware_rev_show(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + struct bmc_device *bmc = dev_get_drvdata(dev); + + return snprintf(buf, 20, "%u.%x\n", bmc->id.firmware_revision_1, + bmc->id.firmware_revision_2); +} + +static ssize_t ipmi_version_show(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + struct bmc_device *bmc = dev_get_drvdata(dev); + + return snprintf(buf, 20, "%u.%u\n", + ipmi_version_major(&bmc->id), + ipmi_version_minor(&bmc->id)); +} + +static ssize_t add_dev_support_show(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + struct bmc_device *bmc = dev_get_drvdata(dev); + + return snprintf(buf, 10, "0x%02x\n", + bmc->id.additional_device_support); +} + +static ssize_t manufacturer_id_show(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + struct bmc_device *bmc = dev_get_drvdata(dev); + + return snprintf(buf, 20, "0x%6.6x\n", bmc->id.manufacturer_id); +} + +static ssize_t product_id_show(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + struct bmc_device *bmc = dev_get_drvdata(dev); + + return snprintf(buf, 10, "0x%4.4x\n", bmc->id.product_id); +} + +static ssize_t aux_firmware_rev_show(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + struct bmc_device *bmc = dev_get_drvdata(dev); + + return snprintf(buf, 21, "0x%02x 0x%02x 0x%02x 0x%02x\n", + bmc->id.aux_firmware_revision[3], + bmc->id.aux_firmware_revision[2], + bmc->id.aux_firmware_revision[1], + bmc->id.aux_firmware_revision[0]); +} + +static ssize_t guid_show(struct device *dev, struct device_attribute *attr, + char *buf) +{ + struct bmc_device *bmc = dev_get_drvdata(dev); + + return snprintf(buf, 100, "%Lx%Lx\n", + (long long) bmc->guid[0], + (long long) bmc->guid[8]); +} + +static void +cleanup_bmc_device(struct kref *ref) +{ + struct bmc_device *bmc; + + bmc = container_of(ref, struct bmc_device, refcount); + + device_remove_file(&bmc->dev->dev, + &bmc->device_id_attr); + device_remove_file(&bmc->dev->dev, + &bmc->provides_dev_sdrs_attr); + device_remove_file(&bmc->dev->dev, + &bmc->revision_attr); + device_remove_file(&bmc->dev->dev, + &bmc->firmware_rev_attr); + device_remove_file(&bmc->dev->dev, + &bmc->version_attr); + device_remove_file(&bmc->dev->dev, + &bmc->add_dev_support_attr); + device_remove_file(&bmc->dev->dev, + &bmc->manufacturer_id_attr); + device_remove_file(&bmc->dev->dev, + &bmc->product_id_attr); + if (bmc->id.aux_firmware_revision_set) + device_remove_file(&bmc->dev->dev, + &bmc->aux_firmware_rev_attr); + if (bmc->guid_set) + device_remove_file(&bmc->dev->dev, + &bmc->guid_attr); + platform_device_unregister(bmc->dev); + kfree(bmc); +} + +static void ipmi_bmc_unregister(ipmi_smi_t intf) +{ + struct bmc_device *bmc = intf->bmc; + + sysfs_remove_link(&intf->si_dev->kobj, "bmc"); + if (intf->my_dev_name) { + sysfs_remove_link(&bmc->dev->dev.kobj, intf->my_dev_name); + kfree(intf->my_dev_name); + intf->my_dev_name = NULL; + } + + mutex_lock(&ipmidriver_mutex); + kref_put(&bmc->refcount, cleanup_bmc_device); + mutex_unlock(&ipmidriver_mutex); +} + +static int ipmi_bmc_register(ipmi_smi_t intf) +{ + int rv; + struct bmc_device *bmc = intf->bmc; + struct bmc_device *old_bmc; + int size; + char dummy[1]; + + mutex_lock(&ipmidriver_mutex); + + /* + * Try to find if there is an bmc_device struct + * representing the interfaced BMC already + */ + if (bmc->guid_set) + old_bmc = ipmi_find_bmc_guid(&ipmidriver, bmc->guid); + 
else + old_bmc = ipmi_find_bmc_prod_dev_id(&ipmidriver, + bmc->id.product_id, + bmc->id.device_id); + + /* + * If there is already an bmc_device, free the new one, + * otherwise register the new BMC device + */ + if (old_bmc) { + kfree(bmc); + intf->bmc = old_bmc; + bmc = old_bmc; + + kref_get(&bmc->refcount); + mutex_unlock(&ipmidriver_mutex); + + printk(KERN_INFO + "ipmi: interfacing existing BMC (man_id: 0x%6.6x," + " prod_id: 0x%4.4x, dev_id: 0x%2.2x)\n", + bmc->id.manufacturer_id, + bmc->id.product_id, + bmc->id.device_id); + } else { + bmc->dev = platform_device_alloc("ipmi_bmc", + bmc->id.device_id); + if (! bmc->dev) { + printk(KERN_ERR + "ipmi_msghandler:" + " Unable to allocate platform device\n"); + return -ENOMEM; + } + bmc->dev->dev.driver = &ipmidriver; + dev_set_drvdata(&bmc->dev->dev, bmc); + kref_init(&bmc->refcount); + + rv = platform_device_register(bmc->dev); + mutex_unlock(&ipmidriver_mutex); + if (rv) { + printk(KERN_ERR + "ipmi_msghandler:" + " Unable to register bmc device: %d\n", + rv); + /* Don't go to out_err, you can only do that if + the device is registered already. */ + return rv; + } + + bmc->device_id_attr.attr.name = "device_id"; + bmc->device_id_attr.attr.owner = THIS_MODULE; + bmc->device_id_attr.attr.mode = S_IRUGO; + bmc->device_id_attr.show = device_id_show; + + bmc->provides_dev_sdrs_attr.attr.name = "provides_device_sdrs"; + bmc->provides_dev_sdrs_attr.attr.owner = THIS_MODULE; + bmc->provides_dev_sdrs_attr.attr.mode = S_IRUGO; + bmc->provides_dev_sdrs_attr.show = provides_dev_sdrs_show; + + + bmc->revision_attr.attr.name = "revision"; + bmc->revision_attr.attr.owner = THIS_MODULE; + bmc->revision_attr.attr.mode = S_IRUGO; + bmc->revision_attr.show = revision_show; + + bmc->firmware_rev_attr.attr.name = "firmware_revision"; + bmc->firmware_rev_attr.attr.owner = THIS_MODULE; + bmc->firmware_rev_attr.attr.mode = S_IRUGO; + bmc->firmware_rev_attr.show = firmware_rev_show; + + bmc->version_attr.attr.name = "ipmi_version"; + bmc->version_attr.attr.owner = THIS_MODULE; + bmc->version_attr.attr.mode = S_IRUGO; + bmc->version_attr.show = ipmi_version_show; + + bmc->add_dev_support_attr.attr.name + = "additional_device_support"; + bmc->add_dev_support_attr.attr.owner = THIS_MODULE; + bmc->add_dev_support_attr.attr.mode = S_IRUGO; + bmc->add_dev_support_attr.show = add_dev_support_show; + + bmc->manufacturer_id_attr.attr.name = "manufacturer_id"; + bmc->manufacturer_id_attr.attr.owner = THIS_MODULE; + bmc->manufacturer_id_attr.attr.mode = S_IRUGO; + bmc->manufacturer_id_attr.show = manufacturer_id_show; + + bmc->product_id_attr.attr.name = "product_id"; + bmc->product_id_attr.attr.owner = THIS_MODULE; + bmc->product_id_attr.attr.mode = S_IRUGO; + bmc->product_id_attr.show = product_id_show; + + bmc->guid_attr.attr.name = "guid"; + bmc->guid_attr.attr.owner = THIS_MODULE; + bmc->guid_attr.attr.mode = S_IRUGO; + bmc->guid_attr.show = guid_show; + + bmc->aux_firmware_rev_attr.attr.name = "aux_firmware_revision"; + bmc->aux_firmware_rev_attr.attr.owner = THIS_MODULE; + bmc->aux_firmware_rev_attr.attr.mode = S_IRUGO; + bmc->aux_firmware_rev_attr.show = aux_firmware_rev_show; + + device_create_file(&bmc->dev->dev, + &bmc->device_id_attr); + device_create_file(&bmc->dev->dev, + &bmc->provides_dev_sdrs_attr); + device_create_file(&bmc->dev->dev, + &bmc->revision_attr); + device_create_file(&bmc->dev->dev, + &bmc->firmware_rev_attr); + device_create_file(&bmc->dev->dev, + &bmc->version_attr); + device_create_file(&bmc->dev->dev, + &bmc->add_dev_support_attr); + 
device_create_file(&bmc->dev->dev, + &bmc->manufacturer_id_attr); + device_create_file(&bmc->dev->dev, + &bmc->product_id_attr); + if (bmc->id.aux_firmware_revision_set) + device_create_file(&bmc->dev->dev, + &bmc->aux_firmware_rev_attr); + if (bmc->guid_set) + device_create_file(&bmc->dev->dev, + &bmc->guid_attr); + + printk(KERN_INFO + "ipmi: Found new BMC (man_id: 0x%6.6x, " + " prod_id: 0x%4.4x, dev_id: 0x%2.2x)\n", + bmc->id.manufacturer_id, + bmc->id.product_id, + bmc->id.device_id); + } + + /* + * create symlink from system interface device to bmc device + * and back. + */ + rv = sysfs_create_link(&intf->si_dev->kobj, + &bmc->dev->dev.kobj, "bmc"); + if (rv) { + printk(KERN_ERR + "ipmi_msghandler: Unable to create bmc symlink: %d\n", + rv); + goto out_err; + } + + size = snprintf(dummy, 0, "ipmi%d", intf->intf_num); + intf->my_dev_name = kmalloc(size+1, GFP_KERNEL); + if (!intf->my_dev_name) { + rv = -ENOMEM; + printk(KERN_ERR + "ipmi_msghandler: allocate link from BMC: %d\n", + rv); + goto out_err; + } + snprintf(intf->my_dev_name, size+1, "ipmi%d", intf->intf_num); + + rv = sysfs_create_link(&bmc->dev->dev.kobj, &intf->si_dev->kobj, + intf->my_dev_name); + if (rv) { + kfree(intf->my_dev_name); + intf->my_dev_name = NULL; + printk(KERN_ERR + "ipmi_msghandler:" + " Unable to create symlink to bmc: %d\n", + rv); + goto out_err; + } + + return 0; + +out_err: + ipmi_bmc_unregister(intf); + return rv; +} + +static int +send_guid_cmd(ipmi_smi_t intf, int chan) +{ + struct kernel_ipmi_msg msg; + struct ipmi_system_interface_addr si; + + si.addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE; + si.channel = IPMI_BMC_CHANNEL; + si.lun = 0; + + msg.netfn = IPMI_NETFN_APP_REQUEST; + msg.cmd = IPMI_GET_DEVICE_GUID_CMD; + msg.data = NULL; + msg.data_len = 0; + return i_ipmi_request(NULL, + intf, + (struct ipmi_addr *) &si, + 0, + &msg, + intf, + NULL, + NULL, + 0, + intf->channels[0].address, + intf->channels[0].lun, + -1, 0); +} + +static void +guid_handler(ipmi_smi_t intf, struct ipmi_recv_msg *msg) +{ + if ((msg->addr.addr_type != IPMI_SYSTEM_INTERFACE_ADDR_TYPE) + || (msg->msg.netfn != IPMI_NETFN_APP_RESPONSE) + || (msg->msg.cmd != IPMI_GET_DEVICE_GUID_CMD)) + /* Not for me */ + return; + + if (msg->msg.data[0] != 0) { + /* Error from getting the GUID, the BMC doesn't have one. */ + intf->bmc->guid_set = 0; + goto out; + } + + if (msg->msg.data_len < 17) { + intf->bmc->guid_set = 0; + printk(KERN_WARNING PFX + "guid_handler: The GUID response from the BMC was too" + " short, it was %d but should have been 17. Assuming" + " GUID is not available.\n", + msg->msg.data_len); + goto out; + } + + memcpy(intf->bmc->guid, msg->msg.data, 16); + intf->bmc->guid_set = 1; + out: + wake_up(&intf->waitq); +} + +static void +get_guid(ipmi_smi_t intf) +{ + int rv; + + intf->bmc->guid_set = 0x2; + intf->null_user_handler = guid_handler; + rv = send_guid_cmd(intf, 0); + if (rv) + /* Send failed, no GUID available. 
*/ + intf->bmc->guid_set = 0; + wait_event(intf->waitq, intf->bmc->guid_set != 2); + intf->null_user_handler = NULL; +} + static int send_channel_info_cmd(ipmi_smi_t intf, int chan) { @@ -1804,8 +2303,8 @@ channel_handler(ipmi_smi_t intf, struct ipmi_recv_msg *msg) int ipmi_register_smi(struct ipmi_smi_handlers *handlers, void *send_info, - unsigned char version_major, - unsigned char version_minor, + struct ipmi_device_id *device_id, + struct device *si_dev, unsigned char slave_addr, ipmi_smi_t *new_intf) { @@ -1813,7 +2312,11 @@ int ipmi_register_smi(struct ipmi_smi_handlers *handlers, int rv; ipmi_smi_t intf; unsigned long flags; + int version_major; + int version_minor; + version_major = ipmi_version_major(device_id); + version_minor = ipmi_version_minor(device_id); /* Make sure the driver is actually initialized, this handles problems with initialization order. */ @@ -1831,10 +2334,15 @@ int ipmi_register_smi(struct ipmi_smi_handlers *handlers, if (!intf) return -ENOMEM; memset(intf, 0, sizeof(*intf)); + intf->bmc = kzalloc(sizeof(*intf->bmc), GFP_KERNEL); + if (!intf->bmc) { + kfree(intf); + return -ENOMEM; + } intf->intf_num = -1; kref_init(&intf->refcount); - intf->version_major = version_major; - intf->version_minor = version_minor; + intf->bmc->id = *device_id; + intf->si_dev = si_dev; for (j = 0; j < IPMI_MAX_CHANNELS; j++) { intf->channels[j].address = IPMI_BMC_SLAVE_ADDR; intf->channels[j].lun = 2; @@ -1884,6 +2392,8 @@ int ipmi_register_smi(struct ipmi_smi_handlers *handlers, caller before sending any messages with it. */ *new_intf = intf; + get_guid(intf); + if ((version_major > 1) || ((version_major == 1) && (version_minor >= 5))) { @@ -1898,6 +2408,7 @@ int ipmi_register_smi(struct ipmi_smi_handlers *handlers, /* Wait for the channel info to be read. */ wait_event(intf->waitq, intf->curr_channel >= IPMI_MAX_CHANNELS); + intf->null_user_handler = NULL; } else { /* Assume a single IPMB channel at zero. 
*/ intf->channels[0].medium = IPMI_CHANNEL_MEDIUM_IPMB; @@ -1907,6 +2418,8 @@ int ipmi_register_smi(struct ipmi_smi_handlers *handlers, if (rv == 0) rv = add_proc_entries(intf, i); + rv = ipmi_bmc_register(intf); + out: if (rv) { if (intf->proc_dir) @@ -1921,7 +2434,7 @@ int ipmi_register_smi(struct ipmi_smi_handlers *handlers, spin_lock_irqsave(&interfaces_lock, flags); ipmi_interfaces[i] = intf; spin_unlock_irqrestore(&interfaces_lock, flags); - call_smi_watchers(i); + call_smi_watchers(i, intf->si_dev); } return rv; @@ -1933,6 +2446,8 @@ int ipmi_unregister_smi(ipmi_smi_t intf) struct ipmi_smi_watcher *w; unsigned long flags; + ipmi_bmc_unregister(intf); + spin_lock_irqsave(&interfaces_lock, flags); for (i = 0; i < MAX_IPMI_INTERFACES; i++) { if (ipmi_interfaces[i] == intf) { @@ -3196,10 +3711,17 @@ static struct notifier_block panic_block = { static int ipmi_init_msghandler(void) { int i; + int rv; if (initialized) return 0; + rv = driver_register(&ipmidriver); + if (rv) { + printk(KERN_ERR PFX "Could not register IPMI driver\n"); + return rv; + } + printk(KERN_INFO "ipmi message handler version " IPMI_DRIVER_VERSION "\n"); @@ -3222,7 +3744,7 @@ static int ipmi_init_msghandler(void) ipmi_timer.expires = jiffies + IPMI_TIMEOUT_JIFFIES; add_timer(&ipmi_timer); - notifier_chain_register(&panic_notifier_list, &panic_block); + atomic_notifier_chain_register(&panic_notifier_list, &panic_block); initialized = 1; @@ -3242,7 +3764,7 @@ static __exit void cleanup_ipmi(void) if (!initialized) return; - notifier_chain_unregister(&panic_notifier_list, &panic_block); + atomic_notifier_chain_unregister(&panic_notifier_list, &panic_block); /* This can't be called if any interfaces exist, so no worry about shutting down the interfaces. */ @@ -3256,6 +3778,8 @@ static __exit void cleanup_ipmi(void) remove_proc_entry(proc_ipmi_root->name, &proc_root); #endif /* CONFIG_PROC_FS */ + driver_unregister(&ipmidriver); + initialized = 0; /* Check for buffer leaks. */ diff --git a/drivers/char/ipmi/ipmi_poweroff.c b/drivers/char/ipmi/ipmi_poweroff.c index e8ed26b77d4c..786a2802ca34 100644 --- a/drivers/char/ipmi/ipmi_poweroff.c +++ b/drivers/char/ipmi/ipmi_poweroff.c @@ -464,7 +464,7 @@ static void ipmi_poweroff_function (void) /* Wait for an IPMI interface to be installed, the first one installed will be grabbed by this code and used to perform the powerdown. 
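[An illustrative sketch, not taken from the patch: the hunks below change the SMI watcher callback so it also receives the interface's struct device, matching the call_smi_watchers(i, intf->si_dev) call above. A minimal watcher under the new prototype could look like this; my_new_smi and my_smi_gone are hypothetical, and registration is assumed to go through ipmi_smi_watcher_register() as the in-tree users do.]

	static void my_new_smi(int if_num, struct device *device)
	{
		dev_info(device, "IPMI interface %d registered\n", if_num);
	}

	static void my_smi_gone(int if_num)
	{
		printk(KERN_INFO "IPMI interface %d removed\n", if_num);
	}

	static struct ipmi_smi_watcher my_watcher = {
		.owner    = THIS_MODULE,
		.new_smi  = my_new_smi,
		.smi_gone = my_smi_gone,
	};
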
*/ -static void ipmi_po_new_smi(int if_num) +static void ipmi_po_new_smi(int if_num, struct device *device) { struct ipmi_system_interface_addr smi_addr; struct kernel_ipmi_msg send_msg; diff --git a/drivers/char/ipmi/ipmi_si_intf.c b/drivers/char/ipmi/ipmi_si_intf.c index e59b638766ef..35fbd4d8ed4b 100644 --- a/drivers/char/ipmi/ipmi_si_intf.c +++ b/drivers/char/ipmi/ipmi_si_intf.c @@ -52,6 +52,7 @@ #include <linux/pci.h> #include <linux/ioport.h> #include <linux/notifier.h> +#include <linux/mutex.h> #include <linux/kthread.h> #include <asm/irq.h> #ifdef CONFIG_HIGH_RES_TIMERS @@ -109,21 +110,15 @@ enum si_intf_state { enum si_type { SI_KCS, SI_SMIC, SI_BT }; +static char *si_to_str[] = { "KCS", "SMIC", "BT" }; -struct ipmi_device_id { - unsigned char device_id; - unsigned char device_revision; - unsigned char firmware_revision_1; - unsigned char firmware_revision_2; - unsigned char ipmi_version; - unsigned char additional_device_support; - unsigned char manufacturer_id[3]; - unsigned char product_id[2]; - unsigned char aux_firmware_revision[4]; -} __attribute__((packed)); - -#define ipmi_version_major(v) ((v)->ipmi_version & 0xf) -#define ipmi_version_minor(v) ((v)->ipmi_version >> 4) +#define DEVICE_NAME "ipmi_si" + +static struct device_driver ipmi_driver = +{ + .name = DEVICE_NAME, + .bus = &platform_bus_type +}; struct smi_info { @@ -147,6 +142,9 @@ struct smi_info int (*irq_setup)(struct smi_info *info); void (*irq_cleanup)(struct smi_info *info); unsigned int io_size; + char *addr_source; /* ACPI, PCI, SMBIOS, hardcode, default. */ + void (*addr_source_cleanup)(struct smi_info *info); + void *addr_source_data; /* Per-OEM handler, called from handle_flags(). Returns 1 when handle_flags() needs to be re-run @@ -203,8 +201,17 @@ struct smi_info interrupts. */ int interrupt_disabled; + /* From the get device id response... */ struct ipmi_device_id device_id; + /* Driver model stuff. */ + struct device *dev; + struct platform_device *pdev; + + /* True if we allocated the device, false if it came from + * someplace else (like PCI). */ + int dev_registered; + /* Slave address, could be reported from DMI. */ unsigned char slave_addr; @@ -224,12 +231,16 @@ struct smi_info unsigned long incoming_messages; struct task_struct *thread; + + struct list_head link; }; -static struct notifier_block *xaction_notifier_list; +static int try_smi_init(struct smi_info *smi); + +static ATOMIC_NOTIFIER_HEAD(xaction_notifier_list); static int register_xaction_notifier(struct notifier_block * nb) { - return notifier_chain_register(&xaction_notifier_list, nb); + return atomic_notifier_chain_register(&xaction_notifier_list, nb); } static void si_restart_short_timer(struct smi_info *smi_info); @@ -271,13 +282,13 @@ static enum si_sm_result start_next_msg(struct smi_info *smi_info) spin_lock(&(smi_info->msg_lock)); /* Pick the high priority queue first. */ - if (! list_empty(&(smi_info->hp_xmit_msgs))) { + if (!list_empty(&(smi_info->hp_xmit_msgs))) { entry = smi_info->hp_xmit_msgs.next; - } else if (! list_empty(&(smi_info->xmit_msgs))) { + } else if (!list_empty(&(smi_info->xmit_msgs))) { entry = smi_info->xmit_msgs.next; } - if (! 
entry) { + if (!entry) { smi_info->curr_msg = NULL; rv = SI_SM_IDLE; } else { @@ -291,7 +302,8 @@ static enum si_sm_result start_next_msg(struct smi_info *smi_info) do_gettimeofday(&t); printk("**Start2: %d.%9.9d\n", t.tv_sec, t.tv_usec); #endif - err = notifier_call_chain(&xaction_notifier_list, 0, smi_info); + err = atomic_notifier_call_chain(&xaction_notifier_list, + 0, smi_info); if (err & NOTIFY_STOP_MASK) { rv = SI_SM_CALL_WITHOUT_DELAY; goto out; @@ -344,7 +356,7 @@ static void start_clear_flags(struct smi_info *smi_info) memory, we will re-enable the interrupt. */ static inline void disable_si_irq(struct smi_info *smi_info) { - if ((smi_info->irq) && (! smi_info->interrupt_disabled)) { + if ((smi_info->irq) && (!smi_info->interrupt_disabled)) { disable_irq_nosync(smi_info->irq); smi_info->interrupt_disabled = 1; } @@ -375,7 +387,7 @@ static void handle_flags(struct smi_info *smi_info) } else if (smi_info->msg_flags & RECEIVE_MSG_AVAIL) { /* Messages available. */ smi_info->curr_msg = ipmi_alloc_smi_msg(); - if (! smi_info->curr_msg) { + if (!smi_info->curr_msg) { disable_si_irq(smi_info); smi_info->si_state = SI_NORMAL; return; @@ -394,7 +406,7 @@ static void handle_flags(struct smi_info *smi_info) } else if (smi_info->msg_flags & EVENT_MSG_BUFFER_FULL) { /* Events available. */ smi_info->curr_msg = ipmi_alloc_smi_msg(); - if (! smi_info->curr_msg) { + if (!smi_info->curr_msg) { disable_si_irq(smi_info); smi_info->si_state = SI_NORMAL; return; @@ -430,7 +442,7 @@ static void handle_transaction_done(struct smi_info *smi_info) #endif switch (smi_info->si_state) { case SI_NORMAL: - if (! smi_info->curr_msg) + if (!smi_info->curr_msg) break; smi_info->curr_msg->rsp_size @@ -880,7 +892,7 @@ static void smi_timeout(unsigned long data) smi_info->last_timeout_jiffies = jiffies_now; - if ((smi_info->irq) && (! smi_info->interrupt_disabled)) { + if ((smi_info->irq) && (!smi_info->interrupt_disabled)) { /* Running with interrupts, only do long timeouts. */ smi_info->si_timer.expires = jiffies + SI_TIMEOUT_JIFFIES; spin_lock_irqsave(&smi_info->count_lock, flags); @@ -974,15 +986,10 @@ static struct ipmi_smi_handlers handlers = a default IO port, and 1 ACPI/SPMI address. That sets SI_MAX_DRIVERS */ #define SI_MAX_PARMS 4 -#define SI_MAX_DRIVERS ((SI_MAX_PARMS * 2) + 2) -static struct smi_info *smi_infos[SI_MAX_DRIVERS] = -{ NULL, NULL, NULL, NULL }; +static LIST_HEAD(smi_infos); +static DECLARE_MUTEX(smi_infos_lock); +static int smi_num; /* Used to sequence the SMIs */ -#define DEVICE_NAME "ipmi_si" - -#define DEFAULT_KCS_IO_PORT 0xca2 -#define DEFAULT_SMIC_IO_PORT 0xca9 -#define DEFAULT_BT_IO_PORT 0xe4 #define DEFAULT_REGSPACING 1 static int si_trydefaults = 1; @@ -1053,38 +1060,23 @@ MODULE_PARM_DESC(slave_addrs, "Set the default IPMB slave address for" " by interface number."); +#define IPMI_IO_ADDR_SPACE 0 #define IPMI_MEM_ADDR_SPACE 1 -#define IPMI_IO_ADDR_SPACE 2 +static char *addr_space_to_str[] = { "I/O", "memory" }; -#if defined(CONFIG_ACPI) || defined(CONFIG_DMI) || defined(CONFIG_PCI) -static int is_new_interface(int intf, u8 addr_space, unsigned long base_addr) +static void std_irq_cleanup(struct smi_info *info) { - int i; - - for (i = 0; i < SI_MAX_PARMS; ++i) { - /* Don't check our address. 
*/ - if (i == intf) - continue; - if (si_type[i] != NULL) { - if ((addr_space == IPMI_MEM_ADDR_SPACE && - base_addr == addrs[i]) || - (addr_space == IPMI_IO_ADDR_SPACE && - base_addr == ports[i])) - return 0; - } - else - break; - } - - return 1; + if (info->si_type == SI_BT) + /* Disable the interrupt in the BT interface. */ + info->io.outputb(&info->io, IPMI_BT_INTMASK_REG, 0); + free_irq(info->irq, info); } -#endif static int std_irq_setup(struct smi_info *info) { int rv; - if (! info->irq) + if (!info->irq) return 0; if (info->si_type == SI_BT) { @@ -1093,7 +1085,7 @@ static int std_irq_setup(struct smi_info *info) SA_INTERRUPT, DEVICE_NAME, info); - if (! rv) + if (!rv) /* Enable the interrupt in the BT interface. */ info->io.outputb(&info->io, IPMI_BT_INTMASK_REG, IPMI_BT_INTMASK_ENABLE_IRQ_BIT); @@ -1110,88 +1102,77 @@ static int std_irq_setup(struct smi_info *info) DEVICE_NAME, info->irq); info->irq = 0; } else { + info->irq_cleanup = std_irq_cleanup; printk(" Using irq %d\n", info->irq); } return rv; } -static void std_irq_cleanup(struct smi_info *info) -{ - if (! info->irq) - return; - - if (info->si_type == SI_BT) - /* Disable the interrupt in the BT interface. */ - info->io.outputb(&info->io, IPMI_BT_INTMASK_REG, 0); - free_irq(info->irq, info); -} - static unsigned char port_inb(struct si_sm_io *io, unsigned int offset) { - unsigned int *addr = io->info; + unsigned int addr = io->addr_data; - return inb((*addr)+(offset*io->regspacing)); + return inb(addr + (offset * io->regspacing)); } static void port_outb(struct si_sm_io *io, unsigned int offset, unsigned char b) { - unsigned int *addr = io->info; + unsigned int addr = io->addr_data; - outb(b, (*addr)+(offset * io->regspacing)); + outb(b, addr + (offset * io->regspacing)); } static unsigned char port_inw(struct si_sm_io *io, unsigned int offset) { - unsigned int *addr = io->info; + unsigned int addr = io->addr_data; - return (inw((*addr)+(offset * io->regspacing)) >> io->regshift) & 0xff; + return (inw(addr + (offset * io->regspacing)) >> io->regshift) & 0xff; } static void port_outw(struct si_sm_io *io, unsigned int offset, unsigned char b) { - unsigned int *addr = io->info; + unsigned int addr = io->addr_data; - outw(b << io->regshift, (*addr)+(offset * io->regspacing)); + outw(b << io->regshift, addr + (offset * io->regspacing)); } static unsigned char port_inl(struct si_sm_io *io, unsigned int offset) { - unsigned int *addr = io->info; + unsigned int addr = io->addr_data; - return (inl((*addr)+(offset * io->regspacing)) >> io->regshift) & 0xff; + return (inl(addr + (offset * io->regspacing)) >> io->regshift) & 0xff; } static void port_outl(struct si_sm_io *io, unsigned int offset, unsigned char b) { - unsigned int *addr = io->info; + unsigned int addr = io->addr_data; - outl(b << io->regshift, (*addr)+(offset * io->regspacing)); + outl(b << io->regshift, addr+(offset * io->regspacing)); } static void port_cleanup(struct smi_info *info) { - unsigned int *addr = info->io.info; - int mapsize; + unsigned int addr = info->io.addr_data; + int mapsize; - if (addr && (*addr)) { + if (addr) { mapsize = ((info->io_size * info->io.regspacing) - (info->io.regspacing - info->io.regsize)); - release_region (*addr, mapsize); + release_region (addr, mapsize); } - kfree(info); } static int port_setup(struct smi_info *info) { - unsigned int *addr = info->io.info; - int mapsize; + unsigned int addr = info->io.addr_data; + int mapsize; - if (! addr || (! 
*addr)) + if (!addr) return -ENODEV; info->io_cleanup = port_cleanup; @@ -1225,51 +1206,11 @@ static int port_setup(struct smi_info *info) mapsize = ((info->io_size * info->io.regspacing) - (info->io.regspacing - info->io.regsize)); - if (request_region(*addr, mapsize, DEVICE_NAME) == NULL) + if (request_region(addr, mapsize, DEVICE_NAME) == NULL) return -EIO; return 0; } -static int try_init_port(int intf_num, struct smi_info **new_info) -{ - struct smi_info *info; - - if (! ports[intf_num]) - return -ENODEV; - - if (! is_new_interface(intf_num, IPMI_IO_ADDR_SPACE, - ports[intf_num])) - return -ENODEV; - - info = kmalloc(sizeof(*info), GFP_KERNEL); - if (! info) { - printk(KERN_ERR "ipmi_si: Could not allocate SI data (1)\n"); - return -ENOMEM; - } - memset(info, 0, sizeof(*info)); - - info->io_setup = port_setup; - info->io.info = &(ports[intf_num]); - info->io.addr = NULL; - info->io.regspacing = regspacings[intf_num]; - if (! info->io.regspacing) - info->io.regspacing = DEFAULT_REGSPACING; - info->io.regsize = regsizes[intf_num]; - if (! info->io.regsize) - info->io.regsize = DEFAULT_REGSPACING; - info->io.regshift = regshifts[intf_num]; - info->irq = 0; - info->irq_setup = NULL; - *new_info = info; - - if (si_type[intf_num] == NULL) - si_type[intf_num] = "kcs"; - - printk("ipmi_si: Trying \"%s\" at I/O port 0x%x\n", - si_type[intf_num], ports[intf_num]); - return 0; -} - static unsigned char intf_mem_inb(struct si_sm_io *io, unsigned int offset) { return readb((io->addr)+(offset * io->regspacing)); @@ -1321,7 +1262,7 @@ static void mem_outq(struct si_sm_io *io, unsigned int offset, static void mem_cleanup(struct smi_info *info) { - unsigned long *addr = info->io.info; + unsigned long addr = info->io.addr_data; int mapsize; if (info->io.addr) { @@ -1330,17 +1271,16 @@ static void mem_cleanup(struct smi_info *info) mapsize = ((info->io_size * info->io.regspacing) - (info->io.regspacing - info->io.regsize)); - release_mem_region(*addr, mapsize); + release_mem_region(addr, mapsize); } - kfree(info); } static int mem_setup(struct smi_info *info) { - unsigned long *addr = info->io.info; + unsigned long addr = info->io.addr_data; int mapsize; - if (! addr || (! *addr)) + if (!addr) return -ENODEV; info->io_cleanup = mem_cleanup; @@ -1380,57 +1320,83 @@ static int mem_setup(struct smi_info *info) mapsize = ((info->io_size * info->io.regspacing) - (info->io.regspacing - info->io.regsize)); - if (request_mem_region(*addr, mapsize, DEVICE_NAME) == NULL) + if (request_mem_region(addr, mapsize, DEVICE_NAME) == NULL) return -EIO; - info->io.addr = ioremap(*addr, mapsize); + info->io.addr = ioremap(addr, mapsize); if (info->io.addr == NULL) { - release_mem_region(*addr, mapsize); + release_mem_region(addr, mapsize); return -EIO; } return 0; } -static int try_init_mem(int intf_num, struct smi_info **new_info) + +static __devinit void hardcode_find_bmc(void) { + int i; struct smi_info *info; - if (! addrs[intf_num]) - return -ENODEV; + for (i = 0; i < SI_MAX_PARMS; i++) { + if (!ports[i] && !addrs[i]) + continue; - if (! is_new_interface(intf_num, IPMI_MEM_ADDR_SPACE, - addrs[intf_num])) - return -ENODEV; + info = kzalloc(sizeof(*info), GFP_KERNEL); + if (!info) + return; - info = kmalloc(sizeof(*info), GFP_KERNEL); - if (! 
info) { - printk(KERN_ERR "ipmi_si: Could not allocate SI data (2)\n"); - return -ENOMEM; - } - memset(info, 0, sizeof(*info)); + info->addr_source = "hardcoded"; - info->io_setup = mem_setup; - info->io.info = &addrs[intf_num]; - info->io.addr = NULL; - info->io.regspacing = regspacings[intf_num]; - if (! info->io.regspacing) - info->io.regspacing = DEFAULT_REGSPACING; - info->io.regsize = regsizes[intf_num]; - if (! info->io.regsize) - info->io.regsize = DEFAULT_REGSPACING; - info->io.regshift = regshifts[intf_num]; - info->irq = 0; - info->irq_setup = NULL; - *new_info = info; + if (!si_type[i] || strcmp(si_type[i], "kcs") == 0) { + info->si_type = SI_KCS; + } else if (strcmp(si_type[i], "smic") == 0) { + info->si_type = SI_SMIC; + } else if (strcmp(si_type[i], "bt") == 0) { + info->si_type = SI_BT; + } else { + printk(KERN_WARNING + "ipmi_si: Interface type specified " + "for interface %d, was invalid: %s\n", + i, si_type[i]); + kfree(info); + continue; + } - if (si_type[intf_num] == NULL) - si_type[intf_num] = "kcs"; + if (ports[i]) { + /* An I/O port */ + info->io_setup = port_setup; + info->io.addr_data = ports[i]; + info->io.addr_type = IPMI_IO_ADDR_SPACE; + } else if (addrs[i]) { + /* A memory port */ + info->io_setup = mem_setup; + info->io.addr_data = addrs[i]; + info->io.addr_type = IPMI_MEM_ADDR_SPACE; + } else { + printk(KERN_WARNING + "ipmi_si: Interface type specified " + "for interface %d, " + "but port and address were not set or " + "set to zero.\n", i); + kfree(info); + continue; + } - printk("ipmi_si: Trying \"%s\" at memory address 0x%lx\n", - si_type[intf_num], addrs[intf_num]); - return 0; -} + info->io.addr = NULL; + info->io.regspacing = regspacings[i]; + if (!info->io.regspacing) + info->io.regspacing = DEFAULT_REGSPACING; + info->io.regsize = regsizes[i]; + if (!info->io.regsize) + info->io.regsize = DEFAULT_REGSPACING; + info->io.regshift = regshifts[i]; + info->irq = irqs[i]; + if (info->irq) + info->irq_setup = std_irq_setup; + try_smi_init(info); + } +} #ifdef CONFIG_ACPI @@ -1470,11 +1436,19 @@ static u32 ipmi_acpi_gpe(void *context) return ACPI_INTERRUPT_HANDLED; } +static void acpi_gpe_irq_cleanup(struct smi_info *info) +{ + if (!info->irq) + return; + + acpi_remove_gpe_handler(NULL, info->irq, &ipmi_acpi_gpe); +} + static int acpi_gpe_irq_setup(struct smi_info *info) { acpi_status status; - if (! info->irq) + if (!info->irq) return 0; /* FIXME - is level triggered right? */ @@ -1491,19 +1465,12 @@ static int acpi_gpe_irq_setup(struct smi_info *info) info->irq = 0; return -EINVAL; } else { + info->irq_cleanup = acpi_gpe_irq_cleanup; printk(" Using ACPI GPE %d\n", info->irq); return 0; } } -static void acpi_gpe_irq_cleanup(struct smi_info *info) -{ - if (! info->irq) - return; - - acpi_remove_gpe_handler(NULL, info->irq, &ipmi_acpi_gpe); -} - /* * Defined at * http://h21007.www2.hp.com/dspp/files/unprotected/devresource/Docs/TechPapers/IA64/hpspmi.pdf @@ -1546,28 +1513,12 @@ struct SPMITable { s8 spmi_id[1]; /* A '\0' terminated array starts here. 
*/ }; -static int try_init_acpi(int intf_num, struct smi_info **new_info) +static __devinit int try_init_acpi(struct SPMITable *spmi) { struct smi_info *info; - acpi_status status; - struct SPMITable *spmi; char *io_type; u8 addr_space; - if (acpi_disabled) - return -ENODEV; - - if (acpi_failure) - return -ENODEV; - - status = acpi_get_firmware_table("SPMI", intf_num+1, - ACPI_LOGICAL_ADDRESSING, - (struct acpi_table_header **) &spmi); - if (status != AE_OK) { - acpi_failure = 1; - return -ENODEV; - } - if (spmi->IPMIlegacy != 1) { printk(KERN_INFO "IPMI: Bad SPMI legacy %d\n", spmi->IPMIlegacy); return -ENODEV; @@ -1577,47 +1528,42 @@ static int try_init_acpi(int intf_num, struct smi_info **new_info) addr_space = IPMI_MEM_ADDR_SPACE; else addr_space = IPMI_IO_ADDR_SPACE; - if (! is_new_interface(-1, addr_space, spmi->addr.address)) - return -ENODEV; + + info = kzalloc(sizeof(*info), GFP_KERNEL); + if (!info) { + printk(KERN_ERR "ipmi_si: Could not allocate SI data (3)\n"); + return -ENOMEM; + } + + info->addr_source = "ACPI"; /* Figure out the interface type. */ switch (spmi->InterfaceType) { case 1: /* KCS */ - si_type[intf_num] = "kcs"; + info->si_type = SI_KCS; break; - case 2: /* SMIC */ - si_type[intf_num] = "smic"; + info->si_type = SI_SMIC; break; - case 3: /* BT */ - si_type[intf_num] = "bt"; + info->si_type = SI_BT; break; - default: printk(KERN_INFO "ipmi_si: Unknown ACPI/SPMI SI type %d\n", spmi->InterfaceType); + kfree(info); return -EIO; } - info = kmalloc(sizeof(*info), GFP_KERNEL); - if (! info) { - printk(KERN_ERR "ipmi_si: Could not allocate SI data (3)\n"); - return -ENOMEM; - } - memset(info, 0, sizeof(*info)); - if (spmi->InterruptType & 1) { /* We've got a GPE interrupt. */ info->irq = spmi->GPE; info->irq_setup = acpi_gpe_irq_setup; - info->irq_cleanup = acpi_gpe_irq_cleanup; } else if (spmi->InterruptType & 2) { /* We've got an APIC/SAPIC interrupt. */ info->irq = spmi->GlobalSystemInterrupt; info->irq_setup = std_irq_setup; - info->irq_cleanup = std_irq_cleanup; } else { /* Use the default interrupt setting. */ info->irq = 0; @@ -1626,43 +1572,60 @@ static int try_init_acpi(int intf_num, struct smi_info **new_info) if (spmi->addr.register_bit_width) { /* A (hopefully) properly formed register bit width. 
*/ - regspacings[intf_num] = spmi->addr.register_bit_width / 8; info->io.regspacing = spmi->addr.register_bit_width / 8; } else { - regspacings[intf_num] = DEFAULT_REGSPACING; info->io.regspacing = DEFAULT_REGSPACING; } - regsizes[intf_num] = regspacings[intf_num]; - info->io.regsize = regsizes[intf_num]; - regshifts[intf_num] = spmi->addr.register_bit_offset; - info->io.regshift = regshifts[intf_num]; + info->io.regsize = info->io.regspacing; + info->io.regshift = spmi->addr.register_bit_offset; if (spmi->addr.address_space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY) { io_type = "memory"; info->io_setup = mem_setup; - addrs[intf_num] = spmi->addr.address; - info->io.info = &(addrs[intf_num]); + info->io.addr_type = IPMI_IO_ADDR_SPACE; } else if (spmi->addr.address_space_id == ACPI_ADR_SPACE_SYSTEM_IO) { io_type = "I/O"; info->io_setup = port_setup; - ports[intf_num] = spmi->addr.address; - info->io.info = &(ports[intf_num]); + info->io.addr_type = IPMI_MEM_ADDR_SPACE; } else { kfree(info); printk("ipmi_si: Unknown ACPI I/O Address type\n"); return -EIO; } + info->io.addr_data = spmi->addr.address; - *new_info = info; + try_smi_init(info); - printk("ipmi_si: ACPI/SPMI specifies \"%s\" %s SI @ 0x%lx\n", - si_type[intf_num], io_type, (unsigned long) spmi->addr.address); return 0; } + +static __devinit void acpi_find_bmc(void) +{ + acpi_status status; + struct SPMITable *spmi; + int i; + + if (acpi_disabled) + return; + + if (acpi_failure) + return; + + for (i = 0; ; i++) { + status = acpi_get_firmware_table("SPMI", i+1, + ACPI_LOGICAL_ADDRESSING, + (struct acpi_table_header **) + &spmi); + if (status != AE_OK) + return; + + try_init_acpi(spmi); + } +} #endif #ifdef CONFIG_DMI -typedef struct dmi_ipmi_data +struct dmi_ipmi_data { u8 type; u8 addr_space; @@ -1670,49 +1633,46 @@ typedef struct dmi_ipmi_data u8 irq; u8 offset; u8 slave_addr; -} dmi_ipmi_data_t; - -static dmi_ipmi_data_t dmi_data[SI_MAX_DRIVERS]; -static int dmi_data_entries; +}; -static int __init decode_dmi(struct dmi_header *dm, int intf_num) +static int __devinit decode_dmi(struct dmi_header *dm, + struct dmi_ipmi_data *dmi) { u8 *data = (u8 *)dm; unsigned long base_addr; u8 reg_spacing; u8 len = dm->length; - dmi_ipmi_data_t *ipmi_data = dmi_data+intf_num; - ipmi_data->type = data[4]; + dmi->type = data[4]; memcpy(&base_addr, data+8, sizeof(unsigned long)); if (len >= 0x11) { if (base_addr & 1) { /* I/O */ base_addr &= 0xFFFE; - ipmi_data->addr_space = IPMI_IO_ADDR_SPACE; + dmi->addr_space = IPMI_IO_ADDR_SPACE; } else { /* Memory */ - ipmi_data->addr_space = IPMI_MEM_ADDR_SPACE; + dmi->addr_space = IPMI_MEM_ADDR_SPACE; } /* If bit 4 of byte 0x10 is set, then the lsb for the address is odd. */ - ipmi_data->base_addr = base_addr | ((data[0x10] & 0x10) >> 4); + dmi->base_addr = base_addr | ((data[0x10] & 0x10) >> 4); - ipmi_data->irq = data[0x11]; + dmi->irq = data[0x11]; /* The top two bits of byte 0x10 hold the register spacing. */ reg_spacing = (data[0x10] & 0xC0) >> 6; switch(reg_spacing){ case 0x00: /* Byte boundaries */ - ipmi_data->offset = 1; + dmi->offset = 1; break; case 0x01: /* 32-bit boundaries */ - ipmi_data->offset = 4; + dmi->offset = 4; break; case 0x02: /* 16-byte boundaries */ - ipmi_data->offset = 16; + dmi->offset = 16; break; default: /* Some other interface, just ignore it. */ @@ -1726,217 +1686,227 @@ static int __init decode_dmi(struct dmi_header *dm, int intf_num) * wrong (and all that I have seen are I/O) so we just * ignore that bit and assume I/O. Systems that use * memory should use the newer spec, anyway. 
*/ - ipmi_data->base_addr = base_addr & 0xfffe; - ipmi_data->addr_space = IPMI_IO_ADDR_SPACE; - ipmi_data->offset = 1; - } - - ipmi_data->slave_addr = data[6]; - - if (is_new_interface(-1, ipmi_data->addr_space,ipmi_data->base_addr)) { - dmi_data_entries++; - return 0; + dmi->base_addr = base_addr & 0xfffe; + dmi->addr_space = IPMI_IO_ADDR_SPACE; + dmi->offset = 1; } - memset(ipmi_data, 0, sizeof(dmi_ipmi_data_t)); + dmi->slave_addr = data[6]; - return -1; + return 0; } -static void __init dmi_find_bmc(void) +static __devinit void try_init_dmi(struct dmi_ipmi_data *ipmi_data) { - struct dmi_device *dev = NULL; - int intf_num = 0; - - while ((dev = dmi_find_device(DMI_DEV_TYPE_IPMI, NULL, dev))) { - if (intf_num >= SI_MAX_DRIVERS) - break; + struct smi_info *info; - decode_dmi((struct dmi_header *) dev->device_data, intf_num++); + info = kzalloc(sizeof(*info), GFP_KERNEL); + if (!info) { + printk(KERN_ERR + "ipmi_si: Could not allocate SI data\n"); + return; } -} - -static int try_init_smbios(int intf_num, struct smi_info **new_info) -{ - struct smi_info *info; - dmi_ipmi_data_t *ipmi_data = dmi_data+intf_num; - char *io_type; - if (intf_num >= dmi_data_entries) - return -ENODEV; + info->addr_source = "SMBIOS"; switch (ipmi_data->type) { - case 0x01: /* KCS */ - si_type[intf_num] = "kcs"; - break; - case 0x02: /* SMIC */ - si_type[intf_num] = "smic"; - break; - case 0x03: /* BT */ - si_type[intf_num] = "bt"; - break; - default: - return -EIO; - } - - info = kmalloc(sizeof(*info), GFP_KERNEL); - if (! info) { - printk(KERN_ERR "ipmi_si: Could not allocate SI data (4)\n"); - return -ENOMEM; + case 0x01: /* KCS */ + info->si_type = SI_KCS; + break; + case 0x02: /* SMIC */ + info->si_type = SI_SMIC; + break; + case 0x03: /* BT */ + info->si_type = SI_BT; + break; + default: + return; } - memset(info, 0, sizeof(*info)); - if (ipmi_data->addr_space == 1) { - io_type = "memory"; + switch (ipmi_data->addr_space) { + case IPMI_MEM_ADDR_SPACE: info->io_setup = mem_setup; - addrs[intf_num] = ipmi_data->base_addr; - info->io.info = &(addrs[intf_num]); - } else if (ipmi_data->addr_space == 2) { - io_type = "I/O"; + info->io.addr_type = IPMI_MEM_ADDR_SPACE; + break; + + case IPMI_IO_ADDR_SPACE: info->io_setup = port_setup; - ports[intf_num] = ipmi_data->base_addr; - info->io.info = &(ports[intf_num]); - } else { + info->io.addr_type = IPMI_IO_ADDR_SPACE; + break; + + default: kfree(info); - printk("ipmi_si: Unknown SMBIOS I/O Address type.\n"); - return -EIO; + printk(KERN_WARNING + "ipmi_si: Unknown SMBIOS I/O Address type: %d.\n", + ipmi_data->addr_space); + return; } + info->io.addr_data = ipmi_data->base_addr; - regspacings[intf_num] = ipmi_data->offset; - info->io.regspacing = regspacings[intf_num]; - if (! 
info->io.regspacing) + info->io.regspacing = ipmi_data->offset; + if (!info->io.regspacing) info->io.regspacing = DEFAULT_REGSPACING; info->io.regsize = DEFAULT_REGSPACING; - info->io.regshift = regshifts[intf_num]; + info->io.regshift = 0; info->slave_addr = ipmi_data->slave_addr; - irqs[intf_num] = ipmi_data->irq; + info->irq = ipmi_data->irq; + if (info->irq) + info->irq_setup = std_irq_setup; - *new_info = info; + try_smi_init(info); +} - printk("ipmi_si: Found SMBIOS-specified state machine at %s" - " address 0x%lx, slave address 0x%x\n", - io_type, (unsigned long)ipmi_data->base_addr, - ipmi_data->slave_addr); - return 0; +static void __devinit dmi_find_bmc(void) +{ + struct dmi_device *dev = NULL; + struct dmi_ipmi_data data; + int rv; + + while ((dev = dmi_find_device(DMI_DEV_TYPE_IPMI, NULL, dev))) { + rv = decode_dmi((struct dmi_header *) dev->device_data, &data); + if (!rv) + try_init_dmi(&data); + } } #endif /* CONFIG_DMI */ #ifdef CONFIG_PCI -#define PCI_ERMC_CLASSCODE 0x0C0700 +#define PCI_ERMC_CLASSCODE 0x0C0700 +#define PCI_ERMC_CLASSCODE_MASK 0xffffff00 +#define PCI_ERMC_CLASSCODE_TYPE_MASK 0xff +#define PCI_ERMC_CLASSCODE_TYPE_SMIC 0x00 +#define PCI_ERMC_CLASSCODE_TYPE_KCS 0x01 +#define PCI_ERMC_CLASSCODE_TYPE_BT 0x02 + #define PCI_HP_VENDOR_ID 0x103C #define PCI_MMC_DEVICE_ID 0x121A #define PCI_MMC_ADDR_CW 0x10 -/* Avoid more than one attempt to probe pci smic. */ -static int pci_smic_checked = 0; +static void ipmi_pci_cleanup(struct smi_info *info) +{ + struct pci_dev *pdev = info->addr_source_data; + + pci_disable_device(pdev); +} -static int find_pci_smic(int intf_num, struct smi_info **new_info) +static int __devinit ipmi_pci_probe(struct pci_dev *pdev, + const struct pci_device_id *ent) { - struct smi_info *info; - int error; - struct pci_dev *pci_dev = NULL; - u16 base_addr; - int fe_rmc = 0; + int rv; + int class_type = pdev->class & PCI_ERMC_CLASSCODE_TYPE_MASK; + struct smi_info *info; + int first_reg_offset = 0; - if (pci_smic_checked) - return -ENODEV; + info = kzalloc(sizeof(*info), GFP_KERNEL); + if (!info) + return ENOMEM; - pci_smic_checked = 1; + info->addr_source = "PCI"; - pci_dev = pci_get_device(PCI_HP_VENDOR_ID, PCI_MMC_DEVICE_ID, NULL); - if (! pci_dev) { - pci_dev = pci_get_class(PCI_ERMC_CLASSCODE, NULL); - if (pci_dev && (pci_dev->subsystem_vendor == PCI_HP_VENDOR_ID)) - fe_rmc = 1; - else - return -ENODEV; - } + switch (class_type) { + case PCI_ERMC_CLASSCODE_TYPE_SMIC: + info->si_type = SI_SMIC; + break; - error = pci_read_config_word(pci_dev, PCI_MMC_ADDR_CW, &base_addr); - if (error) - { - pci_dev_put(pci_dev); - printk(KERN_ERR - "ipmi_si: pci_read_config_word() failed (%d).\n", - error); - return -ENODEV; + case PCI_ERMC_CLASSCODE_TYPE_KCS: + info->si_type = SI_KCS; + break; + + case PCI_ERMC_CLASSCODE_TYPE_BT: + info->si_type = SI_BT; + break; + + default: + kfree(info); + printk(KERN_INFO "ipmi_si: %s: Unknown IPMI type: %d\n", + pci_name(pdev), class_type); + return ENOMEM; } - /* Bit 0: 1 specifies programmed I/O, 0 specifies memory mapped I/O */ - if (! (base_addr & 0x0001)) - { - pci_dev_put(pci_dev); - printk(KERN_ERR - "ipmi_si: memory mapped I/O not supported for PCI" - " smic.\n"); - return -ENODEV; + rv = pci_enable_device(pdev); + if (rv) { + printk(KERN_ERR "ipmi_si: %s: couldn't enable PCI device\n", + pci_name(pdev)); + kfree(info); + return rv; } - base_addr &= 0xFFFE; - if (! 
fe_rmc) - /* Data register starts at base address + 1 in eRMC */ - ++base_addr; + info->addr_source_cleanup = ipmi_pci_cleanup; + info->addr_source_data = pdev; - if (! is_new_interface(-1, IPMI_IO_ADDR_SPACE, base_addr)) { - pci_dev_put(pci_dev); - return -ENODEV; - } + if (pdev->subsystem_vendor == PCI_HP_VENDOR_ID) + first_reg_offset = 1; - info = kmalloc(sizeof(*info), GFP_KERNEL); - if (! info) { - pci_dev_put(pci_dev); - printk(KERN_ERR "ipmi_si: Could not allocate SI data (5)\n"); - return -ENOMEM; + if (pci_resource_flags(pdev, 0) & IORESOURCE_IO) { + info->io_setup = port_setup; + info->io.addr_type = IPMI_IO_ADDR_SPACE; + } else { + info->io_setup = mem_setup; + info->io.addr_type = IPMI_MEM_ADDR_SPACE; } - memset(info, 0, sizeof(*info)); + info->io.addr_data = pci_resource_start(pdev, 0); - info->io_setup = port_setup; - ports[intf_num] = base_addr; - info->io.info = &(ports[intf_num]); - info->io.regspacing = regspacings[intf_num]; - if (! info->io.regspacing) - info->io.regspacing = DEFAULT_REGSPACING; + info->io.regspacing = DEFAULT_REGSPACING; info->io.regsize = DEFAULT_REGSPACING; - info->io.regshift = regshifts[intf_num]; + info->io.regshift = 0; - *new_info = info; + info->irq = pdev->irq; + if (info->irq) + info->irq_setup = std_irq_setup; - irqs[intf_num] = pci_dev->irq; - si_type[intf_num] = "smic"; + info->dev = &pdev->dev; - printk("ipmi_si: Found PCI SMIC at I/O address 0x%lx\n", - (long unsigned int) base_addr); + return try_smi_init(info); +} - pci_dev_put(pci_dev); +static void __devexit ipmi_pci_remove(struct pci_dev *pdev) +{ +} + +#ifdef CONFIG_PM +static int ipmi_pci_suspend(struct pci_dev *pdev, pm_message_t state) +{ return 0; } -#endif /* CONFIG_PCI */ -static int try_init_plug_and_play(int intf_num, struct smi_info **new_info) +static int ipmi_pci_resume(struct pci_dev *pdev) { -#ifdef CONFIG_PCI - if (find_pci_smic(intf_num, new_info) == 0) - return 0; + return 0; +} #endif - /* Include other methods here. */ - return -ENODEV; -} +static struct pci_device_id ipmi_pci_devices[] = { + { PCI_DEVICE(PCI_HP_VENDOR_ID, PCI_MMC_DEVICE_ID) }, + { PCI_DEVICE_CLASS(PCI_ERMC_CLASSCODE, PCI_ERMC_CLASSCODE) } +}; +MODULE_DEVICE_TABLE(pci, ipmi_pci_devices); + +static struct pci_driver ipmi_pci_driver = { + .name = DEVICE_NAME, + .id_table = ipmi_pci_devices, + .probe = ipmi_pci_probe, + .remove = __devexit_p(ipmi_pci_remove), +#ifdef CONFIG_PM + .suspend = ipmi_pci_suspend, + .resume = ipmi_pci_resume, +#endif +}; +#endif /* CONFIG_PCI */ static int try_get_dev_id(struct smi_info *smi_info) { - unsigned char msg[2]; - unsigned char *resp; - unsigned long resp_len; - enum si_sm_result smi_result; - int rv = 0; + unsigned char msg[2]; + unsigned char *resp; + unsigned long resp_len; + enum si_sm_result smi_result; + int rv = 0; resp = kmalloc(IPMI_MAX_MSG_LENGTH, GFP_KERNEL); - if (! resp) + if (!resp) return -ENOMEM; /* Do a Get Device ID command, since it comes back with some @@ -1972,7 +1942,7 @@ static int try_get_dev_id(struct smi_info *smi_info) /* Otherwise, we got some data. */ resp_len = smi_info->handlers->get_result(smi_info->si_sm, resp, IPMI_MAX_MSG_LENGTH); - if (resp_len < 6) { + if (resp_len < 14) { /* That's odd, it should be longer. */ rv = -EINVAL; goto out; @@ -1985,8 +1955,7 @@ static int try_get_dev_id(struct smi_info *smi_info) } /* Record info from the get device id, in case we need it. 
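[An illustrative sketch, not taken from the patch: the hunks below switch the manufacturer id from a raw 3-byte array to a single packed integer (DELL_IANA_MFR_ID changes from {0xA2, 0x02, 0x00} to 0x0002a2), with try_get_dev_id() now routing the response through ipmi_demangle_device_id(). The helper below only illustrates the byte packing implied by that constant change, folding the three LSB-first IANA bytes into one value; it is not the actual ipmi_demangle_device_id() implementation.]

	static unsigned int pack_iana_mfr_id(const unsigned char *data)
	{
		/* data points at the three manufacturer-id bytes of a Get
		 * Device ID response, least significant byte first, so
		 * {0xA2, 0x02, 0x00} packs to 0x0002a2. */
		return data[0] | (data[1] << 8) | (data[2] << 16);
	}
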
*/ - memcpy(&smi_info->device_id, &resp[3], - min_t(unsigned long, resp_len-3, sizeof(smi_info->device_id))); + ipmi_demangle_device_id(resp+3, resp_len-3, &smi_info->device_id); out: kfree(resp); @@ -2018,7 +1987,7 @@ static int stat_file_read_proc(char *page, char **start, off_t off, struct smi_info *smi = data; out += sprintf(out, "interrupts_enabled: %d\n", - smi->irq && ! smi->interrupt_disabled); + smi->irq && !smi->interrupt_disabled); out += sprintf(out, "short_timeouts: %ld\n", smi->short_timeouts); out += sprintf(out, "long_timeouts: %ld\n", @@ -2089,15 +2058,14 @@ static int oem_data_avail_to_receive_msg_avail(struct smi_info *smi_info) #define DELL_POWEREDGE_8G_BMC_DEVICE_ID 0x20 #define DELL_POWEREDGE_8G_BMC_DEVICE_REV 0x80 #define DELL_POWEREDGE_8G_BMC_IPMI_VERSION 0x51 -#define DELL_IANA_MFR_ID {0xA2, 0x02, 0x00} +#define DELL_IANA_MFR_ID 0x0002a2 static void setup_dell_poweredge_oem_data_handler(struct smi_info *smi_info) { struct ipmi_device_id *id = &smi_info->device_id; - const char mfr[3]=DELL_IANA_MFR_ID; - if (! memcmp(mfr, id->manufacturer_id, sizeof(mfr))) { + if (id->manufacturer_id == DELL_IANA_MFR_ID) { if (id->device_id == DELL_POWEREDGE_8G_BMC_DEVICE_ID && id->device_revision == DELL_POWEREDGE_8G_BMC_DEVICE_REV && - id->ipmi_version == DELL_POWEREDGE_8G_BMC_IPMI_VERSION) { + id->ipmi_version == DELL_POWEREDGE_8G_BMC_IPMI_VERSION) { smi_info->oem_data_avail_handler = oem_data_avail_to_receive_msg_avail; } @@ -2169,8 +2137,7 @@ static void setup_dell_poweredge_bt_xaction_handler(struct smi_info *smi_info) { struct ipmi_device_id *id = &smi_info->device_id; - const char mfr[3]=DELL_IANA_MFR_ID; - if (! memcmp(mfr, id->manufacturer_id, sizeof(mfr)) && + if (id->manufacturer_id == DELL_IANA_MFR_ID && smi_info->si_type == SI_BT) register_xaction_notifier(&dell_poweredge_bt_xaction_notifier); } @@ -2200,62 +2167,110 @@ static inline void wait_for_timer_and_thread(struct smi_info *smi_info) del_timer_sync(&smi_info->si_timer); } -/* Returns 0 if initialized, or negative on an error. */ -static int init_one_smi(int intf_num, struct smi_info **smi) +static struct ipmi_default_vals { - int rv; - struct smi_info *new_smi; + int type; + int port; +} __devinit ipmi_defaults[] = +{ + { .type = SI_KCS, .port = 0xca2 }, + { .type = SI_SMIC, .port = 0xca9 }, + { .type = SI_BT, .port = 0xe4 }, + { .port = 0 } +}; +static __devinit void default_find_bmc(void) +{ + struct smi_info *info; + int i; - rv = try_init_mem(intf_num, &new_smi); - if (rv) - rv = try_init_port(intf_num, &new_smi); -#ifdef CONFIG_ACPI - if (rv && si_trydefaults) - rv = try_init_acpi(intf_num, &new_smi); -#endif -#ifdef CONFIG_DMI - if (rv && si_trydefaults) - rv = try_init_smbios(intf_num, &new_smi); -#endif - if (rv && si_trydefaults) - rv = try_init_plug_and_play(intf_num, &new_smi); + for (i = 0; ; i++) { + if (!ipmi_defaults[i].port) + break; - if (rv) - return rv; + info = kzalloc(sizeof(*info), GFP_KERNEL); + if (!info) + return; - /* So we know not to free it unless we have allocated one. */ - new_smi->intf = NULL; - new_smi->si_sm = NULL; - new_smi->handlers = NULL; + info->addr_source = NULL; - if (! new_smi->irq_setup) { - new_smi->irq = irqs[intf_num]; - new_smi->irq_setup = std_irq_setup; - new_smi->irq_cleanup = std_irq_cleanup; - } + info->si_type = ipmi_defaults[i].type; + info->io_setup = port_setup; + info->io.addr_data = ipmi_defaults[i].port; + info->io.addr_type = IPMI_IO_ADDR_SPACE; - /* Default to KCS if no type is specified. 
*/ - if (si_type[intf_num] == NULL) { - if (si_trydefaults) - si_type[intf_num] = "kcs"; - else { - rv = -EINVAL; - goto out_err; + info->io.addr = NULL; + info->io.regspacing = DEFAULT_REGSPACING; + info->io.regsize = DEFAULT_REGSPACING; + info->io.regshift = 0; + + if (try_smi_init(info) == 0) { + /* Found one... */ + printk(KERN_INFO "ipmi_si: Found default %s state" + " machine at %s address 0x%lx\n", + si_to_str[info->si_type], + addr_space_to_str[info->io.addr_type], + info->io.addr_data); + return; } } +} + +static int is_new_interface(struct smi_info *info) +{ + struct smi_info *e; + + list_for_each_entry(e, &smi_infos, link) { + if (e->io.addr_type != info->io.addr_type) + continue; + if (e->io.addr_data == info->io.addr_data) + return 0; + } + + return 1; +} + +static int try_smi_init(struct smi_info *new_smi) +{ + int rv; + + if (new_smi->addr_source) { + printk(KERN_INFO "ipmi_si: Trying %s-specified %s state" + " machine at %s address 0x%lx, slave address 0x%x," + " irq %d\n", + new_smi->addr_source, + si_to_str[new_smi->si_type], + addr_space_to_str[new_smi->io.addr_type], + new_smi->io.addr_data, + new_smi->slave_addr, new_smi->irq); + } + + down(&smi_infos_lock); + if (!is_new_interface(new_smi)) { + printk(KERN_WARNING "ipmi_si: duplicate interface\n"); + rv = -EBUSY; + goto out_err; + } - /* Set up the state machine to use. */ - if (strcmp(si_type[intf_num], "kcs") == 0) { + /* So we know not to free it unless we have allocated one. */ + new_smi->intf = NULL; + new_smi->si_sm = NULL; + new_smi->handlers = NULL; + + switch (new_smi->si_type) { + case SI_KCS: new_smi->handlers = &kcs_smi_handlers; - new_smi->si_type = SI_KCS; - } else if (strcmp(si_type[intf_num], "smic") == 0) { + break; + + case SI_SMIC: new_smi->handlers = &smic_smi_handlers; - new_smi->si_type = SI_SMIC; - } else if (strcmp(si_type[intf_num], "bt") == 0) { + break; + + case SI_BT: new_smi->handlers = &bt_smi_handlers; - new_smi->si_type = SI_BT; - } else { + break; + + default: /* No support for anything else yet. */ rv = -EIO; goto out_err; @@ -2263,7 +2278,7 @@ static int init_one_smi(int intf_num, struct smi_info **smi) /* Allocate the state machine's data and initialize it. */ new_smi->si_sm = kmalloc(new_smi->handlers->size(), GFP_KERNEL); - if (! new_smi->si_sm) { + if (!new_smi->si_sm) { printk(" Could not allocate state machine memory\n"); rv = -ENOMEM; goto out_err; @@ -2284,21 +2299,29 @@ static int init_one_smi(int intf_num, struct smi_info **smi) /* Do low-level detection first. */ if (new_smi->handlers->detect(new_smi->si_sm)) { + if (new_smi->addr_source) + printk(KERN_INFO "ipmi_si: Interface detection" + " failed\n"); rv = -ENODEV; goto out_err; } /* Attempt a get device id command. If it fails, we probably - don't have a SMI here. */ + don't have a BMC here. */ rv = try_get_dev_id(new_smi); - if (rv) + if (rv) { + if (new_smi->addr_source) + printk(KERN_INFO "ipmi_si: There appears to be no BMC" + " at this location\n"); goto out_err; + } setup_oem_data_handler(new_smi); setup_xaction_handlers(new_smi); /* Try to claim any interrupts. 
*/ - new_smi->irq_setup(new_smi); + if (new_smi->irq_setup) + new_smi->irq_setup(new_smi); INIT_LIST_HEAD(&(new_smi->xmit_msgs)); INIT_LIST_HEAD(&(new_smi->hp_xmit_msgs)); @@ -2308,7 +2331,8 @@ static int init_one_smi(int intf_num, struct smi_info **smi) new_smi->interrupt_disabled = 0; atomic_set(&new_smi->stop_operation, 0); - new_smi->intf_num = intf_num; + new_smi->intf_num = smi_num; + smi_num++; /* Start clearing the flags before we enable interrupts or the timer to avoid racing with the timer. */ @@ -2332,10 +2356,36 @@ static int init_one_smi(int intf_num, struct smi_info **smi) new_smi->thread = kthread_run(ipmi_thread, new_smi, "kipmi%d", new_smi->intf_num); + if (!new_smi->dev) { + /* If we don't already have a device from something + * else (like PCI), then register a new one. */ + new_smi->pdev = platform_device_alloc("ipmi_si", + new_smi->intf_num); + if (rv) { + printk(KERN_ERR + "ipmi_si_intf:" + " Unable to allocate platform device\n"); + goto out_err_stop_timer; + } + new_smi->dev = &new_smi->pdev->dev; + new_smi->dev->driver = &ipmi_driver; + + rv = platform_device_register(new_smi->pdev); + if (rv) { + printk(KERN_ERR + "ipmi_si_intf:" + " Unable to register system interface device:" + " %d\n", + rv); + goto out_err_stop_timer; + } + new_smi->dev_registered = 1; + } + rv = ipmi_register_smi(&handlers, new_smi, - ipmi_version_major(&new_smi->device_id), - ipmi_version_minor(&new_smi->device_id), + &new_smi->device_id, + new_smi->dev, new_smi->slave_addr, &(new_smi->intf)); if (rv) { @@ -2365,9 +2415,11 @@ static int init_one_smi(int intf_num, struct smi_info **smi) goto out_err_stop_timer; } - *smi = new_smi; + list_add_tail(&new_smi->link, &smi_infos); + + up(&smi_infos_lock); - printk(" IPMI %s interface initialized\n", si_type[intf_num]); + printk(" IPMI %s interface initialized\n",si_to_str[new_smi->si_type]); return 0; @@ -2379,7 +2431,8 @@ static int init_one_smi(int intf_num, struct smi_info **smi) if (new_smi->intf) ipmi_unregister_smi(new_smi->intf); - new_smi->irq_cleanup(new_smi); + if (new_smi->irq_cleanup) + new_smi->irq_cleanup(new_smi); /* Wait until we know that we are out of any interrupt handlers might have been running before we freed the @@ -2391,23 +2444,41 @@ static int init_one_smi(int intf_num, struct smi_info **smi) new_smi->handlers->cleanup(new_smi->si_sm); kfree(new_smi->si_sm); } + if (new_smi->addr_source_cleanup) + new_smi->addr_source_cleanup(new_smi); if (new_smi->io_cleanup) new_smi->io_cleanup(new_smi); + if (new_smi->dev_registered) + platform_device_unregister(new_smi->pdev); + + kfree(new_smi); + + up(&smi_infos_lock); + return rv; } -static __init int init_ipmi_si(void) +static __devinit int init_ipmi_si(void) { - int rv = 0; - int pos = 0; int i; char *str; + int rv; if (initialized) return 0; initialized = 1; + /* Register the device drivers. */ + rv = driver_register(&ipmi_driver); + if (rv) { + printk(KERN_ERR + "init_ipmi_si: Unable to register driver: %d\n", + rv); + return rv; + } + + /* Parse out the si_type string into its components. */ str = si_type_str; if (*str != '\0') { @@ -2425,63 +2496,66 @@ static __init int init_ipmi_si(void) printk(KERN_INFO "IPMI System Interface driver.\n"); + hardcode_find_bmc(); + #ifdef CONFIG_DMI dmi_find_bmc(); #endif - rv = init_one_smi(0, &(smi_infos[pos])); - if (rv && ! ports[0] && si_trydefaults) { - /* If we are trying defaults and the initial port is - not set, then set it. 
*/ - si_type[0] = "kcs"; - ports[0] = DEFAULT_KCS_IO_PORT; - rv = init_one_smi(0, &(smi_infos[pos])); - if (rv) { - /* No KCS - try SMIC */ - si_type[0] = "smic"; - ports[0] = DEFAULT_SMIC_IO_PORT; - rv = init_one_smi(0, &(smi_infos[pos])); - } - if (rv) { - /* No SMIC - try BT */ - si_type[0] = "bt"; - ports[0] = DEFAULT_BT_IO_PORT; - rv = init_one_smi(0, &(smi_infos[pos])); - } - } - if (rv == 0) - pos++; +#ifdef CONFIG_ACPI + if (si_trydefaults) + acpi_find_bmc(); +#endif - for (i = 1; i < SI_MAX_PARMS; i++) { - rv = init_one_smi(i, &(smi_infos[pos])); - if (rv == 0) - pos++; +#ifdef CONFIG_PCI + pci_module_init(&ipmi_pci_driver); +#endif + + if (si_trydefaults) { + down(&smi_infos_lock); + if (list_empty(&smi_infos)) { + /* No BMC was found, try defaults. */ + up(&smi_infos_lock); + default_find_bmc(); + } else { + up(&smi_infos_lock); + } } - if (smi_infos[0] == NULL) { + down(&smi_infos_lock); + if (list_empty(&smi_infos)) { + up(&smi_infos_lock); +#ifdef CONFIG_PCI + pci_unregister_driver(&ipmi_pci_driver); +#endif printk("ipmi_si: Unable to find any System Interface(s)\n"); return -ENODEV; + } else { + up(&smi_infos_lock); + return 0; } - - return 0; } module_init(init_ipmi_si); -static void __exit cleanup_one_si(struct smi_info *to_clean) +static void __devexit cleanup_one_si(struct smi_info *to_clean) { int rv; unsigned long flags; - if (! to_clean) + if (!to_clean) return; + list_del(&to_clean->link); + /* Tell the timer and interrupt handlers that we are shutting down. */ spin_lock_irqsave(&(to_clean->si_lock), flags); spin_lock(&(to_clean->msg_lock)); atomic_inc(&to_clean->stop_operation); - to_clean->irq_cleanup(to_clean); + + if (to_clean->irq_cleanup) + to_clean->irq_cleanup(to_clean); spin_unlock(&(to_clean->msg_lock)); spin_unlock_irqrestore(&(to_clean->si_lock), flags); @@ -2511,20 +2585,34 @@ static void __exit cleanup_one_si(struct smi_info *to_clean) kfree(to_clean->si_sm); + if (to_clean->addr_source_cleanup) + to_clean->addr_source_cleanup(to_clean); if (to_clean->io_cleanup) to_clean->io_cleanup(to_clean); + + if (to_clean->dev_registered) + platform_device_unregister(to_clean->pdev); + + kfree(to_clean); } static __exit void cleanup_ipmi_si(void) { - int i; + struct smi_info *e, *tmp_e; - if (! initialized) + if (!initialized) return; - for (i = 0; i < SI_MAX_DRIVERS; i++) { - cleanup_one_si(smi_infos[i]); - } +#ifdef CONFIG_PCI + pci_unregister_driver(&ipmi_pci_driver); +#endif + + down(&smi_infos_lock); + list_for_each_entry_safe(e, tmp_e, &smi_infos, link) + cleanup_one_si(e); + up(&smi_infos_lock); + + driver_unregister(&ipmi_driver); } module_exit(cleanup_ipmi_si); diff --git a/drivers/char/ipmi/ipmi_si_sm.h b/drivers/char/ipmi/ipmi_si_sm.h index bf3d4962d6a5..4b731b24dc16 100644 --- a/drivers/char/ipmi/ipmi_si_sm.h +++ b/drivers/char/ipmi/ipmi_si_sm.h @@ -50,11 +50,12 @@ struct si_sm_io /* Generic info used by the actual handling routines, the state machine shouldn't touch these. */ - void *info; void __iomem *addr; int regspacing; int regsize; int regshift; + int addr_type; + long addr_data; }; /* Results of SMI events. 
*/ diff --git a/drivers/char/ipmi/ipmi_watchdog.c b/drivers/char/ipmi/ipmi_watchdog.c index 1f3159eb1ede..7ece9f3c8f70 100644 --- a/drivers/char/ipmi/ipmi_watchdog.c +++ b/drivers/char/ipmi/ipmi_watchdog.c @@ -996,7 +996,7 @@ static struct notifier_block wdog_panic_notifier = { }; -static void ipmi_new_smi(int if_num) +static void ipmi_new_smi(int if_num, struct device *device) { ipmi_register_watchdog(if_num); } @@ -1158,7 +1158,8 @@ static int __init ipmi_wdog_init(void) } register_reboot_notifier(&wdog_reboot_notifier); - notifier_chain_register(&panic_notifier_list, &wdog_panic_notifier); + atomic_notifier_chain_register(&panic_notifier_list, + &wdog_panic_notifier); printk(KERN_INFO PFX "driver initialized\n"); @@ -1176,7 +1177,8 @@ static __exit void ipmi_unregister_watchdog(void) release_nmi(&ipmi_nmi_handler); #endif - notifier_chain_unregister(&panic_notifier_list, &wdog_panic_notifier); + atomic_notifier_chain_unregister(&panic_notifier_list, + &wdog_panic_notifier); unregister_reboot_notifier(&wdog_reboot_notifier); if (! watchdog_user) diff --git a/drivers/char/mem.c b/drivers/char/mem.c index 26d0116b48d4..66719f9d294c 100644 --- a/drivers/char/mem.c +++ b/drivers/char/mem.c @@ -88,21 +88,15 @@ static inline int uncached_access(struct file *file, unsigned long addr) } #ifndef ARCH_HAS_VALID_PHYS_ADDR_RANGE -static inline int valid_phys_addr_range(unsigned long addr, size_t *count) +static inline int valid_phys_addr_range(unsigned long addr, size_t count) { - unsigned long end_mem; - - end_mem = __pa(high_memory); - if (addr >= end_mem) + if (addr + count > __pa(high_memory)) return 0; - if (*count > end_mem - addr) - *count = end_mem - addr; - return 1; } -static inline int valid_mmap_phys_addr_range(unsigned long addr, size_t *size) +static inline int valid_mmap_phys_addr_range(unsigned long addr, size_t size) { return 1; } @@ -119,7 +113,7 @@ static ssize_t read_mem(struct file * file, char __user * buf, ssize_t read, sz; char *ptr; - if (!valid_phys_addr_range(p, &count)) + if (!valid_phys_addr_range(p, count)) return -EFAULT; read = 0; #ifdef __ARCH_HAS_NO_PAGE_ZERO_MAPPED @@ -177,7 +171,7 @@ static ssize_t write_mem(struct file * file, const char __user * buf, unsigned long copied; void *ptr; - if (!valid_phys_addr_range(p, &count)) + if (!valid_phys_addr_range(p, count)) return -EFAULT; written = 0; @@ -249,7 +243,7 @@ static int mmap_mem(struct file * file, struct vm_area_struct * vma) { size_t size = vma->vm_end - vma->vm_start; - if (!valid_mmap_phys_addr_range(vma->vm_pgoff << PAGE_SHIFT, &size)) + if (!valid_mmap_phys_addr_range(vma->vm_pgoff << PAGE_SHIFT, size)) return -EINVAL; vma->vm_page_prot = phys_mem_access_prot(file, vma->vm_pgoff, @@ -905,7 +899,7 @@ static const struct { unsigned int minor; char *name; umode_t mode; - struct file_operations *fops; + const struct file_operations *fops; } devlist[] = { /* list of minor devices */ {1, "mem", S_IRUSR | S_IWUSR | S_IRGRP, &mem_fops}, {2, "kmem", S_IRUSR | S_IWUSR | S_IRGRP, &kmem_fops}, diff --git a/drivers/char/misc.c b/drivers/char/misc.c index 3e4c0414a01a..96eb2a709e21 100644 --- a/drivers/char/misc.c +++ b/drivers/char/misc.c @@ -129,7 +129,7 @@ static int misc_open(struct inode * inode, struct file * file) int minor = iminor(inode); struct miscdevice *c; int err = -ENODEV; - struct file_operations *old_fops, *new_fops = NULL; + const struct file_operations *old_fops, *new_fops = NULL; down(&misc_sem); diff --git a/drivers/char/mxser.h b/drivers/char/mxser.h index e7fd0b08e0b7..7e188a4d602a 100644 --- 
a/drivers/char/mxser.h +++ b/drivers/char/mxser.h @@ -118,7 +118,7 @@ // enable CTS interrupt #define MOXA_MUST_IER_ECTSI 0x80 -// eanble RTS interrupt +// enable RTS interrupt #define MOXA_MUST_IER_ERTSI 0x40 // enable Xon/Xoff interrupt #define MOXA_MUST_IER_XINT 0x20 diff --git a/drivers/char/synclink.c b/drivers/char/synclink.c index d68be61f0a49..fee2aca3f6a5 100644 --- a/drivers/char/synclink.c +++ b/drivers/char/synclink.c @@ -941,17 +941,6 @@ static void* mgsl_get_text_ptr(void) return mgsl_get_text_ptr; } -/* - * tmp_buf is used as a temporary buffer by mgsl_write. We need to - * lock it in case the COPY_FROM_USER blocks while swapping in a page, - * and some other program tries to do a serial write at the same time. - * Since the lock will only come under contention when the system is - * swapping and available memory is low, it makes sense to share one - * buffer across all the serial ioports, since it significantly saves - * memory if large numbers of serial ports are open. - */ -static unsigned char *tmp_buf; - static inline int mgsl_paranoia_check(struct mgsl_struct *info, char *name, const char *routine) { @@ -2150,7 +2139,7 @@ static int mgsl_write(struct tty_struct * tty, if (mgsl_paranoia_check(info, tty->name, "mgsl_write")) goto cleanup; - if (!tty || !info->xmit_buf || !tmp_buf) + if (!tty || !info->xmit_buf) goto cleanup; if ( info->params.mode == MGSL_MODE_HDLC || @@ -3438,7 +3427,6 @@ static int mgsl_open(struct tty_struct *tty, struct file * filp) { struct mgsl_struct *info; int retval, line; - unsigned long page; unsigned long flags; /* verify range of specified line number */ @@ -3472,18 +3460,6 @@ static int mgsl_open(struct tty_struct *tty, struct file * filp) goto cleanup; } - if (!tmp_buf) { - page = get_zeroed_page(GFP_KERNEL); - if (!page) { - retval = -ENOMEM; - goto cleanup; - } - if (tmp_buf) - free_page(page); - else - tmp_buf = (unsigned char *) page; - } - info->tty->low_latency = (info->flags & ASYNC_LOW_LATENCY) ? 1 : 0; spin_lock_irqsave(&info->netlock, flags); @@ -4502,11 +4478,6 @@ static void synclink_cleanup(void) kfree(tmp); } - if (tmp_buf) { - free_page((unsigned long) tmp_buf); - tmp_buf = NULL; - } - if (pci_registered) pci_unregister_driver(&synclink_pci_driver); } @@ -6025,7 +5996,7 @@ static void usc_set_async_mode( struct mgsl_struct *info ) * <15..8> ? RxFIFO IRQ Request Level * * Note: For async mode the receive FIFO level must be set - * to 0 to aviod the situation where the FIFO contains fewer bytes + * to 0 to avoid the situation where the FIFO contains fewer bytes * than the trigger level and no more data is expected. * * <7> 0 Exited Hunt IA (Interrupt Arm) diff --git a/drivers/char/synclink_gt.c b/drivers/char/synclink_gt.c index 738ec2f4e563..b4d1f4eea435 100644 --- a/drivers/char/synclink_gt.c +++ b/drivers/char/synclink_gt.c @@ -1,5 +1,5 @@ /* - * $Id: synclink_gt.c,v 4.22 2006/01/09 20:16:06 paulkf Exp $ + * $Id: synclink_gt.c,v 4.25 2006/02/06 21:20:33 paulkf Exp $ * * Device driver for Microgate SyncLink GT serial adapters. 
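[An illustrative sketch, not taken from the patch: the synclink_gt.c hunks below add three general purpose I/O ioctls (MGSL_IOCSGPIO, MGSL_IOCGGPIO, MGSL_IOCWAITGPIO) driven by the gpio_desc state/smask/dir/dmask fields documented in the set_gpio() and wait_gpio() comments. A hypothetical userspace caller might look like the following; it assumes struct gpio_desc and the ioctl numbers are exported through linux/synclink.h (a header change not shown here) and that the device node follows the driver's ttySLG prefix.]

	#include <stdio.h>
	#include <fcntl.h>
	#include <unistd.h>
	#include <sys/ioctl.h>
	#include <linux/synclink.h>	/* assumed to provide struct gpio_desc and MGSL_IOC*GPIO */

	int main(void)
	{
		struct gpio_desc gpio = {0};
		int fd = open("/dev/ttySLG0", O_RDWR);	/* device name assumed */

		if (fd < 0) {
			perror("open");
			return 1;
		}

		/* configure pin 0 as an output and drive it high */
		gpio.dir   = 0x00000001;  gpio.dmask = 0x00000001;
		gpio.state = 0x00000001;  gpio.smask = 0x00000001;
		if (ioctl(fd, MGSL_IOCSGPIO, &gpio) < 0)
			perror("MGSL_IOCSGPIO");

		/* block until input pin 1 goes high; only pins configured
		 * as inputs (IODR bit clear) can be watched */
		gpio.state = 0x00000002;  gpio.smask = 0x00000002;
		if (ioctl(fd, MGSL_IOCWAITGPIO, &gpio) == 0)
			printf("pin states: %08x\n", gpio.state);

		close(fd);
		return 0;
	}
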
* @@ -92,7 +92,7 @@ * module identification */ static char *driver_name = "SyncLink GT"; -static char *driver_version = "$Revision: 4.22 $"; +static char *driver_version = "$Revision: 4.25 $"; static char *tty_driver_name = "synclink_gt"; static char *tty_dev_prefix = "ttySLG"; MODULE_LICENSE("GPL"); @@ -188,6 +188,20 @@ static void hdlcdev_exit(struct slgt_info *info); #define SLGT_REG_SIZE 256 /* + * conditional wait facility + */ +struct cond_wait { + struct cond_wait *next; + wait_queue_head_t q; + wait_queue_t wait; + unsigned int data; +}; +static void init_cond_wait(struct cond_wait *w, unsigned int data); +static void add_cond_wait(struct cond_wait **head, struct cond_wait *w); +static void remove_cond_wait(struct cond_wait **head, struct cond_wait *w); +static void flush_cond_wait(struct cond_wait **head); + +/* * DMA buffer descriptor and access macros */ struct slgt_desc @@ -269,6 +283,9 @@ struct slgt_info { struct timer_list tx_timer; struct timer_list rx_timer; + unsigned int gpio_present; + struct cond_wait *gpio_wait_q; + spinlock_t lock; /* spinlock for synchronizing with ISR */ struct work_struct task; @@ -379,6 +396,11 @@ static MGSL_PARAMS default_params = { #define MASK_OVERRUN BIT4 #define GSR 0x00 /* global status */ +#define JCR 0x04 /* JTAG control */ +#define IODR 0x08 /* GPIO direction */ +#define IOER 0x0c /* GPIO interrupt enable */ +#define IOVR 0x10 /* GPIO value */ +#define IOSR 0x14 /* GPIO interrupt status */ #define TDR 0x80 /* tx data */ #define RDR 0x80 /* rx data */ #define TCR 0x82 /* tx control */ @@ -503,6 +525,9 @@ static int tiocmset(struct tty_struct *tty, struct file *file, static void set_break(struct tty_struct *tty, int break_state); static int get_interface(struct slgt_info *info, int __user *if_mode); static int set_interface(struct slgt_info *info, int if_mode); +static int set_gpio(struct slgt_info *info, struct gpio_desc __user *gpio); +static int get_gpio(struct slgt_info *info, struct gpio_desc __user *gpio); +static int wait_gpio(struct slgt_info *info, struct gpio_desc __user *gpio); /* * driver functions @@ -1112,6 +1137,12 @@ static int ioctl(struct tty_struct *tty, struct file *file, return get_interface(info, argp); case MGSL_IOCSIF: return set_interface(info,(int)arg); + case MGSL_IOCSGPIO: + return set_gpio(info, argp); + case MGSL_IOCGGPIO: + return get_gpio(info, argp); + case MGSL_IOCWAITGPIO: + return wait_gpio(info, argp); case TIOCGICOUNT: spin_lock_irqsave(&info->lock,flags); cnow = info->icount; @@ -1762,10 +1793,6 @@ static void rx_async(struct slgt_info *info) DBGDATA(info, p, count, "rx"); for(i=0 ; i < count; i+=2, p+=2) { - if (tty && chars) { - tty_flip_buffer_push(tty); - chars = 0; - } ch = *p; icount->rx++; @@ -2158,6 +2185,24 @@ static void isr_txeom(struct slgt_info *info, unsigned short status) } } +static void isr_gpio(struct slgt_info *info, unsigned int changed, unsigned int state) +{ + struct cond_wait *w, *prev; + + /* wake processes waiting for specific transitions */ + for (w = info->gpio_wait_q, prev = NULL ; w != NULL ; w = w->next) { + if (w->data & changed) { + w->data = state; + wake_up_interruptible(&w->q); + if (prev != NULL) + prev->next = w->next; + else + info->gpio_wait_q = w->next; + } else + prev = w; + } +} + /* interrupt service routine * * irq interrupt number @@ -2193,6 +2238,22 @@ static irqreturn_t slgt_interrupt(int irq, void *dev_id, struct pt_regs * regs) } } + if (info->gpio_present) { + unsigned int state; + unsigned int changed; + while ((changed = rd_reg32(info, IOSR)) != 0) 
{ + DBGISR(("%s iosr=%08x\n", info->device_name, changed)); + /* read latched state of GPIO signals */ + state = rd_reg32(info, IOVR); + /* clear pending GPIO interrupt bits */ + wr_reg32(info, IOSR, changed); + for (i=0 ; i < info->port_count ; i++) { + if (info->port_array[i] != NULL) + isr_gpio(info->port_array[i], changed, state); + } + } + } + for(i=0; i < info->port_count ; i++) { struct slgt_info *port = info->port_array[i]; @@ -2276,6 +2337,8 @@ static void shutdown(struct slgt_info *info) set_signals(info); } + flush_cond_wait(&info->gpio_wait_q); + spin_unlock_irqrestore(&info->lock,flags); if (info->tty) @@ -2650,6 +2713,175 @@ static int set_interface(struct slgt_info *info, int if_mode) return 0; } +/* + * set general purpose IO pin state and direction + * + * user_gpio fields: + * state each bit indicates a pin state + * smask set bit indicates pin state to set + * dir each bit indicates a pin direction (0=input, 1=output) + * dmask set bit indicates pin direction to set + */ +static int set_gpio(struct slgt_info *info, struct gpio_desc __user *user_gpio) +{ + unsigned long flags; + struct gpio_desc gpio; + __u32 data; + + if (!info->gpio_present) + return -EINVAL; + if (copy_from_user(&gpio, user_gpio, sizeof(gpio))) + return -EFAULT; + DBGINFO(("%s set_gpio state=%08x smask=%08x dir=%08x dmask=%08x\n", + info->device_name, gpio.state, gpio.smask, + gpio.dir, gpio.dmask)); + + spin_lock_irqsave(&info->lock,flags); + if (gpio.dmask) { + data = rd_reg32(info, IODR); + data |= gpio.dmask & gpio.dir; + data &= ~(gpio.dmask & ~gpio.dir); + wr_reg32(info, IODR, data); + } + if (gpio.smask) { + data = rd_reg32(info, IOVR); + data |= gpio.smask & gpio.state; + data &= ~(gpio.smask & ~gpio.state); + wr_reg32(info, IOVR, data); + } + spin_unlock_irqrestore(&info->lock,flags); + + return 0; +} + +/* + * get general purpose IO pin state and direction + */ +static int get_gpio(struct slgt_info *info, struct gpio_desc __user *user_gpio) +{ + struct gpio_desc gpio; + if (!info->gpio_present) + return -EINVAL; + gpio.state = rd_reg32(info, IOVR); + gpio.smask = 0xffffffff; + gpio.dir = rd_reg32(info, IODR); + gpio.dmask = 0xffffffff; + if (copy_to_user(user_gpio, &gpio, sizeof(gpio))) + return -EFAULT; + DBGINFO(("%s get_gpio state=%08x dir=%08x\n", + info->device_name, gpio.state, gpio.dir)); + return 0; +} + +/* + * conditional wait facility + */ +static void init_cond_wait(struct cond_wait *w, unsigned int data) +{ + init_waitqueue_head(&w->q); + init_waitqueue_entry(&w->wait, current); + w->data = data; +} + +static void add_cond_wait(struct cond_wait **head, struct cond_wait *w) +{ + set_current_state(TASK_INTERRUPTIBLE); + add_wait_queue(&w->q, &w->wait); + w->next = *head; + *head = w; +} + +static void remove_cond_wait(struct cond_wait **head, struct cond_wait *cw) +{ + struct cond_wait *w, *prev; + remove_wait_queue(&cw->q, &cw->wait); + set_current_state(TASK_RUNNING); + for (w = *head, prev = NULL ; w != NULL ; prev = w, w = w->next) { + if (w == cw) { + if (prev != NULL) + prev->next = w->next; + else + *head = w->next; + break; + } + } +} + +static void flush_cond_wait(struct cond_wait **head) +{ + while (*head != NULL) { + wake_up_interruptible(&(*head)->q); + *head = (*head)->next; + } +} + +/* + * wait for general purpose I/O pin(s) to enter specified state + * + * user_gpio fields: + * state - bit indicates target pin state + * smask - set bit indicates watched pin + * + * The wait ends when at least one watched pin enters the specified + * state. 
When 0 (no error) is returned, user_gpio->state is set to the + * state of all GPIO pins when the wait ends. + * + * Note: Each pin may be a dedicated input, dedicated output, or + * configurable input/output. The number and configuration of pins + * varies with the specific adapter model. Only input pins (dedicated + * or configured) can be monitored with this function. + */ +static int wait_gpio(struct slgt_info *info, struct gpio_desc __user *user_gpio) +{ + unsigned long flags; + int rc = 0; + struct gpio_desc gpio; + struct cond_wait wait; + u32 state; + + if (!info->gpio_present) + return -EINVAL; + if (copy_from_user(&gpio, user_gpio, sizeof(gpio))) + return -EFAULT; + DBGINFO(("%s wait_gpio() state=%08x smask=%08x\n", + info->device_name, gpio.state, gpio.smask)); + /* ignore output pins identified by set IODR bit */ + if ((gpio.smask &= ~rd_reg32(info, IODR)) == 0) + return -EINVAL; + init_cond_wait(&wait, gpio.smask); + + spin_lock_irqsave(&info->lock, flags); + /* enable interrupts for watched pins */ + wr_reg32(info, IOER, rd_reg32(info, IOER) | gpio.smask); + /* get current pin states */ + state = rd_reg32(info, IOVR); + + if (gpio.smask & ~(state ^ gpio.state)) { + /* already in target state */ + gpio.state = state; + } else { + /* wait for target state */ + add_cond_wait(&info->gpio_wait_q, &wait); + spin_unlock_irqrestore(&info->lock, flags); + schedule(); + if (signal_pending(current)) + rc = -ERESTARTSYS; + else + gpio.state = wait.data; + spin_lock_irqsave(&info->lock, flags); + remove_cond_wait(&info->gpio_wait_q, &wait); + } + + /* disable all GPIO interrupts if no waiting processes */ + if (info->gpio_wait_q == NULL) + wr_reg32(info, IOER, 0); + spin_unlock_irqrestore(&info->lock,flags); + + if ((rc == 0) && copy_to_user(user_gpio, &gpio, sizeof(gpio))) + rc = -EFAULT; + return rc; +} + static int modem_input_wait(struct slgt_info *info,int arg) { unsigned long flags; @@ -3166,8 +3398,10 @@ static void device_init(int adapter_num, struct pci_dev *pdev) } else { port_array[0]->irq_requested = 1; adapter_test(port_array[0]); - for (i=1 ; i < port_count ; i++) + for (i=1 ; i < port_count ; i++) { port_array[i]->init_error = port_array[0]->init_error; + port_array[i]->gpio_present = port_array[0]->gpio_present; + } } } } @@ -4301,7 +4535,7 @@ static int register_test(struct slgt_info *info) break; } } - + info->gpio_present = (rd_reg32(info, JCR) & BIT5) ? 1 : 0; info->init_error = rc ? 
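/*
 * Illustrative user-space sketch (not part of the patch above): one way
 * the new MGSL_IOCWAITGPIO ioctl might be driven.  It assumes struct
 * gpio_desc and the MGSL_IOC*GPIO request codes are exported through
 * <linux/synclink.h> as this driver expects; the device path passed in
 * is only an example.
 */
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/synclink.h>

static int wait_gpio0_high(const char *dev)
{
	struct gpio_desc gpio;
	int fd, rc;

	fd = open(dev, O_RDWR);
	if (fd < 0)
		return -1;

	gpio.smask = 1;		/* watch GPIO bit 0 only */
	gpio.state = 1;		/* wake when it reads as 1 */
	gpio.dir = 0;		/* direction bits unused by the wait ioctl */
	gpio.dmask = 0;

	rc = ioctl(fd, MGSL_IOCWAITGPIO, &gpio);	/* fails if the pin is an output */
	close(fd);
	return rc ? -1 : (int)(gpio.state & 1);
}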
0 : DiagStatus_AddressFailure; return rc; } diff --git a/drivers/char/tlclk.c b/drivers/char/tlclk.c index 4c272189cd42..2546637a55c0 100644 --- a/drivers/char/tlclk.c +++ b/drivers/char/tlclk.c @@ -767,6 +767,7 @@ static int __init tlclk_init(void) printk(KERN_ERR "tlclk: can't get major %d.\n", tlclk_major); return ret; } + tlclk_major = ret; alarm_events = kzalloc( sizeof(struct tlclk_alarms), GFP_KERNEL); if (!alarm_events) goto out1; diff --git a/drivers/char/tty_io.c b/drivers/char/tty_io.c index 48d795bb8c4b..811dadb9ce3e 100644 --- a/drivers/char/tty_io.c +++ b/drivers/char/tty_io.c @@ -543,14 +543,12 @@ void tty_ldisc_put(int disc) struct tty_ldisc *ld; unsigned long flags; - if (disc < N_TTY || disc >= NR_LDISCS) - BUG(); + BUG_ON(disc < N_TTY || disc >= NR_LDISCS); spin_lock_irqsave(&tty_ldisc_lock, flags); ld = &tty_ldiscs[disc]; - if(ld->refcount == 0) - BUG(); - ld->refcount --; + BUG_ON(ld->refcount == 0); + ld->refcount--; module_put(ld->owner); spin_unlock_irqrestore(&tty_ldisc_lock, flags); } @@ -645,8 +643,7 @@ void tty_ldisc_deref(struct tty_ldisc *ld) { unsigned long flags; - if(ld == NULL) - BUG(); + BUG_ON(ld == NULL); spin_lock_irqsave(&tty_ldisc_lock, flags); if(ld->refcount == 0) diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c index aed80e6aec6d..9b6ae7dc8b8a 100644 --- a/drivers/cpufreq/cpufreq.c +++ b/drivers/cpufreq/cpufreq.c @@ -52,9 +52,8 @@ static void handle_update(void *data); * changes to devices when the CPU clock speed changes. * The mutex locks both lists. */ -static struct notifier_block *cpufreq_policy_notifier_list; -static struct notifier_block *cpufreq_transition_notifier_list; -static DECLARE_RWSEM (cpufreq_notifier_rwsem); +static BLOCKING_NOTIFIER_HEAD(cpufreq_policy_notifier_list); +static BLOCKING_NOTIFIER_HEAD(cpufreq_transition_notifier_list); static LIST_HEAD(cpufreq_governor_list); @@ -247,8 +246,6 @@ void cpufreq_notify_transition(struct cpufreq_freqs *freqs, unsigned int state) dprintk("notification %u of frequency transition to %u kHz\n", state, freqs->new); - down_read(&cpufreq_notifier_rwsem); - policy = cpufreq_cpu_data[freqs->cpu]; switch (state) { @@ -266,20 +263,19 @@ void cpufreq_notify_transition(struct cpufreq_freqs *freqs, unsigned int state) freqs->old = policy->cur; } } - notifier_call_chain(&cpufreq_transition_notifier_list, - CPUFREQ_PRECHANGE, freqs); + blocking_notifier_call_chain(&cpufreq_transition_notifier_list, + CPUFREQ_PRECHANGE, freqs); adjust_jiffies(CPUFREQ_PRECHANGE, freqs); break; case CPUFREQ_POSTCHANGE: adjust_jiffies(CPUFREQ_POSTCHANGE, freqs); - notifier_call_chain(&cpufreq_transition_notifier_list, - CPUFREQ_POSTCHANGE, freqs); + blocking_notifier_call_chain(&cpufreq_transition_notifier_list, + CPUFREQ_POSTCHANGE, freqs); if (likely(policy) && likely(policy->cpu == freqs->cpu)) policy->cur = freqs->new; break; } - up_read(&cpufreq_notifier_rwsem); } EXPORT_SYMBOL_GPL(cpufreq_notify_transition); @@ -1007,7 +1003,7 @@ static int cpufreq_suspend(struct sys_device * sysdev, pm_message_t pmsg) freqs.old = cpu_policy->cur; freqs.new = cur_freq; - notifier_call_chain(&cpufreq_transition_notifier_list, + blocking_notifier_call_chain(&cpufreq_transition_notifier_list, CPUFREQ_SUSPENDCHANGE, &freqs); adjust_jiffies(CPUFREQ_SUSPENDCHANGE, &freqs); @@ -1088,7 +1084,8 @@ static int cpufreq_resume(struct sys_device * sysdev) freqs.old = cpu_policy->cur; freqs.new = cur_freq; - notifier_call_chain(&cpufreq_transition_notifier_list, + blocking_notifier_call_chain( + &cpufreq_transition_notifier_list, 
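/*
 * Minimal sketch of a client of the transition chain being invoked
 * here: registration still goes through cpufreq_register_notifier(),
 * only the mechanism behind it changes to a BLOCKING_NOTIFIER_HEAD in
 * this patch.  The my_* names are illustrative only.
 */
#include <linux/kernel.h>
#include <linux/notifier.h>
#include <linux/cpufreq.h>

static int my_freq_notify(struct notifier_block *nb,
			  unsigned long event, void *data)
{
	struct cpufreq_freqs *freqs = data;

	if (event == CPUFREQ_POSTCHANGE)
		printk(KERN_INFO "cpu%u: %u -> %u kHz\n",
		       freqs->cpu, freqs->old, freqs->new);
	return NOTIFY_OK;
}

static struct notifier_block my_freq_nb = {
	.notifier_call = my_freq_notify,
};

/*
 * In module init:  cpufreq_register_notifier(&my_freq_nb, CPUFREQ_TRANSITION_NOTIFIER);
 * in module exit:  cpufreq_unregister_notifier(&my_freq_nb, CPUFREQ_TRANSITION_NOTIFIER);
 */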
CPUFREQ_RESUMECHANGE, &freqs); adjust_jiffies(CPUFREQ_RESUMECHANGE, &freqs); @@ -1125,24 +1122,24 @@ static struct sysdev_driver cpufreq_sysdev_driver = { * changes in cpufreq policy. * * This function may sleep, and has the same return conditions as - * notifier_chain_register. + * blocking_notifier_chain_register. */ int cpufreq_register_notifier(struct notifier_block *nb, unsigned int list) { int ret; - down_write(&cpufreq_notifier_rwsem); switch (list) { case CPUFREQ_TRANSITION_NOTIFIER: - ret = notifier_chain_register(&cpufreq_transition_notifier_list, nb); + ret = blocking_notifier_chain_register( + &cpufreq_transition_notifier_list, nb); break; case CPUFREQ_POLICY_NOTIFIER: - ret = notifier_chain_register(&cpufreq_policy_notifier_list, nb); + ret = blocking_notifier_chain_register( + &cpufreq_policy_notifier_list, nb); break; default: ret = -EINVAL; } - up_write(&cpufreq_notifier_rwsem); return ret; } @@ -1157,24 +1154,24 @@ EXPORT_SYMBOL(cpufreq_register_notifier); * Remove a driver from the CPU frequency notifier list. * * This function may sleep, and has the same return conditions as - * notifier_chain_unregister. + * blocking_notifier_chain_unregister. */ int cpufreq_unregister_notifier(struct notifier_block *nb, unsigned int list) { int ret; - down_write(&cpufreq_notifier_rwsem); switch (list) { case CPUFREQ_TRANSITION_NOTIFIER: - ret = notifier_chain_unregister(&cpufreq_transition_notifier_list, nb); + ret = blocking_notifier_chain_unregister( + &cpufreq_transition_notifier_list, nb); break; case CPUFREQ_POLICY_NOTIFIER: - ret = notifier_chain_unregister(&cpufreq_policy_notifier_list, nb); + ret = blocking_notifier_chain_unregister( + &cpufreq_policy_notifier_list, nb); break; default: ret = -EINVAL; } - up_write(&cpufreq_notifier_rwsem); return ret; } @@ -1346,29 +1343,23 @@ static int __cpufreq_set_policy(struct cpufreq_policy *data, struct cpufreq_poli if (ret) goto error_out; - down_read(&cpufreq_notifier_rwsem); - /* adjust if necessary - all reasons */ - notifier_call_chain(&cpufreq_policy_notifier_list, CPUFREQ_ADJUST, - policy); + blocking_notifier_call_chain(&cpufreq_policy_notifier_list, + CPUFREQ_ADJUST, policy); /* adjust if necessary - hardware incompatibility*/ - notifier_call_chain(&cpufreq_policy_notifier_list, CPUFREQ_INCOMPATIBLE, - policy); + blocking_notifier_call_chain(&cpufreq_policy_notifier_list, + CPUFREQ_INCOMPATIBLE, policy); /* verify the cpu speed can be set within this limit, which might be different to the first one */ ret = cpufreq_driver->verify(policy); - if (ret) { - up_read(&cpufreq_notifier_rwsem); + if (ret) goto error_out; - } /* notification of the new policy */ - notifier_call_chain(&cpufreq_policy_notifier_list, CPUFREQ_NOTIFY, - policy); - - up_read(&cpufreq_notifier_rwsem); + blocking_notifier_call_chain(&cpufreq_policy_notifier_list, + CPUFREQ_NOTIFY, policy); data->min = policy->min; data->max = policy->max; diff --git a/drivers/cpufreq/cpufreq_conservative.c b/drivers/cpufreq/cpufreq_conservative.c index ac38766b2583..037f6bf4543c 100644 --- a/drivers/cpufreq/cpufreq_conservative.c +++ b/drivers/cpufreq/cpufreq_conservative.c @@ -35,12 +35,7 @@ */ #define DEF_FREQUENCY_UP_THRESHOLD (80) -#define MIN_FREQUENCY_UP_THRESHOLD (0) -#define MAX_FREQUENCY_UP_THRESHOLD (100) - #define DEF_FREQUENCY_DOWN_THRESHOLD (20) -#define MIN_FREQUENCY_DOWN_THRESHOLD (0) -#define MAX_FREQUENCY_DOWN_THRESHOLD (100) /* * The polling frequency of this governor depends on the capability of @@ -53,10 +48,14 @@ * All times here are in uS. 
*/ static unsigned int def_sampling_rate; -#define MIN_SAMPLING_RATE (def_sampling_rate / 2) +#define MIN_SAMPLING_RATE_RATIO (2) +/* for correct statistics, we need at least 10 ticks between each measure */ +#define MIN_STAT_SAMPLING_RATE (MIN_SAMPLING_RATE_RATIO * jiffies_to_usecs(10)) +#define MIN_SAMPLING_RATE (def_sampling_rate / MIN_SAMPLING_RATE_RATIO) #define MAX_SAMPLING_RATE (500 * def_sampling_rate) -#define DEF_SAMPLING_RATE_LATENCY_MULTIPLIER (100000) -#define DEF_SAMPLING_DOWN_FACTOR (5) +#define DEF_SAMPLING_RATE_LATENCY_MULTIPLIER (1000) +#define DEF_SAMPLING_DOWN_FACTOR (1) +#define MAX_SAMPLING_DOWN_FACTOR (10) #define TRANSITION_LATENCY_LIMIT (10 * 1000) static void do_dbs_timer(void *data); @@ -66,6 +65,8 @@ struct cpu_dbs_info_s { unsigned int prev_cpu_idle_up; unsigned int prev_cpu_idle_down; unsigned int enable; + unsigned int down_skip; + unsigned int requested_freq; }; static DEFINE_PER_CPU(struct cpu_dbs_info_s, cpu_dbs_info); @@ -87,6 +88,8 @@ static struct dbs_tuners dbs_tuners_ins = { .up_threshold = DEF_FREQUENCY_UP_THRESHOLD, .down_threshold = DEF_FREQUENCY_DOWN_THRESHOLD, .sampling_down_factor = DEF_SAMPLING_DOWN_FACTOR, + .ignore_nice = 0, + .freq_step = 5, }; static inline unsigned int get_cpu_idle_time(unsigned int cpu) @@ -136,7 +139,7 @@ static ssize_t store_sampling_down_factor(struct cpufreq_policy *unused, unsigned int input; int ret; ret = sscanf (buf, "%u", &input); - if (ret != 1 ) + if (ret != 1 || input > MAX_SAMPLING_DOWN_FACTOR || input < 1) return -EINVAL; mutex_lock(&dbs_mutex); @@ -173,8 +176,7 @@ static ssize_t store_up_threshold(struct cpufreq_policy *unused, ret = sscanf (buf, "%u", &input); mutex_lock(&dbs_mutex); - if (ret != 1 || input > MAX_FREQUENCY_UP_THRESHOLD || - input < MIN_FREQUENCY_UP_THRESHOLD || + if (ret != 1 || input > 100 || input < 0 || input <= dbs_tuners_ins.down_threshold) { mutex_unlock(&dbs_mutex); return -EINVAL; @@ -194,8 +196,7 @@ static ssize_t store_down_threshold(struct cpufreq_policy *unused, ret = sscanf (buf, "%u", &input); mutex_lock(&dbs_mutex); - if (ret != 1 || input > MAX_FREQUENCY_DOWN_THRESHOLD || - input < MIN_FREQUENCY_DOWN_THRESHOLD || + if (ret != 1 || input > 100 || input < 0 || input >= dbs_tuners_ins.up_threshold) { mutex_unlock(&dbs_mutex); return -EINVAL; @@ -297,31 +298,17 @@ static struct attribute_group dbs_attr_group = { static void dbs_check_cpu(int cpu) { unsigned int idle_ticks, up_idle_ticks, down_idle_ticks; + unsigned int tmp_idle_ticks, total_idle_ticks; unsigned int freq_step; unsigned int freq_down_sampling_rate; - static int down_skip[NR_CPUS]; - static int requested_freq[NR_CPUS]; - static unsigned short init_flag = 0; - struct cpu_dbs_info_s *this_dbs_info; - struct cpu_dbs_info_s *dbs_info; - + struct cpu_dbs_info_s *this_dbs_info = &per_cpu(cpu_dbs_info, cpu); struct cpufreq_policy *policy; - unsigned int j; - this_dbs_info = &per_cpu(cpu_dbs_info, cpu); if (!this_dbs_info->enable) return; policy = this_dbs_info->cur_policy; - if ( init_flag == 0 ) { - for_each_online_cpu(j) { - dbs_info = &per_cpu(cpu_dbs_info, j); - requested_freq[j] = dbs_info->cur_policy->cur; - } - init_flag = 1; - } - /* * The default safe range is 20% to 80% * Every sampling_rate, we check @@ -337,39 +324,29 @@ static void dbs_check_cpu(int cpu) */ /* Check for frequency increase */ - idle_ticks = UINT_MAX; - for_each_cpu_mask(j, policy->cpus) { - unsigned int tmp_idle_ticks, total_idle_ticks; - struct cpu_dbs_info_s *j_dbs_info; - j_dbs_info = &per_cpu(cpu_dbs_info, j); - /* Check for frequency 
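/*
 * Worked example for the MIN_STAT_SAMPLING_RATE floor introduced above
 * (an illustration, not from the patch): jiffies_to_usecs(10) is
 * 40000 us when HZ=250, so the floor becomes 2 * 40000 = 80000 us;
 * with HZ=1000 it is 2 * 10000 = 20000 us.  Either way a sampling
 * interval at or above the floor comfortably spans the "at least 10
 * ticks between each measure" that the comment above asks for.
 */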
increase */ - total_idle_ticks = get_cpu_idle_time(j); - tmp_idle_ticks = total_idle_ticks - - j_dbs_info->prev_cpu_idle_up; - j_dbs_info->prev_cpu_idle_up = total_idle_ticks; - - if (tmp_idle_ticks < idle_ticks) - idle_ticks = tmp_idle_ticks; - } + /* Check for frequency increase */ + total_idle_ticks = get_cpu_idle_time(cpu); + tmp_idle_ticks = total_idle_ticks - + this_dbs_info->prev_cpu_idle_up; + this_dbs_info->prev_cpu_idle_up = total_idle_ticks; + + if (tmp_idle_ticks < idle_ticks) + idle_ticks = tmp_idle_ticks; /* Scale idle ticks by 100 and compare with up and down ticks */ idle_ticks *= 100; up_idle_ticks = (100 - dbs_tuners_ins.up_threshold) * - usecs_to_jiffies(dbs_tuners_ins.sampling_rate); + usecs_to_jiffies(dbs_tuners_ins.sampling_rate); if (idle_ticks < up_idle_ticks) { - down_skip[cpu] = 0; - for_each_cpu_mask(j, policy->cpus) { - struct cpu_dbs_info_s *j_dbs_info; + this_dbs_info->down_skip = 0; + this_dbs_info->prev_cpu_idle_down = + this_dbs_info->prev_cpu_idle_up; - j_dbs_info = &per_cpu(cpu_dbs_info, j); - j_dbs_info->prev_cpu_idle_down = - j_dbs_info->prev_cpu_idle_up; - } /* if we are already at full speed then break out early */ - if (requested_freq[cpu] == policy->max) + if (this_dbs_info->requested_freq == policy->max) return; freq_step = (dbs_tuners_ins.freq_step * policy->max) / 100; @@ -378,49 +355,45 @@ static void dbs_check_cpu(int cpu) if (unlikely(freq_step == 0)) freq_step = 5; - requested_freq[cpu] += freq_step; - if (requested_freq[cpu] > policy->max) - requested_freq[cpu] = policy->max; + this_dbs_info->requested_freq += freq_step; + if (this_dbs_info->requested_freq > policy->max) + this_dbs_info->requested_freq = policy->max; - __cpufreq_driver_target(policy, requested_freq[cpu], + __cpufreq_driver_target(policy, this_dbs_info->requested_freq, CPUFREQ_RELATION_H); return; } /* Check for frequency decrease */ - down_skip[cpu]++; - if (down_skip[cpu] < dbs_tuners_ins.sampling_down_factor) + this_dbs_info->down_skip++; + if (this_dbs_info->down_skip < dbs_tuners_ins.sampling_down_factor) return; - idle_ticks = UINT_MAX; - for_each_cpu_mask(j, policy->cpus) { - unsigned int tmp_idle_ticks, total_idle_ticks; - struct cpu_dbs_info_s *j_dbs_info; + /* Check for frequency decrease */ + total_idle_ticks = this_dbs_info->prev_cpu_idle_up; + tmp_idle_ticks = total_idle_ticks - + this_dbs_info->prev_cpu_idle_down; + this_dbs_info->prev_cpu_idle_down = total_idle_ticks; - j_dbs_info = &per_cpu(cpu_dbs_info, j); - total_idle_ticks = j_dbs_info->prev_cpu_idle_up; - tmp_idle_ticks = total_idle_ticks - - j_dbs_info->prev_cpu_idle_down; - j_dbs_info->prev_cpu_idle_down = total_idle_ticks; - - if (tmp_idle_ticks < idle_ticks) - idle_ticks = tmp_idle_ticks; - } + if (tmp_idle_ticks < idle_ticks) + idle_ticks = tmp_idle_ticks; /* Scale idle ticks by 100 and compare with up and down ticks */ idle_ticks *= 100; - down_skip[cpu] = 0; + this_dbs_info->down_skip = 0; freq_down_sampling_rate = dbs_tuners_ins.sampling_rate * dbs_tuners_ins.sampling_down_factor; down_idle_ticks = (100 - dbs_tuners_ins.down_threshold) * - usecs_to_jiffies(freq_down_sampling_rate); + usecs_to_jiffies(freq_down_sampling_rate); if (idle_ticks > down_idle_ticks) { - /* if we are already at the lowest speed then break out early + /* + * if we are already at the lowest speed then break out early * or if we 'cannot' reduce the speed as the user might want - * freq_step to be zero */ - if (requested_freq[cpu] == policy->min + * freq_step to be zero + */ + if (this_dbs_info->requested_freq == policy->min 
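/*
 * Worked example for the 5% freq_step used in both directions here
 * (illustrative numbers, not from the patch): with the default
 * freq_step of 5 and policy->max = 2000000 kHz, each step is
 * (5 * 2000000) / 100 = 100000 kHz, so requested_freq ramps up or
 * down in 100 MHz increments and is clamped to policy->max or
 * policy->min at the ends of the range.
 */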
|| dbs_tuners_ins.freq_step == 0) return; @@ -430,13 +403,12 @@ static void dbs_check_cpu(int cpu) if (unlikely(freq_step == 0)) freq_step = 5; - requested_freq[cpu] -= freq_step; - if (requested_freq[cpu] < policy->min) - requested_freq[cpu] = policy->min; + this_dbs_info->requested_freq -= freq_step; + if (this_dbs_info->requested_freq < policy->min) + this_dbs_info->requested_freq = policy->min; - __cpufreq_driver_target(policy, - requested_freq[cpu], - CPUFREQ_RELATION_H); + __cpufreq_driver_target(policy, this_dbs_info->requested_freq, + CPUFREQ_RELATION_H); return; } } @@ -493,11 +465,13 @@ static int cpufreq_governor_dbs(struct cpufreq_policy *policy, j_dbs_info = &per_cpu(cpu_dbs_info, j); j_dbs_info->cur_policy = policy; - j_dbs_info->prev_cpu_idle_up = get_cpu_idle_time(j); + j_dbs_info->prev_cpu_idle_up = get_cpu_idle_time(cpu); j_dbs_info->prev_cpu_idle_down = j_dbs_info->prev_cpu_idle_up; } this_dbs_info->enable = 1; + this_dbs_info->down_skip = 0; + this_dbs_info->requested_freq = policy->cur; sysfs_create_group(&policy->kobj, &dbs_attr_group); dbs_enable++; /* @@ -507,16 +481,17 @@ static int cpufreq_governor_dbs(struct cpufreq_policy *policy, if (dbs_enable == 1) { unsigned int latency; /* policy latency is in nS. Convert it to uS first */ + latency = policy->cpuinfo.transition_latency / 1000; + if (latency == 0) + latency = 1; - latency = policy->cpuinfo.transition_latency; - if (latency < 1000) - latency = 1000; - - def_sampling_rate = (latency / 1000) * + def_sampling_rate = 10 * latency * DEF_SAMPLING_RATE_LATENCY_MULTIPLIER; + + if (def_sampling_rate < MIN_STAT_SAMPLING_RATE) + def_sampling_rate = MIN_STAT_SAMPLING_RATE; + dbs_tuners_ins.sampling_rate = def_sampling_rate; - dbs_tuners_ins.ignore_nice = 0; - dbs_tuners_ins.freq_step = 5; dbs_timer_init(); } diff --git a/drivers/cpufreq/cpufreq_ondemand.c b/drivers/cpufreq/cpufreq_ondemand.c index 69aa1db8336c..956d121cb161 100644 --- a/drivers/cpufreq/cpufreq_ondemand.c +++ b/drivers/cpufreq/cpufreq_ondemand.c @@ -84,6 +84,7 @@ struct dbs_tuners { static struct dbs_tuners dbs_tuners_ins = { .up_threshold = DEF_FREQUENCY_UP_THRESHOLD, .sampling_down_factor = DEF_SAMPLING_DOWN_FACTOR, + .ignore_nice = 0, }; static inline unsigned int get_cpu_idle_time(unsigned int cpu) @@ -350,6 +351,9 @@ static void dbs_check_cpu(int cpu) freq_next = (freq_next * policy->cur) / (dbs_tuners_ins.up_threshold - 10); + if (freq_next < policy->min) + freq_next = policy->min; + if (freq_next <= ((policy->cur * 95) / 100)) __cpufreq_driver_target(policy, freq_next, CPUFREQ_RELATION_L); } @@ -395,8 +399,11 @@ static int cpufreq_governor_dbs(struct cpufreq_policy *policy, return -EINVAL; if (policy->cpuinfo.transition_latency > - (TRANSITION_LATENCY_LIMIT * 1000)) + (TRANSITION_LATENCY_LIMIT * 1000)) { + printk(KERN_WARNING "ondemand governor failed to load " + "due to too long transition latency\n"); return -EINVAL; + } if (this_dbs_info->enable) /* Already enabled */ break; @@ -431,8 +438,6 @@ static int cpufreq_governor_dbs(struct cpufreq_policy *policy, def_sampling_rate = MIN_STAT_SAMPLING_RATE; dbs_tuners_ins.sampling_rate = def_sampling_rate; - dbs_tuners_ins.ignore_nice = 0; - dbs_timer_init(); } diff --git a/drivers/edac/Kconfig b/drivers/edac/Kconfig index 52f3eb45d2b9..b582d0cdc24f 100644 --- a/drivers/edac/Kconfig +++ b/drivers/edac/Kconfig @@ -64,35 +64,35 @@ config EDAC_AMD76X config EDAC_E7XXX tristate "Intel e7xxx (e7205, e7500, e7501, e7505)" - depends on EDAC_MM_EDAC && PCI + depends on EDAC_MM_EDAC && PCI && X86_32 help Support 
for error detection and correction on the Intel E7205, E7500, E7501 and E7505 server chipsets. config EDAC_E752X tristate "Intel e752x (e7520, e7525, e7320)" - depends on EDAC_MM_EDAC && PCI + depends on EDAC_MM_EDAC && PCI && X86 help Support for error detection and correction on the Intel E7520, E7525, E7320 server chipsets. config EDAC_I82875P tristate "Intel 82875p (D82875P, E7210)" - depends on EDAC_MM_EDAC && PCI + depends on EDAC_MM_EDAC && PCI && X86_32 help Support for error detection and correction on the Intel DP82785P and E7210 server chipsets. config EDAC_I82860 tristate "Intel 82860" - depends on EDAC_MM_EDAC && PCI + depends on EDAC_MM_EDAC && PCI && X86_32 help Support for error detection and correction on the Intel 82860 chipset. config EDAC_R82600 tristate "Radisys 82600 embedded chipset" - depends on EDAC_MM_EDAC + depends on EDAC_MM_EDAC && PCI && X86_32 help Support for error detection and correction on the Radisys 82600 embedded chipset. diff --git a/drivers/edac/amd76x_edac.c b/drivers/edac/amd76x_edac.c index 2fcc8120b53c..53423ad6d4a3 100644 --- a/drivers/edac/amd76x_edac.c +++ b/drivers/edac/amd76x_edac.c @@ -12,25 +12,26 @@ * */ - #include <linux/config.h> #include <linux/module.h> #include <linux/init.h> - #include <linux/pci.h> #include <linux/pci_ids.h> - #include <linux/slab.h> - #include "edac_mc.h" +#define amd76x_printk(level, fmt, arg...) \ + edac_printk(level, "amd76x", fmt, ##arg) + +#define amd76x_mc_printk(mci, level, fmt, arg...) \ + edac_mc_chipset_printk(mci, level, "amd76x", fmt, ##arg) #define AMD76X_NR_CSROWS 8 #define AMD76X_NR_CHANS 1 #define AMD76X_NR_DIMMS 4 - /* AMD 76x register addresses - device 0 function 0 - PCI bridge */ + #define AMD76X_ECC_MODE_STATUS 0x48 /* Mode and status of ECC (32b) * * 31:16 reserved @@ -42,6 +43,7 @@ * 7:4 UE cs row * 3:0 CE cs row */ + #define AMD76X_DRAM_MODE_STATUS 0x58 /* DRAM Mode and status (32b) * * 31:26 clock disable 5 - 0 @@ -56,6 +58,7 @@ * 15:8 reserved * 7:0 x4 mode enable 7 - 0 */ + #define AMD76X_MEM_BASE_ADDR 0xC0 /* Memory base address (8 x 32b) * * 31:23 chip-select base @@ -66,29 +69,28 @@ * 0 chip-select enable */ - struct amd76x_error_info { u32 ecc_mode_status; }; - enum amd76x_chips { AMD761 = 0, AMD762 }; - struct amd76x_dev_info { const char *ctl_name; }; - static const struct amd76x_dev_info amd76x_devs[] = { - [AMD761] = {.ctl_name = "AMD761"}, - [AMD762] = {.ctl_name = "AMD762"}, + [AMD761] = { + .ctl_name = "AMD761" + }, + [AMD762] = { + .ctl_name = "AMD762" + }, }; - /** * amd76x_get_error_info - fetch error information * @mci: Memory controller @@ -97,23 +99,21 @@ static const struct amd76x_dev_info amd76x_devs[] = { * Fetch and store the AMD76x ECC status. 
Clear pending status * on the chip so that further errors will be reported */ - -static void amd76x_get_error_info (struct mem_ctl_info *mci, - struct amd76x_error_info *info) +static void amd76x_get_error_info(struct mem_ctl_info *mci, + struct amd76x_error_info *info) { pci_read_config_dword(mci->pdev, AMD76X_ECC_MODE_STATUS, &info->ecc_mode_status); if (info->ecc_mode_status & BIT(8)) pci_write_bits32(mci->pdev, AMD76X_ECC_MODE_STATUS, - (u32) BIT(8), (u32) BIT(8)); + (u32) BIT(8), (u32) BIT(8)); if (info->ecc_mode_status & BIT(9)) pci_write_bits32(mci->pdev, AMD76X_ECC_MODE_STATUS, - (u32) BIT(9), (u32) BIT(9)); + (u32) BIT(9), (u32) BIT(9)); } - /** * amd76x_process_error_info - Error check * @mci: Memory controller @@ -124,8 +124,7 @@ static void amd76x_get_error_info (struct mem_ctl_info *mci, * A return of 1 indicates an error. Also if handle_errors is true * then attempt to handle and clean up after the error */ - -static int amd76x_process_error_info (struct mem_ctl_info *mci, +static int amd76x_process_error_info(struct mem_ctl_info *mci, struct amd76x_error_info *info, int handle_errors) { int error_found; @@ -141,9 +140,8 @@ static int amd76x_process_error_info (struct mem_ctl_info *mci, if (handle_errors) { row = (info->ecc_mode_status >> 4) & 0xf; - edac_mc_handle_ue(mci, - mci->csrows[row].first_page, 0, row, - mci->ctl_name); + edac_mc_handle_ue(mci, mci->csrows[row].first_page, 0, + row, mci->ctl_name); } } @@ -155,11 +153,11 @@ static int amd76x_process_error_info (struct mem_ctl_info *mci, if (handle_errors) { row = info->ecc_mode_status & 0xf; - edac_mc_handle_ce(mci, - mci->csrows[row].first_page, 0, 0, row, 0, - mci->ctl_name); + edac_mc_handle_ce(mci, mci->csrows[row].first_page, 0, + 0, row, 0, mci->ctl_name); } } + return error_found; } @@ -170,16 +168,14 @@ static int amd76x_process_error_info (struct mem_ctl_info *mci, * Called by the poll handlers this function reads the status * from the controller and checks for errors. */ - static void amd76x_check(struct mem_ctl_info *mci) { struct amd76x_error_info info; - debugf3("MC: " __FILE__ ": %s()\n", __func__); + debugf3("%s()\n", __func__); amd76x_get_error_info(mci, &info); amd76x_process_error_info(mci, &info, 1); } - /** * amd76x_probe1 - Perform set up for detected device * @pdev; PCI device detected @@ -189,7 +185,6 @@ static void amd76x_check(struct mem_ctl_info *mci) * controller status reporting. We configure and set up the * memory controller reporting and claim the device. */ - static int amd76x_probe1(struct pci_dev *pdev, int dev_idx) { int rc = -ENODEV; @@ -203,12 +198,11 @@ static int amd76x_probe1(struct pci_dev *pdev, int dev_idx) }; u32 ems; u32 ems_mode; + struct amd76x_error_info discard; - debugf0("MC: " __FILE__ ": %s()\n", __func__); - + debugf0("%s()\n", __func__); pci_read_config_dword(pdev, AMD76X_ECC_MODE_STATUS, &ems); ems_mode = (ems >> 10) & 0x3; - mci = edac_mc_alloc(0, AMD76X_NR_CSROWS, AMD76X_NR_CHANS); if (mci == NULL) { @@ -216,16 +210,13 @@ static int amd76x_probe1(struct pci_dev *pdev, int dev_idx) goto fail; } - debugf0("MC: " __FILE__ ": %s(): mci = %p\n", __func__, mci); - - mci->pdev = pci_dev_get(pdev); + debugf0("%s(): mci = %p\n", __func__, mci); + mci->pdev = pdev; mci->mtype_cap = MEM_FLAG_RDDR; - mci->edac_ctl_cap = EDAC_FLAG_NONE | EDAC_FLAG_EC | EDAC_FLAG_SECDED; mci->edac_cap = ems_mode ? 
- (EDAC_FLAG_EC | EDAC_FLAG_SECDED) : EDAC_FLAG_NONE; - - mci->mod_name = BS_MOD_STR; + (EDAC_FLAG_EC | EDAC_FLAG_SECDED) : EDAC_FLAG_NONE; + mci->mod_name = EDAC_MOD_STR; mci->mod_ver = "$Revision: 1.4.2.5 $"; mci->ctl_name = amd76x_devs[dev_idx].ctl_name; mci->edac_check = amd76x_check; @@ -240,18 +231,15 @@ static int amd76x_probe1(struct pci_dev *pdev, int dev_idx) /* find the DRAM Chip Select Base address and mask */ pci_read_config_dword(mci->pdev, - AMD76X_MEM_BASE_ADDR + (index * 4), - &mba); + AMD76X_MEM_BASE_ADDR + (index * 4), &mba); if (!(mba & BIT(0))) continue; mba_base = mba & 0xff800000UL; mba_mask = ((mba & 0xff80) << 16) | 0x7fffffUL; - pci_read_config_dword(mci->pdev, AMD76X_DRAM_MODE_STATUS, - &dms); - + &dms); csrow->first_page = mba_base >> PAGE_SHIFT; csrow->nr_pages = (mba_mask + 1) >> PAGE_SHIFT; csrow->last_page = csrow->first_page + csrow->nr_pages - 1; @@ -262,40 +250,33 @@ static int amd76x_probe1(struct pci_dev *pdev, int dev_idx) csrow->edac_mode = ems_modes[ems_mode]; } - /* clear counters */ - pci_write_bits32(mci->pdev, AMD76X_ECC_MODE_STATUS, (u32) (0x3 << 8), - (u32) (0x3 << 8)); + amd76x_get_error_info(mci, &discard); /* clear counters */ if (edac_mc_add_mc(mci)) { - debugf3("MC: " __FILE__ - ": %s(): failed edac_mc_add_mc()\n", __func__); + debugf3("%s(): failed edac_mc_add_mc()\n", __func__); goto fail; } /* get this far and it's successful */ - debugf3("MC: " __FILE__ ": %s(): success\n", __func__); + debugf3("%s(): success\n", __func__); return 0; fail: - if (mci) { - if(mci->pdev) - pci_dev_put(mci->pdev); + if (mci != NULL) edac_mc_free(mci); - } return rc; } /* returns count (>= 0), or negative on error */ static int __devinit amd76x_init_one(struct pci_dev *pdev, - const struct pci_device_id *ent) + const struct pci_device_id *ent) { - debugf0("MC: " __FILE__ ": %s()\n", __func__); + debugf0("%s()\n", __func__); /* don't need to call pci_device_enable() */ return amd76x_probe1(pdev, ent->driver_data); } - /** * amd76x_remove_one - driver shutdown * @pdev: PCI device being handed back @@ -304,35 +285,36 @@ static int __devinit amd76x_init_one(struct pci_dev *pdev, * structure for the device then delete the mci and free the * resources. */ - static void __devexit amd76x_remove_one(struct pci_dev *pdev) { struct mem_ctl_info *mci; - debugf0(__FILE__ ": %s()\n", __func__); + debugf0("%s()\n", __func__); - if ((mci = edac_mc_find_mci_by_pdev(pdev)) == NULL) + if ((mci = edac_mc_del_mc(pdev)) == NULL) return; - if (edac_mc_del_mc(mci)) - return; - pci_dev_put(mci->pdev); + edac_mc_free(mci); } - static const struct pci_device_id amd76x_pci_tbl[] __devinitdata = { - {PCI_VEND_DEV(AMD, FE_GATE_700C), PCI_ANY_ID, PCI_ANY_ID, 0, 0, - AMD762}, - {PCI_VEND_DEV(AMD, FE_GATE_700E), PCI_ANY_ID, PCI_ANY_ID, 0, 0, - AMD761}, - {0,} /* 0 terminated list. */ + { + PCI_VEND_DEV(AMD, FE_GATE_700C), PCI_ANY_ID, PCI_ANY_ID, 0, 0, + AMD762 + }, + { + PCI_VEND_DEV(AMD, FE_GATE_700E), PCI_ANY_ID, PCI_ANY_ID, 0, 0, + AMD761 + }, + { + 0, + } /* 0 terminated list. 
*/ }; MODULE_DEVICE_TABLE(pci, amd76x_pci_tbl); - static struct pci_driver amd76x_driver = { - .name = BS_MOD_STR, + .name = EDAC_MOD_STR, .probe = amd76x_init_one, .remove = __devexit_p(amd76x_remove_one), .id_table = amd76x_pci_tbl, diff --git a/drivers/edac/e752x_edac.c b/drivers/edac/e752x_edac.c index c454ded2b060..66572c5323ad 100644 --- a/drivers/edac/e752x_edac.c +++ b/drivers/edac/e752x_edac.c @@ -17,18 +17,19 @@ * */ - #include <linux/config.h> #include <linux/module.h> #include <linux/init.h> - #include <linux/pci.h> #include <linux/pci_ids.h> - #include <linux/slab.h> - #include "edac_mc.h" +#define e752x_printk(level, fmt, arg...) \ + edac_printk(level, "e752x", fmt, ##arg) + +#define e752x_mc_printk(mci, level, fmt, arg...) \ + edac_mc_chipset_printk(mci, level, "e752x", fmt, ##arg) #ifndef PCI_DEVICE_ID_INTEL_7520_0 #define PCI_DEVICE_ID_INTEL_7520_0 0x3590 @@ -56,7 +57,6 @@ #define E752X_NR_CSROWS 8 /* number of csrows */ - /* E752X register addresses - device 0 function 0 */ #define E752X_DRB 0x60 /* DRAM row boundary register (8b) */ #define E752X_DRA 0x70 /* DRAM row attribute register (8b) */ @@ -156,7 +156,6 @@ enum e752x_chips { E7320 = 2 }; - struct e752x_pvt { struct pci_dev *bridge_ck; struct pci_dev *dev_d0f0; @@ -170,9 +169,9 @@ struct e752x_pvt { const struct e752x_dev_info *dev_info; }; - struct e752x_dev_info { u16 err_dev; + u16 ctl_dev; const char *ctl_name; }; @@ -198,38 +197,47 @@ struct e752x_error_info { static const struct e752x_dev_info e752x_devs[] = { [E7520] = { - .err_dev = PCI_DEVICE_ID_INTEL_7520_1_ERR, - .ctl_name = "E7520"}, + .err_dev = PCI_DEVICE_ID_INTEL_7520_1_ERR, + .ctl_dev = PCI_DEVICE_ID_INTEL_7520_0, + .ctl_name = "E7520" + }, [E7525] = { - .err_dev = PCI_DEVICE_ID_INTEL_7525_1_ERR, - .ctl_name = "E7525"}, + .err_dev = PCI_DEVICE_ID_INTEL_7525_1_ERR, + .ctl_dev = PCI_DEVICE_ID_INTEL_7525_0, + .ctl_name = "E7525" + }, [E7320] = { - .err_dev = PCI_DEVICE_ID_INTEL_7320_1_ERR, - .ctl_name = "E7320"}, + .err_dev = PCI_DEVICE_ID_INTEL_7320_1_ERR, + .ctl_dev = PCI_DEVICE_ID_INTEL_7320_0, + .ctl_name = "E7320" + }, }; - static unsigned long ctl_page_to_phys(struct mem_ctl_info *mci, - unsigned long page) + unsigned long page) { u32 remap; struct e752x_pvt *pvt = (struct e752x_pvt *) mci->pvt_info; - debugf3("MC: " __FILE__ ": %s()\n", __func__); + debugf3("%s()\n", __func__); if (page < pvt->tolm) return page; + if ((page >= 0x100000) && (page < pvt->remapbase)) return page; + remap = (page - pvt->tolm) + pvt->remapbase; + if (remap < pvt->remaplimit) return remap; - printk(KERN_ERR "Invalid page %lx - out of range\n", page); + + e752x_printk(KERN_ERR, "Invalid page %lx - out of range\n", page); return pvt->tolm - 1; } static void do_process_ce(struct mem_ctl_info *mci, u16 error_one, - u32 sec1_add, u16 sec1_syndrome) + u32 sec1_add, u16 sec1_syndrome) { u32 page; int row; @@ -237,7 +245,7 @@ static void do_process_ce(struct mem_ctl_info *mci, u16 error_one, int i; struct e752x_pvt *pvt = (struct e752x_pvt *) mci->pvt_info; - debugf3("MC: " __FILE__ ": %s()\n", __func__); + debugf3("%s()\n", __func__); /* convert the addr to 4k page */ page = sec1_add >> (PAGE_SHIFT - 4); @@ -246,36 +254,37 @@ static void do_process_ce(struct mem_ctl_info *mci, u16 error_one, if (pvt->mc_symmetric) { /* chip select are bits 14 & 13 */ row = ((page >> 1) & 3); - printk(KERN_WARNING - "Test row %d Table %d %d %d %d %d %d %d %d\n", - row, pvt->map[0], pvt->map[1], pvt->map[2], - pvt->map[3], pvt->map[4], pvt->map[5], - pvt->map[6], pvt->map[7]); + 
e752x_printk(KERN_WARNING, + "Test row %d Table %d %d %d %d %d %d %d %d\n", row, + pvt->map[0], pvt->map[1], pvt->map[2], pvt->map[3], + pvt->map[4], pvt->map[5], pvt->map[6], pvt->map[7]); /* test for channel remapping */ for (i = 0; i < 8; i++) { if (pvt->map[i] == row) break; } - printk(KERN_WARNING "Test computed row %d\n", i); + + e752x_printk(KERN_WARNING, "Test computed row %d\n", i); + if (i < 8) row = i; else - printk(KERN_WARNING - "MC%d: row %d not found in remap table\n", - mci->mc_idx, row); + e752x_mc_printk(mci, KERN_WARNING, + "row %d not found in remap table\n", row); } else row = edac_mc_find_csrow_by_page(mci, page); + /* 0 = channel A, 1 = channel B */ channel = !(error_one & 1); if (!pvt->map_type) row = 7 - row; + edac_mc_handle_ce(mci, page, 0, sec1_syndrome, row, channel, - "e752x CE"); + "e752x CE"); } - static inline void process_ce(struct mem_ctl_info *mci, u16 error_one, u32 sec1_add, u16 sec1_syndrome, int *error_found, int handle_error) @@ -286,36 +295,42 @@ static inline void process_ce(struct mem_ctl_info *mci, u16 error_one, do_process_ce(mci, error_one, sec1_add, sec1_syndrome); } -static void do_process_ue(struct mem_ctl_info *mci, u16 error_one, u32 ded_add, - u32 scrb_add) +static void do_process_ue(struct mem_ctl_info *mci, u16 error_one, + u32 ded_add, u32 scrb_add) { u32 error_2b, block_page; int row; struct e752x_pvt *pvt = (struct e752x_pvt *) mci->pvt_info; - debugf3("MC: " __FILE__ ": %s()\n", __func__); + debugf3("%s()\n", __func__); if (error_one & 0x0202) { error_2b = ded_add; + /* convert to 4k address */ block_page = error_2b >> (PAGE_SHIFT - 4); + row = pvt->mc_symmetric ? - /* chip select are bits 14 & 13 */ - ((block_page >> 1) & 3) : - edac_mc_find_csrow_by_page(mci, block_page); + /* chip select are bits 14 & 13 */ + ((block_page >> 1) & 3) : + edac_mc_find_csrow_by_page(mci, block_page); + edac_mc_handle_ue(mci, block_page, 0, row, - "e752x UE from Read"); + "e752x UE from Read"); } if (error_one & 0x0404) { error_2b = scrb_add; + /* convert to 4k address */ block_page = error_2b >> (PAGE_SHIFT - 4); + row = pvt->mc_symmetric ? - /* chip select are bits 14 & 13 */ - ((block_page >> 1) & 3) : - edac_mc_find_csrow_by_page(mci, block_page); + /* chip select are bits 14 & 13 */ + ((block_page >> 1) & 3) : + edac_mc_find_csrow_by_page(mci, block_page); + edac_mc_handle_ue(mci, block_page, 0, row, - "e752x UE from Scruber"); + "e752x UE from Scruber"); } } @@ -336,7 +351,7 @@ static inline void process_ue_no_info_wr(struct mem_ctl_info *mci, if (!handle_error) return; - debugf3("MC: " __FILE__ ": %s()\n", __func__); + debugf3("%s()\n", __func__); edac_mc_handle_ue_no_info(mci, "e752x UE log memory write"); } @@ -348,13 +363,13 @@ static void do_process_ded_retry(struct mem_ctl_info *mci, u16 error, struct e752x_pvt *pvt = (struct e752x_pvt *) mci->pvt_info; error_1b = retry_add; - page = error_1b >> (PAGE_SHIFT - 4); /* convert the addr to 4k page */ + page = error_1b >> (PAGE_SHIFT - 4); /* convert the addr to 4k page */ row = pvt->mc_symmetric ? 
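/*
 * Worked example of the symmetric-mode row decode used in the handlers
 * above (illustrative, assuming the error-log registers hold the
 * address shifted right by four bits, as the PAGE_SHIFT - 4 conversion
 * suggests): the address is reduced to a 4 KiB page number and the
 * chip select is taken from address bits 14 and 13, i.e. bits 2 and 1
 * of that page number.  An error at physical address 0x6000 gives
 * page 0x6 and row = (0x6 >> 1) & 3 = 3; when mc_symmetric is clear
 * the code falls back to edac_mc_find_csrow_by_page() instead.
 */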
- ((page >> 1) & 3) : /* chip select are bits 14 & 13 */ - edac_mc_find_csrow_by_page(mci, page); - printk(KERN_WARNING - "MC%d: CE page 0x%lx, row %d : Memory read retry\n", - mci->mc_idx, (long unsigned int) page, row); + ((page >> 1) & 3) : /* chip select are bits 14 & 13 */ + edac_mc_find_csrow_by_page(mci, page); + e752x_mc_printk(mci, KERN_WARNING, + "CE page 0x%lx, row %d : Memory read retry\n", + (long unsigned int) page, row); } static inline void process_ded_retry(struct mem_ctl_info *mci, u16 error, @@ -372,8 +387,7 @@ static inline void process_threshold_ce(struct mem_ctl_info *mci, u16 error, *error_found = 1; if (handle_error) - printk(KERN_WARNING "MC%d: Memory threshold CE\n", - mci->mc_idx); + e752x_mc_printk(mci, KERN_WARNING, "Memory threshold CE\n"); } static char *global_message[11] = { @@ -391,8 +405,8 @@ static void do_global_error(int fatal, u32 errors) for (i = 0; i < 11; i++) { if (errors & (1 << i)) - printk(KERN_WARNING "%sError %s\n", - fatal_message[fatal], global_message[i]); + e752x_printk(KERN_WARNING, "%sError %s\n", + fatal_message[fatal], global_message[i]); } } @@ -418,8 +432,8 @@ static void do_hub_error(int fatal, u8 errors) for (i = 0; i < 7; i++) { if (errors & (1 << i)) - printk(KERN_WARNING "%sError %s\n", - fatal_message[fatal], hub_message[i]); + e752x_printk(KERN_WARNING, "%sError %s\n", + fatal_message[fatal], hub_message[i]); } } @@ -445,8 +459,8 @@ static void do_membuf_error(u8 errors) for (i = 0; i < 4; i++) { if (errors & (1 << i)) - printk(KERN_WARNING "Non-Fatal Error %s\n", - membuf_message[i]); + e752x_printk(KERN_WARNING, "Non-Fatal Error %s\n", + membuf_message[i]); } } @@ -458,8 +472,7 @@ static inline void membuf_error(u8 errors, int *error_found, int handle_error) do_membuf_error(errors); } -#if 0 -char *sysbus_message[10] = { +static char *sysbus_message[10] = { "Addr or Request Parity", "Data Strobe Glitch", "Addr Strobe Glitch", @@ -470,7 +483,6 @@ char *sysbus_message[10] = { "Memory Parity", "IO Subsystem Parity" }; -#endif /* 0 */ static void do_sysbus_error(int fatal, u32 errors) { @@ -478,8 +490,8 @@ static void do_sysbus_error(int fatal, u32 errors) for (i = 0; i < 10; i++) { if (errors & (1 << i)) - printk(KERN_WARNING "%sError System Bus %s\n", - fatal_message[fatal], global_message[i]); + e752x_printk(KERN_WARNING, "%sError System Bus %s\n", + fatal_message[fatal], sysbus_message[i]); } } @@ -492,33 +504,42 @@ static inline void sysbus_error(int fatal, u32 errors, int *error_found, do_sysbus_error(fatal, errors); } -static void e752x_check_hub_interface (struct e752x_error_info *info, +static void e752x_check_hub_interface(struct e752x_error_info *info, int *error_found, int handle_error) { u8 stat8; //pci_read_config_byte(dev,E752X_HI_FERR,&stat8); + stat8 = info->hi_ferr; + if(stat8 & 0x7f) { /* Error, so process */ stat8 &= 0x7f; + if(stat8 & 0x2b) hub_error(1, stat8 & 0x2b, error_found, handle_error); + if(stat8 & 0x54) hub_error(0, stat8 & 0x54, error_found, handle_error); } + //pci_read_config_byte(dev,E752X_HI_NERR,&stat8); + stat8 = info->hi_nerr; + if(stat8 & 0x7f) { /* Error, so process */ stat8 &= 0x7f; + if (stat8 & 0x2b) hub_error(1, stat8 & 0x2b, error_found, handle_error); + if(stat8 & 0x54) hub_error(0, stat8 & 0x54, error_found, handle_error); } } -static void e752x_check_sysbus (struct e752x_error_info *info, int *error_found, - int handle_error) +static void e752x_check_sysbus(struct e752x_error_info *info, + int *error_found, int handle_error) { u32 stat32, error32; @@ -530,27 +551,34 @@ static 
void e752x_check_sysbus (struct e752x_error_info *info, int *error_found, error32 = (stat32 >> 16) & 0x3ff; stat32 = stat32 & 0x3ff; + if(stat32 & 0x083) sysbus_error(1, stat32 & 0x083, error_found, handle_error); + if(stat32 & 0x37c) sysbus_error(0, stat32 & 0x37c, error_found, handle_error); + if(error32 & 0x083) sysbus_error(1, error32 & 0x083, error_found, handle_error); + if(error32 & 0x37c) sysbus_error(0, error32 & 0x37c, error_found, handle_error); } -static void e752x_check_membuf (struct e752x_error_info *info, int *error_found, - int handle_error) +static void e752x_check_membuf (struct e752x_error_info *info, + int *error_found, int handle_error) { u8 stat8; stat8 = info->buf_ferr; + if (stat8 & 0x0f) { /* Error, so process */ stat8 &= 0x0f; membuf_error(stat8, error_found, handle_error); } + stat8 = info->buf_nerr; + if (stat8 & 0x0f) { /* Error, so process */ stat8 &= 0x0f; membuf_error(stat8, error_found, handle_error); @@ -558,7 +586,8 @@ static void e752x_check_membuf (struct e752x_error_info *info, int *error_found, } static void e752x_check_dram (struct mem_ctl_info *mci, - struct e752x_error_info *info, int *error_found, int handle_error) + struct e752x_error_info *info, int *error_found, + int handle_error) { u16 error_one, error_next; @@ -608,7 +637,7 @@ static void e752x_check_dram (struct mem_ctl_info *mci, } static void e752x_get_error_info (struct mem_ctl_info *mci, - struct e752x_error_info *info) + struct e752x_error_info *info) { struct pci_dev *dev; struct e752x_pvt *pvt; @@ -616,7 +645,6 @@ static void e752x_get_error_info (struct mem_ctl_info *mci, memset(info, 0, sizeof(*info)); pvt = (struct e752x_pvt *) mci->pvt_info; dev = pvt->dev_d0f1; - pci_read_config_dword(dev, E752X_FERR_GLOBAL, &info->ferr_global); if (info->ferr_global) { @@ -727,7 +755,8 @@ static int e752x_process_error_info (struct mem_ctl_info *mci, static void e752x_check(struct mem_ctl_info *mci) { struct e752x_error_info info; - debugf3("MC: " __FILE__ ": %s()\n", __func__); + + debugf3("%s()\n", __func__); e752x_get_error_info(mci, &info); e752x_process_error_info(mci, &info, 1); } @@ -736,23 +765,21 @@ static int e752x_probe1(struct pci_dev *pdev, int dev_idx) { int rc = -ENODEV; int index; - u16 pci_data, stat; - u32 stat32; - u16 stat16; + u16 pci_data; u8 stat8; struct mem_ctl_info *mci = NULL; struct e752x_pvt *pvt = NULL; u16 ddrcsr; u32 drc; - int drc_chan; /* Number of channels 0=1chan,1=2chan */ - int drc_drbg; /* DRB granularity 0=64mb,1=128mb */ - int drc_ddim; /* DRAM Data Integrity Mode 0=none,2=edac */ + int drc_chan; /* Number of channels 0=1chan,1=2chan */ + int drc_drbg; /* DRB granularity 0=64mb, 1=128mb */ + int drc_ddim; /* DRAM Data Integrity Mode 0=none,2=edac */ u32 dra; unsigned long last_cumul_size; - struct pci_dev *pres_dev; struct pci_dev *dev = NULL; + struct e752x_error_info discard; - debugf0("MC: " __FILE__ ": %s(): mci\n", __func__); + debugf0("%s(): mci\n", __func__); debugf0("Starting Probe1\n"); /* enable device 0 function 1 */ @@ -776,34 +803,35 @@ static int e752x_probe1(struct pci_dev *pdev, int dev_idx) goto fail; } - debugf3("MC: " __FILE__ ": %s(): init mci\n", __func__); - + debugf3("%s(): init mci\n", __func__); mci->mtype_cap = MEM_FLAG_RDDR; mci->edac_ctl_cap = EDAC_FLAG_NONE | EDAC_FLAG_SECDED | EDAC_FLAG_S4ECD4ED; /* FIXME - what if different memory types are in different csrows? 
*/ - mci->mod_name = BS_MOD_STR; + mci->mod_name = EDAC_MOD_STR; mci->mod_ver = "$Revision: 1.5.2.11 $"; mci->pdev = pdev; - debugf3("MC: " __FILE__ ": %s(): init pvt\n", __func__); + debugf3("%s(): init pvt\n", __func__); pvt = (struct e752x_pvt *) mci->pvt_info; pvt->dev_info = &e752x_devs[dev_idx]; pvt->bridge_ck = pci_get_device(PCI_VENDOR_ID_INTEL, - pvt->dev_info->err_dev, - pvt->bridge_ck); + pvt->dev_info->err_dev, + pvt->bridge_ck); + if (pvt->bridge_ck == NULL) pvt->bridge_ck = pci_scan_single_device(pdev->bus, - PCI_DEVFN(0, 1)); + PCI_DEVFN(0, 1)); + if (pvt->bridge_ck == NULL) { - printk(KERN_ERR "MC: error reporting device not found:" - "vendor %x device 0x%x (broken BIOS?)\n", - PCI_VENDOR_ID_INTEL, e752x_devs[dev_idx].err_dev); + e752x_printk(KERN_ERR, "error reporting device not found:" + "vendor %x device 0x%x (broken BIOS?)\n", + PCI_VENDOR_ID_INTEL, e752x_devs[dev_idx].err_dev); goto fail; } - pvt->mc_symmetric = ((ddrcsr & 0x10) != 0); - debugf3("MC: " __FILE__ ": %s(): more mci init\n", __func__); + pvt->mc_symmetric = ((ddrcsr & 0x10) != 0); + debugf3("%s(): more mci init\n", __func__); mci->ctl_name = pvt->dev_info->ctl_name; mci->edac_check = e752x_check; mci->ctl_page_to_phys = ctl_page_to_phys; @@ -820,6 +848,7 @@ static int e752x_probe1(struct pci_dev *pdev, int dev_idx) for (last_cumul_size = index = 0; index < mci->nr_csrows; index++) { u8 value; u32 cumul_size; + /* mem_dev 0=x8, 1=x4 */ int mem_dev = (dra >> (index * 4 + 2)) & 0x3; struct csrow_info *csrow = &mci->csrows[index]; @@ -828,17 +857,18 @@ static int e752x_probe1(struct pci_dev *pdev, int dev_idx) pci_read_config_byte(mci->pdev, E752X_DRB + index, &value); /* convert a 128 or 64 MiB DRB to a page size. */ cumul_size = value << (25 + drc_drbg - PAGE_SHIFT); - debugf3("MC: " __FILE__ ": %s(): (%d) cumul_size 0x%x\n", - __func__, index, cumul_size); + debugf3("%s(): (%d) cumul_size 0x%x\n", __func__, index, + cumul_size); + if (cumul_size == last_cumul_size) - continue; /* not populated */ + continue; /* not populated */ csrow->first_page = last_cumul_size; csrow->last_page = cumul_size - 1; csrow->nr_pages = cumul_size - last_cumul_size; last_cumul_size = cumul_size; - csrow->grain = 1 << 12; /* 4KiB - resolution of CELOG */ - csrow->mtype = MEM_RDDR; /* only one type supported */ + csrow->grain = 1 << 12; /* 4KiB - resolution of CELOG */ + csrow->mtype = MEM_RDDR; /* only one type supported */ csrow->dtype = mem_dev ? DEV_X4 : DEV_X8; /* @@ -862,29 +892,32 @@ static int e752x_probe1(struct pci_dev *pdev, int dev_idx) u8 value; u8 last = 0; u8 row = 0; - for (index = 0; index < 8; index += 2) { + for (index = 0; index < 8; index += 2) { pci_read_config_byte(mci->pdev, E752X_DRB + index, - &value); + &value); + /* test if there is a dimm in this slot */ if (value == last) { /* no dimm in the slot, so flag it as empty */ pvt->map[index] = 0xff; pvt->map[index + 1] = 0xff; - } else { /* there is a dimm in the slot */ + } else { /* there is a dimm in the slot */ pvt->map[index] = row; row++; last = value; /* test the next value to see if the dimm is double sided */ pci_read_config_byte(mci->pdev, - E752X_DRB + index + 1, - &value); + E752X_DRB + index + 1, + &value); pvt->map[index + 1] = (value == last) ? 
- 0xff : /* the dimm is single sided, - so flag as empty */ - row; /* this is a double sided dimm - to save the next row # */ + 0xff : /* the dimm is single sided, + * so flag as empty + */ + row; /* this is a double sided dimm + * to save the next row # + */ row++; last = value; } @@ -896,9 +929,8 @@ static int e752x_probe1(struct pci_dev *pdev, int dev_idx) pvt->map_type = ((stat8 & 0x0f) > ((stat8 >> 4) & 0x0f)); mci->edac_cap |= EDAC_FLAG_NONE; + debugf3("%s(): tolm, remapbase, remaplimit\n", __func__); - debugf3("MC: " __FILE__ ": %s(): tolm, remapbase, remaplimit\n", - __func__); /* load the top of low memory, remap base, and remap limit vars */ pci_read_config_word(mci->pdev, E752X_TOLM, &pci_data); pvt->tolm = ((u32) pci_data) << 4; @@ -906,43 +938,18 @@ static int e752x_probe1(struct pci_dev *pdev, int dev_idx) pvt->remapbase = ((u32) pci_data) << 14; pci_read_config_word(mci->pdev, E752X_REMAPLIMIT, &pci_data); pvt->remaplimit = ((u32) pci_data) << 14; - printk("tolm = %x, remapbase = %x, remaplimit = %x\n", pvt->tolm, - pvt->remapbase, pvt->remaplimit); + e752x_printk(KERN_INFO, + "tolm = %x, remapbase = %x, remaplimit = %x\n", pvt->tolm, + pvt->remapbase, pvt->remaplimit); if (edac_mc_add_mc(mci)) { - debugf3("MC: " __FILE__ - ": %s(): failed edac_mc_add_mc()\n", - __func__); + debugf3("%s(): failed edac_mc_add_mc()\n", __func__); goto fail; } - /* Walk through the PCI table and clear errors */ - switch (dev_idx) { - case E7520: - dev = pci_get_device(PCI_VENDOR_ID_INTEL, - PCI_DEVICE_ID_INTEL_7520_0, NULL); - break; - case E7525: - dev = pci_get_device(PCI_VENDOR_ID_INTEL, - PCI_DEVICE_ID_INTEL_7525_0, NULL); - break; - case E7320: - dev = pci_get_device(PCI_VENDOR_ID_INTEL, - PCI_DEVICE_ID_INTEL_7320_0, NULL); - break; - } - - + dev = pci_get_device(PCI_VENDOR_ID_INTEL, e752x_devs[dev_idx].ctl_dev, + NULL); pvt->dev_d0f0 = dev; - for (pres_dev = dev; - ((struct pci_dev *) pres_dev->global_list.next != dev); - pres_dev = (struct pci_dev *) pres_dev->global_list.next) { - pci_read_config_dword(pres_dev, PCI_COMMAND, &stat32); - stat = (u16) (stat32 >> 16); - /* clear any error bits */ - if (stat32 & ((1 << 6) + (1 << 8))) - pci_write_config_word(pres_dev, PCI_STATUS, stat); - } /* find the error reporting device and clear errors */ dev = pvt->dev_d0f1 = pci_dev_get(pvt->bridge_ck); /* Turn off error disable & SMI in case the BIOS turned it on */ @@ -954,67 +961,51 @@ static int e752x_probe1(struct pci_dev *pdev, int dev_idx) pci_write_config_byte(dev, E752X_BUF_SMICMD, 0x00); pci_write_config_byte(dev, E752X_DRAM_ERRMASK, 0x00); pci_write_config_byte(dev, E752X_DRAM_SMICMD, 0x00); - /* clear other MCH errors */ - pci_read_config_dword(dev, E752X_FERR_GLOBAL, &stat32); - pci_write_config_dword(dev, E752X_FERR_GLOBAL, stat32); - pci_read_config_dword(dev, E752X_NERR_GLOBAL, &stat32); - pci_write_config_dword(dev, E752X_NERR_GLOBAL, stat32); - pci_read_config_byte(dev, E752X_HI_FERR, &stat8); - pci_write_config_byte(dev, E752X_HI_FERR, stat8); - pci_read_config_byte(dev, E752X_HI_NERR, &stat8); - pci_write_config_byte(dev, E752X_HI_NERR, stat8); - pci_read_config_dword(dev, E752X_SYSBUS_FERR, &stat32); - pci_write_config_dword(dev, E752X_SYSBUS_FERR, stat32); - pci_read_config_byte(dev, E752X_BUF_FERR, &stat8); - pci_write_config_byte(dev, E752X_BUF_FERR, stat8); - pci_read_config_byte(dev, E752X_BUF_NERR, &stat8); - pci_write_config_byte(dev, E752X_BUF_NERR, stat8); - pci_read_config_word(dev, E752X_DRAM_FERR, &stat16); - pci_write_config_word(dev, E752X_DRAM_FERR, stat16); - 
pci_read_config_word(dev, E752X_DRAM_NERR, &stat16); - pci_write_config_word(dev, E752X_DRAM_NERR, stat16); + + e752x_get_error_info(mci, &discard); /* clear other MCH errors */ /* get this far and it's successful */ - debugf3("MC: " __FILE__ ": %s(): success\n", __func__); + debugf3("%s(): success\n", __func__); return 0; fail: if (mci) { if (pvt->dev_d0f0) pci_dev_put(pvt->dev_d0f0); + if (pvt->dev_d0f1) pci_dev_put(pvt->dev_d0f1); + if (pvt->bridge_ck) pci_dev_put(pvt->bridge_ck); + edac_mc_free(mci); } + return rc; } /* returns count (>= 0), or negative on error */ static int __devinit e752x_init_one(struct pci_dev *pdev, - const struct pci_device_id *ent) + const struct pci_device_id *ent) { - debugf0("MC: " __FILE__ ": %s()\n", __func__); + debugf0("%s()\n", __func__); /* wake up and enable device */ if(pci_enable_device(pdev) < 0) return -EIO; + return e752x_probe1(pdev, ent->driver_data); } - static void __devexit e752x_remove_one(struct pci_dev *pdev) { struct mem_ctl_info *mci; struct e752x_pvt *pvt; - debugf0(__FILE__ ": %s()\n", __func__); - - if ((mci = edac_mc_find_mci_by_pdev(pdev)) == NULL) - return; + debugf0("%s()\n", __func__); - if (edac_mc_del_mc(mci)) + if ((mci = edac_mc_del_mc(pdev)) == NULL) return; pvt = (struct e752x_pvt *) mci->pvt_info; @@ -1024,45 +1015,48 @@ static void __devexit e752x_remove_one(struct pci_dev *pdev) edac_mc_free(mci); } - static const struct pci_device_id e752x_pci_tbl[] __devinitdata = { - {PCI_VEND_DEV(INTEL, 7520_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0, - E7520}, - {PCI_VEND_DEV(INTEL, 7525_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0, - E7525}, - {PCI_VEND_DEV(INTEL, 7320_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0, - E7320}, - {0,} /* 0 terminated list. */ + { + PCI_VEND_DEV(INTEL, 7520_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0, + E7520 + }, + { + PCI_VEND_DEV(INTEL, 7525_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0, + E7525 + }, + { + PCI_VEND_DEV(INTEL, 7320_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0, + E7320 + }, + { + 0, + } /* 0 terminated list. */ }; MODULE_DEVICE_TABLE(pci, e752x_pci_tbl); - static struct pci_driver e752x_driver = { - .name = BS_MOD_STR, + .name = EDAC_MOD_STR, .probe = e752x_init_one, .remove = __devexit_p(e752x_remove_one), .id_table = e752x_pci_tbl, }; - static int __init e752x_init(void) { int pci_rc; - debugf3("MC: " __FILE__ ": %s()\n", __func__); + debugf3("%s()\n", __func__); pci_rc = pci_register_driver(&e752x_driver); return (pci_rc < 0) ? pci_rc : 0; } - static void __exit e752x_exit(void) { - debugf3("MC: " __FILE__ ": %s()\n", __func__); + debugf3("%s()\n", __func__); pci_unregister_driver(&e752x_driver); } - module_init(e752x_init); module_exit(e752x_exit); diff --git a/drivers/edac/e7xxx_edac.c b/drivers/edac/e7xxx_edac.c index d5e320dfc66f..a9518d3e4be4 100644 --- a/drivers/edac/e7xxx_edac.c +++ b/drivers/edac/e7xxx_edac.c @@ -11,9 +11,9 @@ * http://www.anime.net/~goemon/linux-ecc/ * * Contributors: - * Eric Biederman (Linux Networx) - * Tom Zimmerman (Linux Networx) - * Jim Garlick (Lawrence Livermore National Labs) + * Eric Biederman (Linux Networx) + * Tom Zimmerman (Linux Networx) + * Jim Garlick (Lawrence Livermore National Labs) * Dave Peterson (Lawrence Livermore National Labs) * That One Guy (Some other place) * Wang Zhenyu (intel.com) @@ -22,7 +22,6 @@ * */ - #include <linux/config.h> #include <linux/module.h> #include <linux/init.h> @@ -31,6 +30,11 @@ #include <linux/slab.h> #include "edac_mc.h" +#define e7xxx_printk(level, fmt, arg...) \ + edac_printk(level, "e7xxx", fmt, ##arg) + +#define e7xxx_mc_printk(mci, level, fmt, arg...) 
\ + edac_mc_chipset_printk(mci, level, "e7xxx", fmt, ##arg) #ifndef PCI_DEVICE_ID_INTEL_7205_0 #define PCI_DEVICE_ID_INTEL_7205_0 0x255d @@ -64,11 +68,9 @@ #define PCI_DEVICE_ID_INTEL_7505_1_ERR 0x2551 #endif /* PCI_DEVICE_ID_INTEL_7505_1_ERR */ - #define E7XXX_NR_CSROWS 8 /* number of csrows */ #define E7XXX_NR_DIMMS 8 /* FIXME - is this correct? */ - /* E7XXX register addresses - device 0 function 0 */ #define E7XXX_DRB 0x60 /* DRAM row boundary register (8b) */ #define E7XXX_DRA 0x70 /* DRAM row attribute register (8b) */ @@ -118,7 +120,6 @@ enum e7xxx_chips { E7205, }; - struct e7xxx_pvt { struct pci_dev *bridge_ck; u32 tolm; @@ -127,13 +128,11 @@ struct e7xxx_pvt { const struct e7xxx_dev_info *dev_info; }; - struct e7xxx_dev_info { u16 err_dev; const char *ctl_name; }; - struct e7xxx_error_info { u8 dram_ferr; u8 dram_nerr; @@ -144,108 +143,110 @@ struct e7xxx_error_info { static const struct e7xxx_dev_info e7xxx_devs[] = { [E7500] = { - .err_dev = PCI_DEVICE_ID_INTEL_7500_1_ERR, - .ctl_name = "E7500"}, + .err_dev = PCI_DEVICE_ID_INTEL_7500_1_ERR, + .ctl_name = "E7500" + }, [E7501] = { - .err_dev = PCI_DEVICE_ID_INTEL_7501_1_ERR, - .ctl_name = "E7501"}, + .err_dev = PCI_DEVICE_ID_INTEL_7501_1_ERR, + .ctl_name = "E7501" + }, [E7505] = { - .err_dev = PCI_DEVICE_ID_INTEL_7505_1_ERR, - .ctl_name = "E7505"}, + .err_dev = PCI_DEVICE_ID_INTEL_7505_1_ERR, + .ctl_name = "E7505" + }, [E7205] = { - .err_dev = PCI_DEVICE_ID_INTEL_7205_1_ERR, - .ctl_name = "E7205"}, + .err_dev = PCI_DEVICE_ID_INTEL_7205_1_ERR, + .ctl_name = "E7205" + }, }; - /* FIXME - is this valid for both SECDED and S4ECD4ED? */ static inline int e7xxx_find_channel(u16 syndrome) { - debugf3("MC: " __FILE__ ": %s()\n", __func__); + debugf3("%s()\n", __func__); if ((syndrome & 0xff00) == 0) return 0; + if ((syndrome & 0x00ff) == 0) return 1; + if ((syndrome & 0xf000) == 0 || (syndrome & 0x0f00) == 0) return 0; + return 1; } - -static unsigned long -ctl_page_to_phys(struct mem_ctl_info *mci, unsigned long page) +static unsigned long ctl_page_to_phys(struct mem_ctl_info *mci, + unsigned long page) { u32 remap; struct e7xxx_pvt *pvt = (struct e7xxx_pvt *) mci->pvt_info; - debugf3("MC: " __FILE__ ": %s()\n", __func__); + debugf3("%s()\n", __func__); if ((page < pvt->tolm) || - ((page >= 0x100000) && (page < pvt->remapbase))) + ((page >= 0x100000) && (page < pvt->remapbase))) return page; + remap = (page - pvt->tolm) + pvt->remapbase; + if (remap < pvt->remaplimit) return remap; - printk(KERN_ERR "Invalid page %lx - out of range\n", page); + + e7xxx_printk(KERN_ERR, "Invalid page %lx - out of range\n", page); return pvt->tolm - 1; } - -static void process_ce(struct mem_ctl_info *mci, struct e7xxx_error_info *info) +static void process_ce(struct mem_ctl_info *mci, + struct e7xxx_error_info *info) { u32 error_1b, page; u16 syndrome; int row; int channel; - debugf3("MC: " __FILE__ ": %s()\n", __func__); - + debugf3("%s()\n", __func__); /* read the error address */ error_1b = info->dram_celog_add; /* FIXME - should use PAGE_SHIFT */ - page = error_1b >> 6; /* convert the address to 4k page */ + page = error_1b >> 6; /* convert the address to 4k page */ /* read the syndrome */ syndrome = info->dram_celog_syndrome; /* FIXME - check for -1 */ row = edac_mc_find_csrow_by_page(mci, page); /* convert syndrome to channel */ channel = e7xxx_find_channel(syndrome); - edac_mc_handle_ce(mci, page, 0, syndrome, row, channel, - "e7xxx CE"); + edac_mc_handle_ce(mci, page, 0, syndrome, row, channel, "e7xxx CE"); } - static void process_ce_no_info(struct 
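/*
 * Worked example for e7xxx_find_channel() above (illustrative values,
 * not from the patch): a syndrome of 0x0042 has an empty high byte and
 * is charged to channel 0, 0x4200 has an empty low byte and goes to
 * channel 1, and a syndrome touching both bytes such as 0x0234 falls
 * through to the nibble tests, where the zero 0xf000 nibble again
 * selects channel 0.
 */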
mem_ctl_info *mci) { - debugf3("MC: " __FILE__ ": %s()\n", __func__); + debugf3("%s()\n", __func__); edac_mc_handle_ce_no_info(mci, "e7xxx CE log register overflow"); } - -static void process_ue(struct mem_ctl_info *mci, struct e7xxx_error_info *info) +static void process_ue(struct mem_ctl_info *mci, + struct e7xxx_error_info *info) { u32 error_2b, block_page; int row; - debugf3("MC: " __FILE__ ": %s()\n", __func__); - + debugf3("%s()\n", __func__); /* read the error address */ error_2b = info->dram_uelog_add; /* FIXME - should use PAGE_SHIFT */ - block_page = error_2b >> 6; /* convert to 4k address */ + block_page = error_2b >> 6; /* convert to 4k address */ row = edac_mc_find_csrow_by_page(mci, block_page); edac_mc_handle_ue(mci, block_page, 0, row, "e7xxx UE"); } - static void process_ue_no_info(struct mem_ctl_info *mci) { - debugf3("MC: " __FILE__ ": %s()\n", __func__); + debugf3("%s()\n", __func__); edac_mc_handle_ue_no_info(mci, "e7xxx UE log register overflow"); } - static void e7xxx_get_error_info (struct mem_ctl_info *mci, struct e7xxx_error_info *info) { @@ -253,31 +254,29 @@ static void e7xxx_get_error_info (struct mem_ctl_info *mci, pvt = (struct e7xxx_pvt *) mci->pvt_info; pci_read_config_byte(pvt->bridge_ck, E7XXX_DRAM_FERR, - &info->dram_ferr); + &info->dram_ferr); pci_read_config_byte(pvt->bridge_ck, E7XXX_DRAM_NERR, - &info->dram_nerr); + &info->dram_nerr); if ((info->dram_ferr & 1) || (info->dram_nerr & 1)) { pci_read_config_dword(pvt->bridge_ck, E7XXX_DRAM_CELOG_ADD, - &info->dram_celog_add); + &info->dram_celog_add); pci_read_config_word(pvt->bridge_ck, - E7XXX_DRAM_CELOG_SYNDROME, &info->dram_celog_syndrome); + E7XXX_DRAM_CELOG_SYNDROME, + &info->dram_celog_syndrome); } if ((info->dram_ferr & 2) || (info->dram_nerr & 2)) pci_read_config_dword(pvt->bridge_ck, E7XXX_DRAM_UELOG_ADD, - &info->dram_uelog_add); + &info->dram_uelog_add); if (info->dram_ferr & 3) - pci_write_bits8(pvt->bridge_ck, E7XXX_DRAM_FERR, 0x03, - 0x03); + pci_write_bits8(pvt->bridge_ck, E7XXX_DRAM_FERR, 0x03, 0x03); if (info->dram_nerr & 3) - pci_write_bits8(pvt->bridge_ck, E7XXX_DRAM_NERR, 0x03, - 0x03); + pci_write_bits8(pvt->bridge_ck, E7XXX_DRAM_NERR, 0x03, 0x03); } - static int e7xxx_process_error_info (struct mem_ctl_info *mci, struct e7xxx_error_info *info, int handle_errors) { @@ -325,17 +324,15 @@ static int e7xxx_process_error_info (struct mem_ctl_info *mci, return error_found; } - static void e7xxx_check(struct mem_ctl_info *mci) { struct e7xxx_error_info info; - debugf3("MC: " __FILE__ ": %s()\n", __func__); + debugf3("%s()\n", __func__); e7xxx_get_error_info(mci, &info); e7xxx_process_error_info(mci, &info, 1); } - static int e7xxx_probe1(struct pci_dev *pdev, int dev_idx) { int rc = -ENODEV; @@ -349,19 +346,20 @@ static int e7xxx_probe1(struct pci_dev *pdev, int dev_idx) int drc_ddim; /* DRAM Data Integrity Mode 0=none,2=edac */ u32 dra; unsigned long last_cumul_size; + struct e7xxx_error_info discard; - - debugf0("MC: " __FILE__ ": %s(): mci\n", __func__); + debugf0("%s(): mci\n", __func__); /* need to find out the number of channels */ pci_read_config_dword(pdev, E7XXX_DRC, &drc); + /* only e7501 can be single channel */ if (dev_idx == E7501) { drc_chan = ((drc >> 22) & 0x1); drc_drbg = (drc >> 18) & 0x3; } - drc_ddim = (drc >> 20) & 0x3; + drc_ddim = (drc >> 20) & 0x3; mci = edac_mc_alloc(sizeof(*pvt), E7XXX_NR_CSROWS, drc_chan + 1); if (mci == NULL) { @@ -369,33 +367,31 @@ static int e7xxx_probe1(struct pci_dev *pdev, int dev_idx) goto fail; } - debugf3("MC: " __FILE__ ": %s(): init 
mci\n", __func__); - + debugf3("%s(): init mci\n", __func__); mci->mtype_cap = MEM_FLAG_RDDR; - mci->edac_ctl_cap = - EDAC_FLAG_NONE | EDAC_FLAG_SECDED | EDAC_FLAG_S4ECD4ED; + mci->edac_ctl_cap = EDAC_FLAG_NONE | EDAC_FLAG_SECDED | + EDAC_FLAG_S4ECD4ED; /* FIXME - what if different memory types are in different csrows? */ - mci->mod_name = BS_MOD_STR; + mci->mod_name = EDAC_MOD_STR; mci->mod_ver = "$Revision: 1.5.2.9 $"; mci->pdev = pdev; - debugf3("MC: " __FILE__ ": %s(): init pvt\n", __func__); + debugf3("%s(): init pvt\n", __func__); pvt = (struct e7xxx_pvt *) mci->pvt_info; pvt->dev_info = &e7xxx_devs[dev_idx]; pvt->bridge_ck = pci_get_device(PCI_VENDOR_ID_INTEL, - pvt->dev_info->err_dev, - pvt->bridge_ck); + pvt->dev_info->err_dev, + pvt->bridge_ck); + if (!pvt->bridge_ck) { - printk(KERN_ERR - "MC: error reporting device not found:" - "vendor %x device 0x%x (broken BIOS?)\n", - PCI_VENDOR_ID_INTEL, e7xxx_devs[dev_idx].err_dev); + e7xxx_printk(KERN_ERR, "error reporting device not found:" + "vendor %x device 0x%x (broken BIOS?)\n", + PCI_VENDOR_ID_INTEL, e7xxx_devs[dev_idx].err_dev); goto fail; } - debugf3("MC: " __FILE__ ": %s(): more mci init\n", __func__); + debugf3("%s(): more mci init\n", __func__); mci->ctl_name = pvt->dev_info->ctl_name; - mci->edac_check = e7xxx_check; mci->ctl_page_to_phys = ctl_page_to_phys; @@ -418,17 +414,18 @@ static int e7xxx_probe1(struct pci_dev *pdev, int dev_idx) pci_read_config_byte(mci->pdev, E7XXX_DRB + index, &value); /* convert a 64 or 32 MiB DRB to a page size. */ cumul_size = value << (25 + drc_drbg - PAGE_SHIFT); - debugf3("MC: " __FILE__ ": %s(): (%d) cumul_size 0x%x\n", - __func__, index, cumul_size); + debugf3("%s(): (%d) cumul_size 0x%x\n", __func__, index, + cumul_size); + if (cumul_size == last_cumul_size) - continue; /* not populated */ + continue; /* not populated */ csrow->first_page = last_cumul_size; csrow->last_page = cumul_size - 1; csrow->nr_pages = cumul_size - last_cumul_size; last_cumul_size = cumul_size; - csrow->grain = 1 << 12; /* 4KiB - resolution of CELOG */ - csrow->mtype = MEM_RDDR; /* only one type supported */ + csrow->grain = 1 << 12; /* 4KiB - resolution of CELOG */ + csrow->mtype = MEM_RDDR; /* only one type supported */ csrow->dtype = mem_dev ? 
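For context on the csrow loop that follows: each DRB register holds a cumulative row-boundary count in 32 or 64 MiB units, so shifting the 8-bit value by (25 + drc_drbg - PAGE_SHIFT) turns it into a cumulative page count (25 being log2 of 32 MiB), and a value equal to the previous row's means the row is unpopulated. A small stand-alone sketch with invented DRB values, assuming drc_drbg == 0:

#include <stdio.h>

#define PAGE_SHIFT 12	/* 4 KiB pages */

int main(void)
{
	/* Invented cumulative DRB values, 32 MiB granularity (drc_drbg == 0);
	 * the repeated 16 marks an unpopulated row. */
	unsigned char drb[] = { 8, 16, 16, 24 };
	int drc_drbg = 0;
	unsigned long last_cumul_size = 0;
	int index;

	for (index = 0; index < 4; index++) {
		unsigned long cumul_size =
		    (unsigned long) drb[index] << (25 + drc_drbg - PAGE_SHIFT);

		if (cumul_size == last_cumul_size) {
			printf("csrow%d: not populated\n", index);
			continue;
		}

		printf("csrow%d: first_page 0x%05lx last_page 0x%05lx nr_pages 0x%05lx\n",
		       index, last_cumul_size, cumul_size - 1,
		       cumul_size - last_cumul_size);
		last_cumul_size = cumul_size;
	}
	return 0;
}

Here csrow0 covers the first 256 MiB (0x10000 pages), csrow1 the next 256 MiB, csrow2 is skipped, and csrow3 the 256 MiB after that.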
DEV_X4 : DEV_X8; /* @@ -449,8 +446,7 @@ static int e7xxx_probe1(struct pci_dev *pdev, int dev_idx) mci->edac_cap |= EDAC_FLAG_NONE; - debugf3("MC: " __FILE__ ": %s(): tolm, remapbase, remaplimit\n", - __func__); + debugf3("%s(): tolm, remapbase, remaplimit\n", __func__); /* load the top of low memory, remap base, and remap limit vars */ pci_read_config_word(mci->pdev, E7XXX_TOLM, &pci_data); pvt->tolm = ((u32) pci_data) << 4; @@ -458,22 +454,20 @@ static int e7xxx_probe1(struct pci_dev *pdev, int dev_idx) pvt->remapbase = ((u32) pci_data) << 14; pci_read_config_word(mci->pdev, E7XXX_REMAPLIMIT, &pci_data); pvt->remaplimit = ((u32) pci_data) << 14; - printk("tolm = %x, remapbase = %x, remaplimit = %x\n", pvt->tolm, - pvt->remapbase, pvt->remaplimit); + e7xxx_printk(KERN_INFO, + "tolm = %x, remapbase = %x, remaplimit = %x\n", pvt->tolm, + pvt->remapbase, pvt->remaplimit); /* clear any pending errors, or initial state bits */ - pci_write_bits8(pvt->bridge_ck, E7XXX_DRAM_FERR, 0x03, 0x03); - pci_write_bits8(pvt->bridge_ck, E7XXX_DRAM_NERR, 0x03, 0x03); + e7xxx_get_error_info(mci, &discard); if (edac_mc_add_mc(mci) != 0) { - debugf3("MC: " __FILE__ - ": %s(): failed edac_mc_add_mc()\n", - __func__); + debugf3("%s(): failed edac_mc_add_mc()\n", __func__); goto fail; } /* get this far and it's successful */ - debugf3("MC: " __FILE__ ": %s(): success\n", __func__); + debugf3("%s(): success\n", __func__); return 0; fail: @@ -487,62 +481,67 @@ fail: } /* returns count (>= 0), or negative on error */ -static int __devinit -e7xxx_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) +static int __devinit e7xxx_init_one(struct pci_dev *pdev, + const struct pci_device_id *ent) { - debugf0("MC: " __FILE__ ": %s()\n", __func__); + debugf0("%s()\n", __func__); /* wake up and enable device */ return pci_enable_device(pdev) ? - -EIO : e7xxx_probe1(pdev, ent->driver_data); + -EIO : e7xxx_probe1(pdev, ent->driver_data); } - static void __devexit e7xxx_remove_one(struct pci_dev *pdev) { struct mem_ctl_info *mci; struct e7xxx_pvt *pvt; - debugf0(__FILE__ ": %s()\n", __func__); + debugf0("%s()\n", __func__); - if (((mci = edac_mc_find_mci_by_pdev(pdev)) != 0) && - edac_mc_del_mc(mci)) { - pvt = (struct e7xxx_pvt *) mci->pvt_info; - pci_dev_put(pvt->bridge_ck); - edac_mc_free(mci); - } -} + if ((mci = edac_mc_del_mc(pdev)) == NULL) + return; + pvt = (struct e7xxx_pvt *) mci->pvt_info; + pci_dev_put(pvt->bridge_ck); + edac_mc_free(mci); +} static const struct pci_device_id e7xxx_pci_tbl[] __devinitdata = { - {PCI_VEND_DEV(INTEL, 7205_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0, - E7205}, - {PCI_VEND_DEV(INTEL, 7500_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0, - E7500}, - {PCI_VEND_DEV(INTEL, 7501_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0, - E7501}, - {PCI_VEND_DEV(INTEL, 7505_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0, - E7505}, - {0,} /* 0 terminated list. */ + { + PCI_VEND_DEV(INTEL, 7205_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0, + E7205 + }, + { + PCI_VEND_DEV(INTEL, 7500_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0, + E7500 + }, + { + PCI_VEND_DEV(INTEL, 7501_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0, + E7501 + }, + { + PCI_VEND_DEV(INTEL, 7505_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0, + E7505 + }, + { + 0, + } /* 0 terminated list. 
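Since edac_mc_del_mc() now takes the pci_dev and hands back the mci it removed (see the edac_mc.c hunks later in this patch), a driver's remove hook reduces to the shape sketched below. The function name is illustrative, but the call sequence matches the e7xxx_remove_one() hunk above:

static void __devexit example_remove_one(struct pci_dev *pdev)
{
	struct mem_ctl_info *mci;
	struct e7xxx_pvt *pvt;

	/* Tears down the sysfs entries and unlinks the mci from the global
	 * list; returns NULL if no mci was registered for this pdev. */
	mci = edac_mc_del_mc(pdev);
	if (mci == NULL)
		return;

	pvt = (struct e7xxx_pvt *) mci->pvt_info;
	pci_dev_put(pvt->bridge_ck);	/* drop the reference taken at probe time */
	edac_mc_free(mci);		/* with this patch, just a kfree() */
}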
*/ }; MODULE_DEVICE_TABLE(pci, e7xxx_pci_tbl); - static struct pci_driver e7xxx_driver = { - .name = BS_MOD_STR, + .name = EDAC_MOD_STR, .probe = e7xxx_init_one, .remove = __devexit_p(e7xxx_remove_one), .id_table = e7xxx_pci_tbl, }; - static int __init e7xxx_init(void) { return pci_register_driver(&e7xxx_driver); } - static void __exit e7xxx_exit(void) { pci_unregister_driver(&e7xxx_driver); @@ -551,8 +550,7 @@ static void __exit e7xxx_exit(void) module_init(e7xxx_init); module_exit(e7xxx_exit); - MODULE_LICENSE("GPL"); MODULE_AUTHOR("Linux Networx (http://lnxi.com) Thayne Harbaugh et al\n" - "Based on.work by Dan Hollis et al"); + "Based on.work by Dan Hollis et al"); MODULE_DESCRIPTION("MC support for Intel e7xxx memory controllers"); diff --git a/drivers/edac/edac_mc.c b/drivers/edac/edac_mc.c index 9c205274c1cb..ea06e3a4dc35 100644 --- a/drivers/edac/edac_mc.c +++ b/drivers/edac/edac_mc.c @@ -12,7 +12,6 @@ * */ - #include <linux/config.h> #include <linux/module.h> #include <linux/proc_fs.h> @@ -29,14 +28,13 @@ #include <linux/list.h> #include <linux/sysdev.h> #include <linux/ctype.h> - +#include <linux/kthread.h> #include <asm/uaccess.h> #include <asm/page.h> #include <asm/edac.h> - #include "edac_mc.h" -#define EDAC_MC_VERSION "edac_mc Ver: 2.0.0 " __DATE__ +#define EDAC_MC_VERSION "Ver: 2.0.0 " __DATE__ /* For now, disable the EDAC sysfs code. The sysfs interface that EDAC * presents to user space needs more thought, and is likely to change @@ -47,7 +45,7 @@ #ifdef CONFIG_EDAC_DEBUG /* Values of 0 to 4 will generate output */ int edac_debug_level = 1; -EXPORT_SYMBOL(edac_debug_level); +EXPORT_SYMBOL_GPL(edac_debug_level); #endif /* EDAC Controls, setable by module parameter, and sysfs */ @@ -64,13 +62,14 @@ static atomic_t pci_parity_count = ATOMIC_INIT(0); static DECLARE_MUTEX(mem_ctls_mutex); static struct list_head mc_devices = LIST_HEAD_INIT(mc_devices); +static struct task_struct *edac_thread; + /* Structure of the whitelist and blacklist arrays */ struct edac_pci_device_list { unsigned int vendor; /* Vendor ID */ unsigned int device; /* Deviice ID */ }; - #define MAX_LISTED_PCI_DEVICES 32 /* List of PCI devices (vendor-id:device-id) that should be skipped */ @@ -123,7 +122,6 @@ static const char *edac_caps[] = { [EDAC_S16ECD16ED] = "S16ECD16ED" }; - /* sysfs object: /sys/devices/system/edac */ static struct sysdev_class edac_class = { set_kset_name("edac"), @@ -136,9 +134,15 @@ static struct sysdev_class edac_class = { static struct kobject edac_memctrl_kobj; static struct kobject edac_pci_kobj; +/* We use these to wait for the reference counts on edac_memctrl_kobj and + * edac_pci_kobj to reach 0. 
+ */ +static struct completion edac_memctrl_kobj_complete; +static struct completion edac_pci_kobj_complete; + /* * /sys/devices/system/edac/mc; - * data structures and methods + * data structures and methods */ #if 0 static ssize_t memctrl_string_show(void *ptr, char *buffer) @@ -165,33 +169,34 @@ static ssize_t memctrl_int_store(void *ptr, const char *buffer, size_t count) } struct memctrl_dev_attribute { - struct attribute attr; - void *value; + struct attribute attr; + void *value; ssize_t (*show)(void *,char *); ssize_t (*store)(void *, const char *, size_t); }; /* Set of show/store abstract level functions for memory control object */ -static ssize_t -memctrl_dev_show(struct kobject *kobj, struct attribute *attr, char *buffer) +static ssize_t memctrl_dev_show(struct kobject *kobj, + struct attribute *attr, char *buffer) { struct memctrl_dev_attribute *memctrl_dev; memctrl_dev = (struct memctrl_dev_attribute*)attr; if (memctrl_dev->show) return memctrl_dev->show(memctrl_dev->value, buffer); + return -EIO; } -static ssize_t -memctrl_dev_store(struct kobject *kobj, struct attribute *attr, - const char *buffer, size_t count) +static ssize_t memctrl_dev_store(struct kobject *kobj, struct attribute *attr, + const char *buffer, size_t count) { struct memctrl_dev_attribute *memctrl_dev; memctrl_dev = (struct memctrl_dev_attribute*)attr; if (memctrl_dev->store) return memctrl_dev->store(memctrl_dev->value, buffer, count); + return -EIO; } @@ -227,7 +232,6 @@ MEMCTRL_ATTR(log_ue,S_IRUGO|S_IWUSR,memctrl_int_show,memctrl_int_store); MEMCTRL_ATTR(log_ce,S_IRUGO|S_IWUSR,memctrl_int_show,memctrl_int_store); MEMCTRL_ATTR(poll_msec,S_IRUGO|S_IWUSR,memctrl_int_show,memctrl_int_store); - /* Base Attributes of the memory ECC object */ static struct memctrl_dev_attribute *memctrl_attr[] = { &attr_panic_on_ue, @@ -240,13 +244,14 @@ static struct memctrl_dev_attribute *memctrl_attr[] = { /* Main MC kobject release() function */ static void edac_memctrl_master_release(struct kobject *kobj) { - debugf1("EDAC MC: " __FILE__ ": %s()\n", __func__); + debugf1("%s()\n", __func__); + complete(&edac_memctrl_kobj_complete); } static struct kobj_type ktype_memctrl = { - .release = edac_memctrl_master_release, - .sysfs_ops = &memctrlfs_ops, - .default_attrs = (struct attribute **) memctrl_attr, + .release = edac_memctrl_master_release, + .sysfs_ops = &memctrlfs_ops, + .default_attrs = (struct attribute **) memctrl_attr, }; #endif /* DISABLE_EDAC_SYSFS */ @@ -268,32 +273,31 @@ static int edac_sysfs_memctrl_setup(void) { int err=0; - debugf1("MC: " __FILE__ ": %s()\n", __func__); + debugf1("%s()\n", __func__); /* create the /sys/devices/system/edac directory */ err = sysdev_class_register(&edac_class); + if (!err) { /* Init the MC's kobject */ memset(&edac_memctrl_kobj, 0, sizeof (edac_memctrl_kobj)); - kobject_init(&edac_memctrl_kobj); - edac_memctrl_kobj.parent = &edac_class.kset.kobj; edac_memctrl_kobj.ktype = &ktype_memctrl; /* generate sysfs "..../edac/mc" */ err = kobject_set_name(&edac_memctrl_kobj,"mc"); + if (!err) { /* FIXME: maybe new sysdev_create_subdir() */ err = kobject_register(&edac_memctrl_kobj); - if (err) { + + if (err) debugf1("Failed to register '.../edac/mc'\n"); - } else { + else debugf1("Registered '.../edac/mc' kobject\n"); - } } - } else { - debugf1(KERN_WARNING "__FILE__ %s() error=%d\n", __func__,err); - } + } else + debugf1("%s() error=%d\n", __func__, err); return err; } @@ -308,11 +312,12 @@ static void edac_sysfs_memctrl_teardown(void) #ifndef DISABLE_EDAC_SYSFS debugf0("MC: " __FILE__ 
": %s()\n", __func__); - /* Unregister the MC's kobject */ + /* Unregister the MC's kobject and wait for reference count to reach + * 0. + */ + init_completion(&edac_memctrl_kobj_complete); kobject_unregister(&edac_memctrl_kobj); - - /* release the master edac mc kobject */ - kobject_put(&edac_memctrl_kobj); + wait_for_completion(&edac_memctrl_kobj_complete); /* Unregister the 'edac' object */ sysdev_class_unregister(&edac_class); @@ -331,7 +336,6 @@ struct list_control { int *count; }; - #if 0 /* Output the list as: vendor_id:device:id<,vendor_id:device_id> */ static ssize_t edac_pci_list_string_show(void *ptr, char *buffer) @@ -356,7 +360,6 @@ static ssize_t edac_pci_list_string_show(void *ptr, char *buffer) } len += snprintf(p + len,(PAGE_SIZE-len), "\n"); - return (ssize_t) len; } @@ -378,7 +381,7 @@ static int parse_one_device(const char **s,const char **e, /* if null byte, we are done */ if (!**s) { - (*s)++; /* keep *s moving */ + (*s)++; /* keep *s moving */ return 0; } @@ -395,6 +398,7 @@ static int parse_one_device(const char **s,const char **e, /* parse vendor_id */ runner = *s; + while (runner < *e) { /* scan for vendor:device delimiter */ if (*runner == ':') { @@ -402,6 +406,7 @@ static int parse_one_device(const char **s,const char **e, runner = p + 1; break; } + runner++; } @@ -417,12 +422,11 @@ static int parse_one_device(const char **s,const char **e, } *s = runner; - return 1; } static ssize_t edac_pci_list_string_store(void *ptr, const char *buffer, - size_t count) + size_t count) { struct list_control *listctl; struct edac_pci_device_list *list; @@ -432,14 +436,12 @@ static ssize_t edac_pci_list_string_store(void *ptr, const char *buffer, s = (char*)buffer; e = s + count; - listctl = ptr; list = listctl->list; index = listctl->count; - *index = 0; - while (*index < MAX_LISTED_PCI_DEVICES) { + while (*index < MAX_LISTED_PCI_DEVICES) { if (parse_one_device(&s,&e,&vendor_id,&device_id)) { list[ *index ].vendor = vendor_id; list[ *index ].device = device_id; @@ -472,15 +474,15 @@ static ssize_t edac_pci_int_store(void *ptr, const char *buffer, size_t count) } struct edac_pci_dev_attribute { - struct attribute attr; - void *value; + struct attribute attr; + void *value; ssize_t (*show)(void *,char *); ssize_t (*store)(void *, const char *,size_t); }; /* Set of show/store abstract level functions for PCI Parity object */ static ssize_t edac_pci_dev_show(struct kobject *kobj, struct attribute *attr, - char *buffer) + char *buffer) { struct edac_pci_dev_attribute *edac_pci_dev; edac_pci_dev= (struct edac_pci_dev_attribute*)attr; @@ -490,8 +492,8 @@ static ssize_t edac_pci_dev_show(struct kobject *kobj, struct attribute *attr, return -EIO; } -static ssize_t edac_pci_dev_store(struct kobject *kobj, struct attribute *attr, - const char *buffer, size_t count) +static ssize_t edac_pci_dev_store(struct kobject *kobj, + struct attribute *attr, const char *buffer, size_t count) { struct edac_pci_dev_attribute *edac_pci_dev; edac_pci_dev= (struct edac_pci_dev_attribute*)attr; @@ -506,7 +508,6 @@ static struct sysfs_ops edac_pci_sysfs_ops = { .store = edac_pci_dev_store }; - #define EDAC_PCI_ATTR(_name,_mode,_show,_store) \ struct edac_pci_dev_attribute edac_pci_attr_##_name = { \ .attr = {.name = __stringify(_name), .mode = _mode }, \ @@ -549,9 +550,11 @@ EDAC_PCI_STRING_ATTR(pci_parity_blacklist, #endif /* PCI Parity control files */ -EDAC_PCI_ATTR(check_pci_parity,S_IRUGO|S_IWUSR,edac_pci_int_show,edac_pci_int_store); 
-EDAC_PCI_ATTR(panic_on_pci_parity,S_IRUGO|S_IWUSR,edac_pci_int_show,edac_pci_int_store); -EDAC_PCI_ATTR(pci_parity_count,S_IRUGO,edac_pci_int_show,NULL); +EDAC_PCI_ATTR(check_pci_parity, S_IRUGO|S_IWUSR, edac_pci_int_show, + edac_pci_int_store); +EDAC_PCI_ATTR(panic_on_pci_parity, S_IRUGO|S_IWUSR, edac_pci_int_show, + edac_pci_int_store); +EDAC_PCI_ATTR(pci_parity_count, S_IRUGO, edac_pci_int_show, NULL); /* Base Attributes of the memory ECC object */ static struct edac_pci_dev_attribute *edac_pci_attr[] = { @@ -564,13 +567,14 @@ static struct edac_pci_dev_attribute *edac_pci_attr[] = { /* No memory to release */ static void edac_pci_release(struct kobject *kobj) { - debugf1("EDAC PCI: " __FILE__ ": %s()\n", __func__); + debugf1("%s()\n", __func__); + complete(&edac_pci_kobj_complete); } static struct kobj_type ktype_edac_pci = { - .release = edac_pci_release, - .sysfs_ops = &edac_pci_sysfs_ops, - .default_attrs = (struct attribute **) edac_pci_attr, + .release = edac_pci_release, + .sysfs_ops = &edac_pci_sysfs_ops, + .default_attrs = (struct attribute **) edac_pci_attr, }; #endif /* DISABLE_EDAC_SYSFS */ @@ -588,24 +592,24 @@ static int edac_sysfs_pci_setup(void) { int err; - debugf1("MC: " __FILE__ ": %s()\n", __func__); + debugf1("%s()\n", __func__); memset(&edac_pci_kobj, 0, sizeof(edac_pci_kobj)); - - kobject_init(&edac_pci_kobj); edac_pci_kobj.parent = &edac_class.kset.kobj; edac_pci_kobj.ktype = &ktype_edac_pci; - err = kobject_set_name(&edac_pci_kobj, "pci"); + if (!err) { /* Instanstiate the csrow object */ /* FIXME: maybe new sysdev_create_subdir() */ err = kobject_register(&edac_pci_kobj); + if (err) debugf1("Failed to register '.../edac/pci'\n"); else debugf1("Registered '.../edac/pci' kobject\n"); } + return err; } #endif /* DISABLE_EDAC_SYSFS */ @@ -613,10 +617,10 @@ static int edac_sysfs_pci_setup(void) static void edac_sysfs_pci_teardown(void) { #ifndef DISABLE_EDAC_SYSFS - debugf0("MC: " __FILE__ ": %s()\n", __func__); - + debugf0("%s()\n", __func__); + init_completion(&edac_pci_kobj_complete); kobject_unregister(&edac_pci_kobj); - kobject_put(&edac_pci_kobj); + wait_for_completion(&edac_pci_kobj_complete); #endif } @@ -633,6 +637,7 @@ static ssize_t csrow_ch0_dimm_label_show(struct csrow_info *csrow, char *data) size = snprintf(data, EDAC_MC_LABEL_LEN,"%s\n", csrow->channels[0].label); } + return size; } @@ -644,11 +649,12 @@ static ssize_t csrow_ch1_dimm_label_show(struct csrow_info *csrow, char *data) size = snprintf(data, EDAC_MC_LABEL_LEN, "%s\n", csrow->channels[1].label); } + return size; } static ssize_t csrow_ch0_dimm_label_store(struct csrow_info *csrow, - const char *data, size_t size) + const char *data, size_t size) { ssize_t max_size = 0; @@ -657,11 +663,12 @@ static ssize_t csrow_ch0_dimm_label_store(struct csrow_info *csrow, strncpy(csrow->channels[0].label, data, max_size); csrow->channels[0].label[max_size] = '\0'; } + return size; } static ssize_t csrow_ch1_dimm_label_store(struct csrow_info *csrow, - const char *data, size_t size) + const char *data, size_t size) { ssize_t max_size = 0; @@ -670,6 +677,7 @@ static ssize_t csrow_ch1_dimm_label_store(struct csrow_info *csrow, strncpy(csrow->channels[1].label, data, max_size); csrow->channels[1].label[max_size] = '\0'; } + return max_size; } @@ -690,6 +698,7 @@ static ssize_t csrow_ch0_ce_count_show(struct csrow_info *csrow, char *data) if (csrow->nr_channels > 0) { size = sprintf(data,"%u\n", csrow->channels[0].ce_count); } + return size; } @@ -700,6 +709,7 @@ static ssize_t csrow_ch1_ce_count_show(struct 
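The setup/teardown hunks above all follow one pattern: a kobject's release() callback may only run once the last reference is dropped, which can be well after kobject_unregister() returns, so the code arms a completion before unregistering, has release() complete it, and blocks until it fires. A minimal sketch of that idiom against the 2.6-era kobject API (the example_* names are invented):

#include <linux/kobject.h>
#include <linux/completion.h>

static struct kobject example_kobj;
static struct completion example_kobj_complete;

static void example_release(struct kobject *kobj)
{
	/* Called when the last reference goes away -- possibly long after
	 * kobject_unregister() has returned. */
	complete(&example_kobj_complete);
}

/* Wired up as example_kobj.ktype before kobject_register(), as in the
 * hunks above. */
static struct kobj_type example_ktype = {
	.release = example_release,
};

static void example_teardown(void)
{
	init_completion(&example_kobj_complete);
	kobject_unregister(&example_kobj);
	/* Don't let module unload race a straggling reference holder: wait
	 * here until release() has actually run. */
	wait_for_completion(&example_kobj_complete);
}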
csrow_info *csrow, char *data) if (csrow->nr_channels > 1) { size = sprintf(data,"%u\n", csrow->channels[1].ce_count); } + return size; } @@ -724,7 +734,7 @@ static ssize_t csrow_edac_mode_show(struct csrow_info *csrow, char *data) } struct csrowdev_attribute { - struct attribute attr; + struct attribute attr; ssize_t (*show)(struct csrow_info *,char *); ssize_t (*store)(struct csrow_info *, const char *,size_t); }; @@ -734,24 +744,26 @@ struct csrowdev_attribute { /* Set of show/store higher level functions for csrow objects */ static ssize_t csrowdev_show(struct kobject *kobj, struct attribute *attr, - char *buffer) + char *buffer) { struct csrow_info *csrow = to_csrow(kobj); struct csrowdev_attribute *csrowdev_attr = to_csrowdev_attr(attr); if (csrowdev_attr->show) return csrowdev_attr->show(csrow, buffer); + return -EIO; } static ssize_t csrowdev_store(struct kobject *kobj, struct attribute *attr, - const char *buffer, size_t count) + const char *buffer, size_t count) { struct csrow_info *csrow = to_csrow(kobj); struct csrowdev_attribute * csrowdev_attr = to_csrowdev_attr(attr); if (csrowdev_attr->store) return csrowdev_attr->store(csrow, buffer, count); + return -EIO; } @@ -785,7 +797,6 @@ CSROWDEV_ATTR(ch1_dimm_label,S_IRUGO|S_IWUSR, csrow_ch1_dimm_label_show, csrow_ch1_dimm_label_store); - /* Attributes of the CSROW<id> object */ static struct csrowdev_attribute *csrow_attr[] = { &attr_dev_type, @@ -801,40 +812,43 @@ static struct csrowdev_attribute *csrow_attr[] = { NULL, }; - /* No memory to release */ static void edac_csrow_instance_release(struct kobject *kobj) { - debugf1("EDAC MC: " __FILE__ ": %s()\n", __func__); + struct csrow_info *cs; + + debugf1("%s()\n", __func__); + cs = container_of(kobj, struct csrow_info, kobj); + complete(&cs->kobj_complete); } static struct kobj_type ktype_csrow = { - .release = edac_csrow_instance_release, - .sysfs_ops = &csrowfs_ops, - .default_attrs = (struct attribute **) csrow_attr, + .release = edac_csrow_instance_release, + .sysfs_ops = &csrowfs_ops, + .default_attrs = (struct attribute **) csrow_attr, }; /* Create a CSROW object under specifed edac_mc_device */ static int edac_create_csrow_object(struct kobject *edac_mci_kobj, - struct csrow_info *csrow, int index ) + struct csrow_info *csrow, int index) { int err = 0; - debugf0("MC: " __FILE__ ": %s()\n", __func__); - + debugf0("%s()\n", __func__); memset(&csrow->kobj, 0, sizeof(csrow->kobj)); /* generate ..../edac/mc/mc<id>/csrow<index> */ - kobject_init(&csrow->kobj); csrow->kobj.parent = edac_mci_kobj; csrow->kobj.ktype = &ktype_csrow; /* name this instance of csrow<id> */ err = kobject_set_name(&csrow->kobj,"csrow%d",index); + if (!err) { /* Instanstiate the csrow object */ err = kobject_register(&csrow->kobj); + if (err) debugf0("Failed to register CSROW%d\n",index); else @@ -846,8 +860,8 @@ static int edac_create_csrow_object(struct kobject *edac_mci_kobj, /* sysfs data structures and methods for the MCI kobjects */ -static ssize_t mci_reset_counters_store(struct mem_ctl_info *mci, - const char *data, size_t count ) +static ssize_t mci_reset_counters_store(struct mem_ctl_info *mci, + const char *data, size_t count) { int row, chan; @@ -855,16 +869,18 @@ static ssize_t mci_reset_counters_store(struct mem_ctl_info *mci, mci->ce_noinfo_count = 0; mci->ue_count = 0; mci->ce_count = 0; + for (row = 0; row < mci->nr_csrows; row++) { struct csrow_info *ri = &mci->csrows[row]; ri->ue_count = 0; ri->ce_count = 0; + for (chan = 0; chan < ri->nr_channels; chan++) ri->channels[chan].ce_count = 
0; } - mci->start_time = jiffies; + mci->start_time = jiffies; return count; } @@ -922,18 +938,16 @@ static ssize_t mci_edac_capability_show(struct mem_ctl_info *mci, char *data) p += mci_output_edac_cap(p,mci->edac_ctl_cap); p += sprintf(p, "\n"); - return p - data; } static ssize_t mci_edac_current_capability_show(struct mem_ctl_info *mci, - char *data) + char *data) { char *p = data; p += mci_output_edac_cap(p,mci->edac_cap); p += sprintf(p, "\n"); - return p - data; } @@ -950,13 +964,13 @@ static int mci_output_mtype_cap(char *buf, unsigned long mtype_cap) return p - buf; } -static ssize_t mci_supported_mem_type_show(struct mem_ctl_info *mci, char *data) +static ssize_t mci_supported_mem_type_show(struct mem_ctl_info *mci, + char *data) { char *p = data; p += mci_output_mtype_cap(p,mci->mtype_cap); p += sprintf(p, "\n"); - return p - data; } @@ -970,6 +984,7 @@ static ssize_t mci_size_mb_show(struct mem_ctl_info *mci, char *data) if (!csrow->nr_pages) continue; + total_pages += csrow->nr_pages; } @@ -977,7 +992,7 @@ static ssize_t mci_size_mb_show(struct mem_ctl_info *mci, char *data) } struct mcidev_attribute { - struct attribute attr; + struct attribute attr; ssize_t (*show)(struct mem_ctl_info *,char *); ssize_t (*store)(struct mem_ctl_info *, const char *,size_t); }; @@ -986,30 +1001,32 @@ struct mcidev_attribute { #define to_mcidev_attr(a) container_of(a, struct mcidev_attribute, attr) static ssize_t mcidev_show(struct kobject *kobj, struct attribute *attr, - char *buffer) + char *buffer) { struct mem_ctl_info *mem_ctl_info = to_mci(kobj); struct mcidev_attribute * mcidev_attr = to_mcidev_attr(attr); if (mcidev_attr->show) return mcidev_attr->show(mem_ctl_info, buffer); + return -EIO; } static ssize_t mcidev_store(struct kobject *kobj, struct attribute *attr, - const char *buffer, size_t count) + const char *buffer, size_t count) { struct mem_ctl_info *mem_ctl_info = to_mci(kobj); struct mcidev_attribute * mcidev_attr = to_mcidev_attr(attr); if (mcidev_attr->store) return mcidev_attr->store(mem_ctl_info, buffer, count); + return -EIO; } static struct sysfs_ops mci_ops = { - .show = mcidev_show, - .store = mcidev_store + .show = mcidev_show, + .store = mcidev_store }; #define MCIDEV_ATTR(_name,_mode,_show,_store) \ @@ -1037,7 +1054,6 @@ MCIDEV_ATTR(edac_current_capability,S_IRUGO, MCIDEV_ATTR(supported_mem_type,S_IRUGO, mci_supported_mem_type_show,NULL); - static struct mcidev_attribute *mci_attr[] = { &mci_attr_reset_counters, &mci_attr_module_name, @@ -1054,25 +1070,22 @@ static struct mcidev_attribute *mci_attr[] = { NULL }; - /* * Release of a MC controlling instance */ static void edac_mci_instance_release(struct kobject *kobj) { struct mem_ctl_info *mci; - mci = container_of(kobj,struct mem_ctl_info,edac_mci_kobj); - debugf0("MC: " __FILE__ ": %s() idx=%d calling kfree\n", - __func__, mci->mc_idx); - - kfree(mci); + mci = to_mci(kobj); + debugf0("%s() idx=%d\n", __func__, mci->mc_idx); + complete(&mci->kobj_complete); } static struct kobj_type ktype_mci = { - .release = edac_mci_instance_release, - .sysfs_ops = &mci_ops, - .default_attrs = (struct attribute **) mci_attr, + .release = edac_mci_instance_release, + .sysfs_ops = &mci_ops, + .default_attrs = (struct attribute **) mci_attr, }; #endif /* DISABLE_EDAC_SYSFS */ @@ -1099,13 +1112,12 @@ static int edac_create_sysfs_mci_device(struct mem_ctl_info *mci) struct csrow_info *csrow; struct kobject *edac_mci_kobj=&mci->edac_mci_kobj; - debugf0("MC: " __FILE__ ": %s() idx=%d\n", __func__, mci->mc_idx); - + debugf0("%s() 
idx=%d\n", __func__, mci->mc_idx); memset(edac_mci_kobj, 0, sizeof(*edac_mci_kobj)); - kobject_init(edac_mci_kobj); /* set the name of the mc<id> object */ err = kobject_set_name(edac_mci_kobj,"mc%d",mci->mc_idx); + if (err) return err; @@ -1115,50 +1127,48 @@ static int edac_create_sysfs_mci_device(struct mem_ctl_info *mci) /* register the mc<id> kobject */ err = kobject_register(edac_mci_kobj); + if (err) return err; /* create a symlink for the device */ err = sysfs_create_link(edac_mci_kobj, &mci->pdev->dev.kobj, EDAC_DEVICE_SYMLINK); - if (err) { - kobject_unregister(edac_mci_kobj); - return err; - } + + if (err) + goto fail0; /* Make directories for each CSROW object * under the mc<id> kobject */ for (i = 0; i < mci->nr_csrows; i++) { - csrow = &mci->csrows[i]; /* Only expose populated CSROWs */ if (csrow->nr_pages > 0) { err = edac_create_csrow_object(edac_mci_kobj,csrow,i); + if (err) - goto fail; + goto fail1; } } - /* Mark this MCI instance as having sysfs entries */ - mci->sysfs_active = MCI_SYSFS_ACTIVE; - return 0; - /* CSROW error: backout what has already been registered, */ -fail: +fail1: for ( i--; i >= 0; i--) { if (csrow->nr_pages > 0) { + init_completion(&csrow->kobj_complete); kobject_unregister(&mci->csrows[i].kobj); - kobject_put(&mci->csrows[i].kobj); + wait_for_completion(&csrow->kobj_complete); } } +fail0: + init_completion(&mci->kobj_complete); kobject_unregister(edac_mci_kobj); - kobject_put(edac_mci_kobj); - + wait_for_completion(&mci->kobj_complete); return err; } #endif /* DISABLE_EDAC_SYSFS */ @@ -1171,20 +1181,21 @@ static void edac_remove_sysfs_mci_device(struct mem_ctl_info *mci) #ifndef DISABLE_EDAC_SYSFS int i; - debugf0("MC: " __FILE__ ": %s()\n", __func__); + debugf0("%s()\n", __func__); /* remove all csrow kobjects */ for (i = 0; i < mci->nr_csrows; i++) { - if (mci->csrows[i].nr_pages > 0) { + if (mci->csrows[i].nr_pages > 0) { + init_completion(&mci->csrows[i].kobj_complete); kobject_unregister(&mci->csrows[i].kobj); - kobject_put(&mci->csrows[i].kobj); + wait_for_completion(&mci->csrows[i].kobj_complete); } } sysfs_remove_link(&mci->edac_mci_kobj, EDAC_DEVICE_SYMLINK); - + init_completion(&mci->kobj_complete); kobject_unregister(&mci->edac_mci_kobj); - kobject_put(&mci->edac_mci_kobj); + wait_for_completion(&mci->kobj_complete); #endif /* DISABLE_EDAC_SYSFS */ } @@ -1192,8 +1203,6 @@ static void edac_remove_sysfs_mci_device(struct mem_ctl_info *mci) #ifdef CONFIG_EDAC_DEBUG -EXPORT_SYMBOL(edac_mc_dump_channel); - void edac_mc_dump_channel(struct channel_info *chan) { debugf4("\tchannel = %p\n", chan); @@ -1202,9 +1211,7 @@ void edac_mc_dump_channel(struct channel_info *chan) debugf4("\tchannel->label = '%s'\n", chan->label); debugf4("\tchannel->csrow = %p\n\n", chan->csrow); } - - -EXPORT_SYMBOL(edac_mc_dump_csrow); +EXPORT_SYMBOL_GPL(edac_mc_dump_channel); void edac_mc_dump_csrow(struct csrow_info *csrow) { @@ -1220,9 +1227,7 @@ void edac_mc_dump_csrow(struct csrow_info *csrow) debugf4("\tcsrow->channels = %p\n", csrow->channels); debugf4("\tcsrow->mci = %p\n\n", csrow->mci); } - - -EXPORT_SYMBOL(edac_mc_dump_mci); +EXPORT_SYMBOL_GPL(edac_mc_dump_csrow); void edac_mc_dump_mci(struct mem_ctl_info *mci) { @@ -1238,9 +1243,9 @@ void edac_mc_dump_mci(struct mem_ctl_info *mci) mci->mod_name, mci->ctl_name); debugf3("\tpvt_info = %p\n\n", mci->pvt_info); } +EXPORT_SYMBOL_GPL(edac_mc_dump_mci); - -#endif /* CONFIG_EDAC_DEBUG */ +#endif /* CONFIG_EDAC_DEBUG */ /* 'ptr' points to a possibly unaligned item X such that sizeof(X) is 'size'. 
* Adjust 'ptr' so that its alignment is at least as stringent as what the @@ -1249,7 +1254,7 @@ void edac_mc_dump_mci(struct mem_ctl_info *mci) * If 'size' is a constant, the compiler will optimize this whole function * down to either a no-op or the addition of a constant to the value of 'ptr'. */ -static inline char * align_ptr (void *ptr, unsigned size) +static inline char * align_ptr(void *ptr, unsigned size) { unsigned align, r; @@ -1276,9 +1281,6 @@ static inline char * align_ptr (void *ptr, unsigned size) return (char *) (((unsigned long) ptr) + align - r); } - -EXPORT_SYMBOL(edac_mc_alloc); - /** * edac_mc_alloc: Allocate a struct mem_ctl_info structure * @size_pvt: size of private storage needed @@ -1296,7 +1298,7 @@ EXPORT_SYMBOL(edac_mc_alloc); * struct mem_ctl_info pointer */ struct mem_ctl_info *edac_mc_alloc(unsigned sz_pvt, unsigned nr_csrows, - unsigned nr_chans) + unsigned nr_chans) { struct mem_ctl_info *mci; struct csrow_info *csi, *csrow; @@ -1327,8 +1329,7 @@ struct mem_ctl_info *edac_mc_alloc(unsigned sz_pvt, unsigned nr_csrows, chi = (struct channel_info *) (((char *) mci) + ((unsigned long) chi)); pvt = sz_pvt ? (((char *) mci) + ((unsigned long) pvt)) : NULL; - memset(mci, 0, size); /* clear all fields */ - + memset(mci, 0, size); /* clear all fields */ mci->csrows = csi; mci->pvt_info = pvt; mci->nr_csrows = nr_csrows; @@ -1350,50 +1351,24 @@ struct mem_ctl_info *edac_mc_alloc(unsigned sz_pvt, unsigned nr_csrows, return mci; } - - -EXPORT_SYMBOL(edac_mc_free); +EXPORT_SYMBOL_GPL(edac_mc_alloc); /** * edac_mc_free: Free a previously allocated 'mci' structure * @mci: pointer to a struct mem_ctl_info structure - * - * Free up a previously allocated mci structure - * A MCI structure can be in 2 states after being allocated - * by edac_mc_alloc(). - * 1) Allocated in a MC driver's probe, but not yet committed - * 2) Allocated and committed, by a call to edac_mc_add_mc() - * edac_mc_add_mc() is the function that adds the sysfs entries - * thus, this free function must determine which state the 'mci' - * structure is in, then either free it directly or - * perform kobject cleanup by calling edac_remove_sysfs_mci_device(). - * - * VOID Return */ void edac_mc_free(struct mem_ctl_info *mci) { - /* only if sysfs entries for this mci instance exist - * do we remove them and defer the actual kfree via - * the kobject 'release()' callback. - * - * Otherwise, do a straight kfree now. 
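edac_mc_alloc() above packs the mem_ctl_info, the csrow array, the channel array and the driver-private area into a single kmalloc(): align_ptr() produces suitably aligned offsets, effectively measured against a NULL base, and the lines that add ((char *) mci) back in rebase those offsets onto the real allocation. A condensed sketch of the technique, assuming sz_pvt > 0, leaving out the channel array, and reusing align_ptr() and struct csrow_info from the code above (the one_shot names are invented):

#include <linux/slab.h>
#include <linux/string.h>

struct one_shot {
	struct csrow_info *csrows;	/* points into the same allocation */
	void *pvt_info;			/* driver-private tail */
};

static struct one_shot *one_shot_alloc(unsigned nr_csrows, unsigned sz_pvt)
{
	struct one_shot *null_base = (struct one_shot *) 0;
	struct one_shot *p;
	struct csrow_info *csi;
	void *pvt;
	unsigned size;

	/* Pass 1: compute aligned offsets, pretending the base is NULL. */
	csi = (struct csrow_info *) align_ptr(&null_base[1], sizeof(*csi));
	pvt = align_ptr(&csi[nr_csrows], sz_pvt);
	size = ((unsigned long) pvt) + sz_pvt;

	p = kmalloc(size, GFP_KERNEL);
	if (p == NULL)
		return NULL;

	memset(p, 0, size);	/* clear everything, as edac_mc_alloc() does */

	/* Pass 2: turn the offsets into pointers inside the allocation. */
	p->csrows = (struct csrow_info *) (((char *) p) + (unsigned long) csi);
	p->pvt_info = ((char *) p) + (unsigned long) pvt;
	return p;
}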
- */ - if (mci->sysfs_active == MCI_SYSFS_ACTIVE) - edac_remove_sysfs_mci_device(mci); - else - kfree(mci); + kfree(mci); } +EXPORT_SYMBOL_GPL(edac_mc_free); - - -EXPORT_SYMBOL(edac_mc_find_mci_by_pdev); - -struct mem_ctl_info *edac_mc_find_mci_by_pdev(struct pci_dev *pdev) +static struct mem_ctl_info *find_mci_by_pdev(struct pci_dev *pdev) { struct mem_ctl_info *mci; struct list_head *item; - debugf3("MC: " __FILE__ ": %s()\n", __func__); + debugf3("%s()\n", __func__); list_for_each(item, &mc_devices) { mci = list_entry(item, struct mem_ctl_info, link); @@ -1405,7 +1380,7 @@ struct mem_ctl_info *edac_mc_find_mci_by_pdev(struct pci_dev *pdev) return NULL; } -static int add_mc_to_global_list (struct mem_ctl_info *mci) +static int add_mc_to_global_list(struct mem_ctl_info *mci) { struct list_head *item, *insert_before; struct mem_ctl_info *p; @@ -1415,11 +1390,12 @@ static int add_mc_to_global_list (struct mem_ctl_info *mci) mci->mc_idx = 0; insert_before = &mc_devices; } else { - if (edac_mc_find_mci_by_pdev(mci->pdev)) { - printk(KERN_WARNING - "EDAC MC: %s (%s) %s %s already assigned %d\n", - mci->pdev->dev.bus_id, pci_name(mci->pdev), - mci->mod_name, mci->ctl_name, mci->mc_idx); + if (find_mci_by_pdev(mci->pdev)) { + edac_printk(KERN_WARNING, EDAC_MC, + "%s (%s) %s %s already assigned %d\n", + mci->pdev->dev.bus_id, + pci_name(mci->pdev), mci->mod_name, + mci->ctl_name, mci->mc_idx); return 1; } @@ -1447,12 +1423,26 @@ static int add_mc_to_global_list (struct mem_ctl_info *mci) return 0; } +static void complete_mc_list_del(struct rcu_head *head) +{ + struct mem_ctl_info *mci; + mci = container_of(head, struct mem_ctl_info, rcu); + INIT_LIST_HEAD(&mci->link); + complete(&mci->complete); +} -EXPORT_SYMBOL(edac_mc_add_mc); +static void del_mc_from_global_list(struct mem_ctl_info *mci) +{ + list_del_rcu(&mci->link); + init_completion(&mci->complete); + call_rcu(&mci->rcu, complete_mc_list_del); + wait_for_completion(&mci->complete); +} /** - * edac_mc_add_mc: Insert the 'mci' structure into the mci global list + * edac_mc_add_mc: Insert the 'mci' structure into the mci global list and + * create sysfs entries associated with mci structure * @mci: pointer to the mci structure to be added to the list * * Return: @@ -1463,111 +1453,90 @@ EXPORT_SYMBOL(edac_mc_add_mc); /* FIXME - should a warning be printed if no error detection? correction? */ int edac_mc_add_mc(struct mem_ctl_info *mci) { - int rc = 1; - - debugf0("MC: " __FILE__ ": %s()\n", __func__); + debugf0("%s()\n", __func__); #ifdef CONFIG_EDAC_DEBUG if (edac_debug_level >= 3) edac_mc_dump_mci(mci); + if (edac_debug_level >= 4) { int i; for (i = 0; i < mci->nr_csrows; i++) { int j; + edac_mc_dump_csrow(&mci->csrows[i]); for (j = 0; j < mci->csrows[i].nr_channels; j++) - edac_mc_dump_channel(&mci->csrows[i]. - channels[j]); + edac_mc_dump_channel( + &mci->csrows[i].channels[j]); } } #endif down(&mem_ctls_mutex); if (add_mc_to_global_list(mci)) - goto finish; + goto fail0; /* set load time so that error rate can be tracked */ mci->start_time = jiffies; if (edac_create_sysfs_mci_device(mci)) { - printk(KERN_WARNING - "EDAC MC%d: failed to create sysfs device\n", - mci->mc_idx); - /* FIXME - should there be an error code and unwind? 
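The del_mc_from_global_list()/complete_mc_list_del() pair the patch moves up is a general idiom: unlink the entry with list_del_rcu(), then wait out an RCU grace period (here open-coded with call_rcu() plus a completion) before letting the caller free or reuse it, so no lockless reader can still be walking the old link. A stand-alone sketch of the same idiom with invented names:

#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/rcupdate.h>
#include <linux/completion.h>

struct tracked {
	struct list_head link;		/* lives on an RCU-protected list */
	struct rcu_head rcu;
	struct completion complete;
};

static void tracked_del_done(struct rcu_head *head)
{
	struct tracked *t = container_of(head, struct tracked, rcu);

	/* A full grace period has passed: no reader can hold the old link. */
	INIT_LIST_HEAD(&t->link);
	complete(&t->complete);
}

static void tracked_del(struct tracked *t)
{
	list_del_rcu(&t->link);
	init_completion(&t->complete);
	call_rcu(&t->rcu, tracked_del_done);
	wait_for_completion(&t->complete);	/* safe to free 't' after this */
}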
*/ - goto finish; + edac_mc_printk(mci, KERN_WARNING, + "failed to create sysfs device\n"); + goto fail1; } /* Report action taken */ - printk(KERN_INFO - "EDAC MC%d: Giving out device to %s %s: PCI %s\n", - mci->mc_idx, mci->mod_name, mci->ctl_name, - pci_name(mci->pdev)); + edac_mc_printk(mci, KERN_INFO, "Giving out device to %s %s: PCI %s\n", + mci->mod_name, mci->ctl_name, pci_name(mci->pdev)); - - rc = 0; - -finish: up(&mem_ctls_mutex); - return rc; -} - - - -static void complete_mc_list_del (struct rcu_head *head) -{ - struct mem_ctl_info *mci; + return 0; - mci = container_of(head, struct mem_ctl_info, rcu); - INIT_LIST_HEAD(&mci->link); - complete(&mci->complete); -} +fail1: + del_mc_from_global_list(mci); -static void del_mc_from_global_list (struct mem_ctl_info *mci) -{ - list_del_rcu(&mci->link); - init_completion(&mci->complete); - call_rcu(&mci->rcu, complete_mc_list_del); - wait_for_completion(&mci->complete); +fail0: + up(&mem_ctls_mutex); + return 1; } - -EXPORT_SYMBOL(edac_mc_del_mc); +EXPORT_SYMBOL_GPL(edac_mc_add_mc); /** - * edac_mc_del_mc: Remove the specified mci structure from global list - * @mci: Pointer to struct mem_ctl_info structure + * edac_mc_del_mc: Remove sysfs entries for specified mci structure and + * remove mci structure from global list + * @pdev: Pointer to 'struct pci_dev' representing mci structure to remove. * - * Returns: - * 0 Success - * 1 Failure + * Return pointer to removed mci structure, or NULL if device not found. */ -int edac_mc_del_mc(struct mem_ctl_info *mci) +struct mem_ctl_info * edac_mc_del_mc(struct pci_dev *pdev) { - int rc = 1; + struct mem_ctl_info *mci; - debugf0("MC%d: " __FILE__ ": %s()\n", mci->mc_idx, __func__); + debugf0("MC: %s()\n", __func__); down(&mem_ctls_mutex); + + if ((mci = find_mci_by_pdev(pdev)) == NULL) { + up(&mem_ctls_mutex); + return NULL; + } + + edac_remove_sysfs_mci_device(mci); del_mc_from_global_list(mci); - printk(KERN_INFO - "EDAC MC%d: Removed device %d for %s %s: PCI %s\n", - mci->mc_idx, mci->mc_idx, mci->mod_name, mci->ctl_name, - pci_name(mci->pdev)); - rc = 0; up(&mem_ctls_mutex); - - return rc; + edac_printk(KERN_INFO, EDAC_MC, + "Removed device %d for %s %s: PCI %s\n", mci->mc_idx, + mci->mod_name, mci->ctl_name, pci_name(mci->pdev)); + return mci; } +EXPORT_SYMBOL_GPL(edac_mc_del_mc); - -EXPORT_SYMBOL(edac_mc_scrub_block); - -void edac_mc_scrub_block(unsigned long page, unsigned long offset, - u32 size) +void edac_mc_scrub_block(unsigned long page, unsigned long offset, u32 size) { struct page *pg; void *virt_addr; unsigned long flags = 0; - debugf3("MC: " __FILE__ ": %s()\n", __func__); + debugf3("%s()\n", __func__); /* ECC error page was not in our memory. Ignore it. 
*/ if(!pfn_valid(page)) @@ -1590,19 +1559,15 @@ void edac_mc_scrub_block(unsigned long page, unsigned long offset, if (PageHighMem(pg)) local_irq_restore(flags); } - +EXPORT_SYMBOL_GPL(edac_mc_scrub_block); /* FIXME - should return -1 */ -EXPORT_SYMBOL(edac_mc_find_csrow_by_page); - -int edac_mc_find_csrow_by_page(struct mem_ctl_info *mci, - unsigned long page) +int edac_mc_find_csrow_by_page(struct mem_ctl_info *mci, unsigned long page) { struct csrow_info *csrows = mci->csrows; int row, i; - debugf1("MC%d: " __FILE__ ": %s(): 0x%lx\n", mci->mc_idx, __func__, - page); + debugf1("MC%d: %s(): 0x%lx\n", mci->mc_idx, __func__, page); row = -1; for (i = 0; i < mci->nr_csrows; i++) { @@ -1611,11 +1576,10 @@ int edac_mc_find_csrow_by_page(struct mem_ctl_info *mci, if (csrow->nr_pages == 0) continue; - debugf3("MC%d: " __FILE__ - ": %s(): first(0x%lx) page(0x%lx)" - " last(0x%lx) mask(0x%lx)\n", mci->mc_idx, - __func__, csrow->first_page, page, - csrow->last_page, csrow->page_mask); + debugf3("MC%d: %s(): first(0x%lx) page(0x%lx) last(0x%lx) " + "mask(0x%lx)\n", mci->mc_idx, __func__, + csrow->first_page, page, csrow->last_page, + csrow->page_mask); if ((page >= csrow->first_page) && (page <= csrow->last_page) && @@ -1627,56 +1591,52 @@ int edac_mc_find_csrow_by_page(struct mem_ctl_info *mci, } if (row == -1) - printk(KERN_ERR - "EDAC MC%d: could not look up page error address %lx\n", - mci->mc_idx, (unsigned long) page); + edac_mc_printk(mci, KERN_ERR, + "could not look up page error address %lx\n", + (unsigned long) page); return row; } - - -EXPORT_SYMBOL(edac_mc_handle_ce); +EXPORT_SYMBOL_GPL(edac_mc_find_csrow_by_page); /* FIXME - setable log (warning/emerg) levels */ /* FIXME - integrate with evlog: http://evlog.sourceforge.net/ */ void edac_mc_handle_ce(struct mem_ctl_info *mci, - unsigned long page_frame_number, - unsigned long offset_in_page, - unsigned long syndrome, int row, int channel, - const char *msg) + unsigned long page_frame_number, unsigned long offset_in_page, + unsigned long syndrome, int row, int channel, const char *msg) { unsigned long remapped_page; - debugf3("MC%d: " __FILE__ ": %s()\n", mci->mc_idx, __func__); + debugf3("MC%d: %s()\n", mci->mc_idx, __func__); /* FIXME - maybe make panic on INTERNAL ERROR an option */ if (row >= mci->nr_csrows || row < 0) { /* something is wrong */ - printk(KERN_ERR - "EDAC MC%d: INTERNAL ERROR: row out of range (%d >= %d)\n", - mci->mc_idx, row, mci->nr_csrows); + edac_mc_printk(mci, KERN_ERR, + "INTERNAL ERROR: row out of range " + "(%d >= %d)\n", row, mci->nr_csrows); edac_mc_handle_ce_no_info(mci, "INTERNAL ERROR"); return; } + if (channel >= mci->csrows[row].nr_channels || channel < 0) { /* something is wrong */ - printk(KERN_ERR - "EDAC MC%d: INTERNAL ERROR: channel out of range " - "(%d >= %d)\n", - mci->mc_idx, channel, mci->csrows[row].nr_channels); + edac_mc_printk(mci, KERN_ERR, + "INTERNAL ERROR: channel out of range " + "(%d >= %d)\n", channel, + mci->csrows[row].nr_channels); edac_mc_handle_ce_no_info(mci, "INTERNAL ERROR"); return; } if (log_ce) /* FIXME - put in DIMM location */ - printk(KERN_WARNING - "EDAC MC%d: CE page 0x%lx, offset 0x%lx," - " grain %d, syndrome 0x%lx, row %d, channel %d," - " label \"%s\": %s\n", mci->mc_idx, - page_frame_number, offset_in_page, - mci->csrows[row].grain, syndrome, row, channel, - mci->csrows[row].channels[channel].label, msg); + edac_mc_printk(mci, KERN_WARNING, + "CE page 0x%lx, offset 0x%lx, grain %d, syndrome " + "0x%lx, row %d, channel %d, label \"%s\": %s\n", + 
page_frame_number, offset_in_page, + mci->csrows[row].grain, syndrome, row, channel, + mci->csrows[row].channels[channel].label, msg); mci->ce_count++; mci->csrows[row].ce_count++; @@ -1697,31 +1657,25 @@ void edac_mc_handle_ce(struct mem_ctl_info *mci, page_frame_number; edac_mc_scrub_block(remapped_page, offset_in_page, - mci->csrows[row].grain); + mci->csrows[row].grain); } } +EXPORT_SYMBOL_GPL(edac_mc_handle_ce); - -EXPORT_SYMBOL(edac_mc_handle_ce_no_info); - -void edac_mc_handle_ce_no_info(struct mem_ctl_info *mci, - const char *msg) +void edac_mc_handle_ce_no_info(struct mem_ctl_info *mci, const char *msg) { if (log_ce) - printk(KERN_WARNING - "EDAC MC%d: CE - no information available: %s\n", - mci->mc_idx, msg); + edac_mc_printk(mci, KERN_WARNING, + "CE - no information available: %s\n", msg); + mci->ce_noinfo_count++; mci->ce_count++; } - - -EXPORT_SYMBOL(edac_mc_handle_ue); +EXPORT_SYMBOL_GPL(edac_mc_handle_ce_no_info); void edac_mc_handle_ue(struct mem_ctl_info *mci, - unsigned long page_frame_number, - unsigned long offset_in_page, int row, - const char *msg) + unsigned long page_frame_number, unsigned long offset_in_page, + int row, const char *msg) { int len = EDAC_MC_LABEL_LEN * 4; char labels[len + 1]; @@ -1729,65 +1683,61 @@ void edac_mc_handle_ue(struct mem_ctl_info *mci, int chan; int chars; - debugf3("MC%d: " __FILE__ ": %s()\n", mci->mc_idx, __func__); + debugf3("MC%d: %s()\n", mci->mc_idx, __func__); /* FIXME - maybe make panic on INTERNAL ERROR an option */ if (row >= mci->nr_csrows || row < 0) { /* something is wrong */ - printk(KERN_ERR - "EDAC MC%d: INTERNAL ERROR: row out of range (%d >= %d)\n", - mci->mc_idx, row, mci->nr_csrows); + edac_mc_printk(mci, KERN_ERR, + "INTERNAL ERROR: row out of range " + "(%d >= %d)\n", row, mci->nr_csrows); edac_mc_handle_ue_no_info(mci, "INTERNAL ERROR"); return; } chars = snprintf(pos, len + 1, "%s", - mci->csrows[row].channels[0].label); + mci->csrows[row].channels[0].label); len -= chars; pos += chars; + for (chan = 1; (chan < mci->csrows[row].nr_channels) && (len > 0); chan++) { chars = snprintf(pos, len + 1, ":%s", - mci->csrows[row].channels[chan].label); + mci->csrows[row].channels[chan].label); len -= chars; pos += chars; } if (log_ue) - printk(KERN_EMERG - "EDAC MC%d: UE page 0x%lx, offset 0x%lx, grain %d, row %d," - " labels \"%s\": %s\n", mci->mc_idx, - page_frame_number, offset_in_page, - mci->csrows[row].grain, row, labels, msg); + edac_mc_printk(mci, KERN_EMERG, + "UE page 0x%lx, offset 0x%lx, grain %d, row %d, " + "labels \"%s\": %s\n", page_frame_number, + offset_in_page, mci->csrows[row].grain, row, labels, + msg); if (panic_on_ue) - panic - ("EDAC MC%d: UE page 0x%lx, offset 0x%lx, grain %d, row %d," - " labels \"%s\": %s\n", mci->mc_idx, - page_frame_number, offset_in_page, - mci->csrows[row].grain, row, labels, msg); + panic("EDAC MC%d: UE page 0x%lx, offset 0x%lx, grain %d, " + "row %d, labels \"%s\": %s\n", mci->mc_idx, + page_frame_number, offset_in_page, + mci->csrows[row].grain, row, labels, msg); mci->ue_count++; mci->csrows[row].ue_count++; } +EXPORT_SYMBOL_GPL(edac_mc_handle_ue); - -EXPORT_SYMBOL(edac_mc_handle_ue_no_info); - -void edac_mc_handle_ue_no_info(struct mem_ctl_info *mci, - const char *msg) +void edac_mc_handle_ue_no_info(struct mem_ctl_info *mci, const char *msg) { if (panic_on_ue) panic("EDAC MC%d: Uncorrected Error", mci->mc_idx); if (log_ue) - printk(KERN_WARNING - "EDAC MC%d: UE - no information available: %s\n", - mci->mc_idx, msg); + edac_mc_printk(mci, KERN_WARNING, + "UE - no 
information available: %s\n", msg); mci->ue_noinfo_count++; mci->ue_count++; } - +EXPORT_SYMBOL_GPL(edac_mc_handle_ue_no_info); #ifdef CONFIG_PCI @@ -1799,18 +1749,22 @@ static u16 get_pci_parity_status(struct pci_dev *dev, int secondary) where = secondary ? PCI_SEC_STATUS : PCI_STATUS; pci_read_config_word(dev, where, &status); - /* If we get back 0xFFFF then we must suspect that the card has been pulled but - the Linux PCI layer has not yet finished cleaning up. We don't want to report - on such devices */ + /* If we get back 0xFFFF then we must suspect that the card has been + * pulled but the Linux PCI layer has not yet finished cleaning up. + * We don't want to report on such devices + */ if (status == 0xFFFF) { u32 sanity; + pci_read_config_dword(dev, 0, &sanity); + if (sanity == 0xFFFFFFFF) return 0; } + status &= PCI_STATUS_DETECTED_PARITY | PCI_STATUS_SIG_SYSTEM_ERROR | - PCI_STATUS_PARITY; + PCI_STATUS_PARITY; if (status) /* reset only the bits we are interested in */ @@ -1822,7 +1776,7 @@ static u16 get_pci_parity_status(struct pci_dev *dev, int secondary) typedef void (*pci_parity_check_fn_t) (struct pci_dev *dev); /* Clear any PCI parity errors logged by this device. */ -static void edac_pci_dev_parity_clear( struct pci_dev *dev ) +static void edac_pci_dev_parity_clear(struct pci_dev *dev) { u8 header_type; @@ -1853,25 +1807,22 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev) /* check the status reg for errors */ if (status) { if (status & (PCI_STATUS_SIG_SYSTEM_ERROR)) - printk(KERN_CRIT - "EDAC PCI- " + edac_printk(KERN_CRIT, EDAC_PCI, "Signaled System Error on %s\n", - pci_name (dev)); + pci_name(dev)); if (status & (PCI_STATUS_PARITY)) { - printk(KERN_CRIT - "EDAC PCI- " + edac_printk(KERN_CRIT, EDAC_PCI, "Master Data Parity Error on %s\n", - pci_name (dev)); + pci_name(dev)); atomic_inc(&pci_parity_count); } if (status & (PCI_STATUS_DETECTED_PARITY)) { - printk(KERN_CRIT - "EDAC PCI- " + edac_printk(KERN_CRIT, EDAC_PCI, "Detected Parity Error on %s\n", - pci_name (dev)); + pci_name(dev)); atomic_inc(&pci_parity_count); } @@ -1892,25 +1843,22 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev) /* check the secondary status reg for errors */ if (status) { if (status & (PCI_STATUS_SIG_SYSTEM_ERROR)) - printk(KERN_CRIT - "EDAC PCI-Bridge- " + edac_printk(KERN_CRIT, EDAC_PCI, "Bridge " "Signaled System Error on %s\n", - pci_name (dev)); + pci_name(dev)); if (status & (PCI_STATUS_PARITY)) { - printk(KERN_CRIT - "EDAC PCI-Bridge- " - "Master Data Parity Error on %s\n", - pci_name (dev)); + edac_printk(KERN_CRIT, EDAC_PCI, "Bridge " + "Master Data Parity Error on " + "%s\n", pci_name(dev)); atomic_inc(&pci_parity_count); } if (status & (PCI_STATUS_DETECTED_PARITY)) { - printk(KERN_CRIT - "EDAC PCI-Bridge- " + edac_printk(KERN_CRIT, EDAC_PCI, "Bridge " "Detected Parity Error on %s\n", - pci_name (dev)); + pci_name(dev)); atomic_inc(&pci_parity_count); } @@ -1929,58 +1877,55 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev) * Returns: 0 not found * 1 found on list */ -static int check_dev_on_list(struct edac_pci_device_list *list, int free_index, - struct pci_dev *dev) -{ - int i; - int rc = 0; /* Assume not found */ - unsigned short vendor=dev->vendor; - unsigned short device=dev->device; - - /* Scan the list, looking for a vendor/device match - */ - for (i = 0; i < free_index; i++, list++ ) { - if ( (list->vendor == vendor ) && - (list->device == device )) { - rc = 1; - break; - } - } +static int check_dev_on_list(struct edac_pci_device_list *list, 
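Seen from a chipset driver, the reporting contract of the hunks above is small: translate the hardware's error address into a page number, look up the csrow, and hand off to the core, which logs the event and honours the log_ue/panic_on_ue module parameters. A sketch mirroring the e7xxx process_ue() hunk earlier in this patch; the name and the byte-address assumption are illustrative only:

/* Assumes the chipset register reports a byte address; e7xxx actually
 * shifts by 6 and carries a FIXME about switching to PAGE_SHIFT. */
static void example_report_ue(struct mem_ctl_info *mci, u32 error_addr)
{
	unsigned long page = error_addr >> PAGE_SHIFT;
	int row = edac_mc_find_csrow_by_page(mci, page);

	edac_mc_handle_ue(mci, page, 0, row, "example UE");
}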
+ int free_index, struct pci_dev *dev) +{ + int i; + int rc = 0; /* Assume not found */ + unsigned short vendor=dev->vendor; + unsigned short device=dev->device; + + /* Scan the list, looking for a vendor/device match */ + for (i = 0; i < free_index; i++, list++ ) { + if ((list->vendor == vendor ) && (list->device == device )) { + rc = 1; + break; + } + } - return rc; + return rc; } /* * pci_dev parity list iterator - * Scan the PCI device list for one iteration, looking for SERRORs + * Scan the PCI device list for one iteration, looking for SERRORs * Master Parity ERRORS or Parity ERRORs on primary or secondary devices */ static inline void edac_pci_dev_parity_iterator(pci_parity_check_fn_t fn) { - struct pci_dev *dev=NULL; + struct pci_dev *dev = NULL; /* request for kernel access to the next PCI device, if any, * and while we are looking at it have its reference count * bumped until we are done with it */ while((dev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, dev)) != NULL) { - - /* if whitelist exists then it has priority, so only scan those - * devices on the whitelist - */ - if (pci_whitelist_count > 0 ) { - if (check_dev_on_list(pci_whitelist, + /* if whitelist exists then it has priority, so only scan + * those devices on the whitelist + */ + if (pci_whitelist_count > 0 ) { + if (check_dev_on_list(pci_whitelist, pci_whitelist_count, dev)) fn(dev); - } else { + } else { /* * if no whitelist, then check if this devices is * blacklisted */ - if (!check_dev_on_list(pci_blacklist, + if (!check_dev_on_list(pci_blacklist, pci_blacklist_count, dev)) fn(dev); - } + } } } @@ -1989,7 +1934,7 @@ static void do_pci_parity_check(void) unsigned long flags; int before_count; - debugf3("MC: " __FILE__ ": %s()\n", __func__); + debugf3("%s()\n", __func__); if (!check_pci_parity) return; @@ -2011,7 +1956,6 @@ static void do_pci_parity_check(void) } } - static inline void clear_pci_parity_errors(void) { /* Clear any PCI bus parity errors that devices initially have logged @@ -2020,37 +1964,30 @@ static inline void clear_pci_parity_errors(void) edac_pci_dev_parity_iterator(edac_pci_dev_parity_clear); } - #else /* CONFIG_PCI */ - static inline void do_pci_parity_check(void) { /* no-op */ } - static inline void clear_pci_parity_errors(void) { /* no-op */ } - #endif /* CONFIG_PCI */ /* * Iterate over all MC instances and check for ECC, et al, errors */ -static inline void check_mc_devices (void) +static inline void check_mc_devices(void) { - unsigned long flags; struct list_head *item; struct mem_ctl_info *mci; - debugf3("MC: " __FILE__ ": %s()\n", __func__); - - /* during poll, have interrupts off */ - local_irq_save(flags); + debugf3("%s()\n", __func__); + down(&mem_ctls_mutex); list_for_each(item, &mc_devices) { mci = list_entry(item, struct mem_ctl_info, link); @@ -2059,10 +1996,9 @@ static inline void check_mc_devices (void) mci->edac_check(mci); } - local_irq_restore(flags); + up(&mem_ctls_mutex); } - /* * Check MC status every poll_msec. * Check PCI status every poll_msec as well. 
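The iterator above leans on a property of pci_get_device() worth spelling out: passing the previous device back in both advances the search and transfers the reference, because pci_get_device() drops the reference on the device it was given and takes one on the device it returns. The walk therefore needs no explicit pci_dev_put() as long as it runs to completion. A minimal sketch of the idiom (the function name is invented):

#include <linux/pci.h>

static void example_for_each_pci_dev(void (*fn)(struct pci_dev *dev))
{
	struct pci_dev *dev = NULL;

	/* Each call puts 'dev' and gets the next device, so the reference
	 * counts are handled by the loop itself. */
	while ((dev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, dev)) != NULL)
		fn(dev);
}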
@@ -2073,70 +2009,21 @@ static inline void check_mc_devices (void) */ static void do_edac_check(void) { - - debugf3("MC: " __FILE__ ": %s()\n", __func__); - + debugf3("%s()\n", __func__); check_mc_devices(); - do_pci_parity_check(); } - -/* - * EDAC thread state information - */ -struct bs_thread_info -{ - struct task_struct *task; - struct completion *event; - char *name; - void (*run)(void); -}; - -static struct bs_thread_info bs_thread; - -/* - * edac_kernel_thread - * This the kernel thread that processes edac operations - * in a normal thread environment - */ static int edac_kernel_thread(void *arg) { - struct bs_thread_info *thread = (struct bs_thread_info *) arg; - - /* detach thread */ - daemonize(thread->name); - - current->exit_signal = SIGCHLD; - allow_signal(SIGKILL); - thread->task = current; - - /* indicate to starting task we have started */ - complete(thread->event); - - /* loop forever, until we are told to stop */ - while(thread->run != NULL) { - void (*run)(void); - - /* call the function to check the memory controllers */ - run = thread->run; - if (run) - run(); - - if (signal_pending(current)) - flush_signals(current); - - /* ensure we are interruptable */ - set_current_state(TASK_INTERRUPTIBLE); + while (!kthread_should_stop()) { + do_edac_check(); /* goto sleep for the interval */ - schedule_timeout((HZ * poll_msec) / 1000); + schedule_timeout_interruptible((HZ * poll_msec) / 1000); try_to_freeze(); } - /* notify waiter that we are exiting */ - complete(thread->event); - return 0; } @@ -2146,10 +2033,7 @@ static int edac_kernel_thread(void *arg) */ static int __init edac_mc_init(void) { - int ret; - struct completion event; - - printk(KERN_INFO "MC: " __FILE__ " version " EDAC_MC_VERSION "\n"); + edac_printk(KERN_INFO, EDAC_MC, EDAC_MC_VERSION "\n"); /* * Harvest and clear any boot/initialization PCI parity errors @@ -2160,80 +2044,54 @@ static int __init edac_mc_init(void) */ clear_pci_parity_errors(); - /* perform check for first time to harvest boot leftovers */ - do_edac_check(); - - /* Create the MC sysfs entires */ + /* Create the MC sysfs entries */ if (edac_sysfs_memctrl_setup()) { - printk(KERN_ERR "EDAC MC: Error initializing sysfs code\n"); + edac_printk(KERN_ERR, EDAC_MC, + "Error initializing sysfs code\n"); return -ENODEV; } /* Create the PCI parity sysfs entries */ if (edac_sysfs_pci_setup()) { edac_sysfs_memctrl_teardown(); - printk(KERN_ERR "EDAC PCI: Error initializing sysfs code\n"); + edac_printk(KERN_ERR, EDAC_MC, + "EDAC PCI: Error initializing sysfs code\n"); return -ENODEV; } - /* Create our kernel thread */ - init_completion(&event); - bs_thread.event = &event; - bs_thread.name = "kedac"; - bs_thread.run = do_edac_check; - /* create our kernel thread */ - ret = kernel_thread(edac_kernel_thread, &bs_thread, CLONE_KERNEL); - if (ret < 0) { + edac_thread = kthread_run(edac_kernel_thread, NULL, "kedac"); + + if (IS_ERR(edac_thread)) { /* remove the sysfs entries */ edac_sysfs_memctrl_teardown(); edac_sysfs_pci_teardown(); - return -ENOMEM; + return PTR_ERR(edac_thread); } - /* wait for our kernel theard ack that it is up and running */ - wait_for_completion(&event); - return 0; } - /* * edac_mc_exit() * module exit/termination functioni */ static void __exit edac_mc_exit(void) { - struct completion event; - - debugf0("MC: " __FILE__ ": %s()\n", __func__); - - init_completion(&event); - bs_thread.event = &event; - - /* As soon as ->run is set to NULL, the task could disappear, - * so we need to hold tasklist_lock until we have sent the signal - */ 
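The replacement thread above follows the standard kthread pattern: kthread_run() creates and wakes the poller, the loop re-checks kthread_should_stop() on every pass, and kthread_stop() (in the exit hunk that follows) wakes the thread out of its interruptible sleep and waits for it to return. A free-standing sketch of the same lifecycle with invented names and a fixed one-second poll:

#include <linux/kthread.h>
#include <linux/sched.h>
#include <linux/err.h>

static struct task_struct *example_thread;

static int example_poll(void *arg)
{
	while (!kthread_should_stop()) {
		/* ... periodic work goes here ... */
		schedule_timeout_interruptible(HZ);	/* roughly one second */
		try_to_freeze();
	}
	return 0;
}

static int example_start(void)
{
	example_thread = kthread_run(example_poll, NULL, "kexample");
	return IS_ERR(example_thread) ? PTR_ERR(example_thread) : 0;
}

static void example_stop(void)
{
	/* Wakes the poller out of its sleep and blocks until it exits. */
	kthread_stop(example_thread);
}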
- read_lock(&tasklist_lock); - bs_thread.run = NULL; - send_sig(SIGKILL, bs_thread.task, 1); - read_unlock(&tasklist_lock); - wait_for_completion(&event); + debugf0("%s()\n", __func__); + kthread_stop(edac_thread); /* tear down the sysfs device */ edac_sysfs_memctrl_teardown(); edac_sysfs_pci_teardown(); } - - - module_init(edac_mc_init); module_exit(edac_mc_exit); MODULE_LICENSE("GPL"); MODULE_AUTHOR("Linux Networx (http://lnxi.com) Thayne Harbaugh et al\n" - "Based on.work by Dan Hollis et al"); + "Based on work by Dan Hollis et al"); MODULE_DESCRIPTION("Core library routines for MC reporting"); module_param(panic_on_ue, int, 0644); diff --git a/drivers/edac/edac_mc.h b/drivers/edac/edac_mc.h index 75ecf484a43a..8d9e83909b9c 100644 --- a/drivers/edac/edac_mc.h +++ b/drivers/edac/edac_mc.h @@ -15,11 +15,9 @@ * */ - #ifndef _EDAC_MC_H_ #define _EDAC_MC_H_ - #include <linux/config.h> #include <linux/kernel.h> #include <linux/types.h> @@ -33,7 +31,6 @@ #include <linux/completion.h> #include <linux/kobject.h> - #define EDAC_MC_LABEL_LEN 31 #define MC_PROC_NAME_MAX_LEN 7 @@ -43,31 +40,53 @@ #define PAGES_TO_MiB( pages ) ( ( pages ) << ( PAGE_SHIFT - 20 ) ) #endif +#define edac_printk(level, prefix, fmt, arg...) \ + printk(level "EDAC " prefix ": " fmt, ##arg) + +#define edac_mc_printk(mci, level, fmt, arg...) \ + printk(level "EDAC MC%d: " fmt, mci->mc_idx, ##arg) + +#define edac_mc_chipset_printk(mci, level, prefix, fmt, arg...) \ + printk(level "EDAC " prefix " MC%d: " fmt, mci->mc_idx, ##arg) + +/* prefixes for edac_printk() and edac_mc_printk() */ +#define EDAC_MC "MC" +#define EDAC_PCI "PCI" +#define EDAC_DEBUG "DEBUG" + #ifdef CONFIG_EDAC_DEBUG extern int edac_debug_level; -#define edac_debug_printk(level, fmt, args...) \ -do { if (level <= edac_debug_level) printk(KERN_DEBUG fmt, ##args); } while(0) + +#define edac_debug_printk(level, fmt, arg...) \ + do { \ + if (level <= edac_debug_level) \ + edac_printk(KERN_DEBUG, EDAC_DEBUG, fmt, ##arg); \ + } while(0) + #define debugf0( ... ) edac_debug_printk(0, __VA_ARGS__ ) #define debugf1( ... ) edac_debug_printk(1, __VA_ARGS__ ) #define debugf2( ... ) edac_debug_printk(2, __VA_ARGS__ ) #define debugf3( ... ) edac_debug_printk(3, __VA_ARGS__ ) #define debugf4( ... ) edac_debug_printk(4, __VA_ARGS__ ) -#else /* !CONFIG_EDAC_DEBUG */ + +#else /* !CONFIG_EDAC_DEBUG */ + #define debugf0( ... ) #define debugf1( ... ) #define debugf2( ... ) #define debugf3( ... ) #define debugf4( ... 
) -#endif /* !CONFIG_EDAC_DEBUG */ +#endif /* !CONFIG_EDAC_DEBUG */ -#define bs_xstr(s) bs_str(s) -#define bs_str(s) #s -#define BS_MOD_STR bs_xstr(KBUILD_BASENAME) +#define edac_xstr(s) edac_str(s) +#define edac_str(s) #s +#define EDAC_MOD_STR edac_xstr(KBUILD_BASENAME) #define BIT(x) (1 << (x)) -#define PCI_VEND_DEV(vend, dev) PCI_VENDOR_ID_ ## vend, PCI_DEVICE_ID_ ## vend ## _ ## dev +#define PCI_VEND_DEV(vend, dev) PCI_VENDOR_ID_ ## vend, \ + PCI_DEVICE_ID_ ## vend ## _ ## dev /* memory devices */ enum dev_type { @@ -117,7 +136,6 @@ enum mem_type { #define MEM_FLAG_RDDR BIT(MEM_RDDR) #define MEM_FLAG_RMBS BIT(MEM_RMBS) - /* chipset Error Detection and Correction capabilities and mode */ enum edac_type { EDAC_UNKNOWN = 0, /* Unknown if ECC is available */ @@ -142,7 +160,6 @@ enum edac_type { #define EDAC_FLAG_S8ECD8ED BIT(EDAC_S8ECD8ED) #define EDAC_FLAG_S16ECD16ED BIT(EDAC_S16ECD16ED) - /* scrubbing capabilities */ enum scrub_type { SCRUB_UNKNOWN = 0, /* Unknown if scrubber is available */ @@ -166,11 +183,6 @@ enum scrub_type { #define SCRUB_FLAG_HW_PROG_SRC BIT(SCRUB_HW_PROG_SRC_CORR) #define SCRUB_FLAG_HW_TUN BIT(SCRUB_HW_TUNABLE) -enum mci_sysfs_status { - MCI_SYSFS_INACTIVE = 0, /* sysfs entries NOT registered */ - MCI_SYSFS_ACTIVE /* sysfs entries ARE registered */ -}; - /* FIXME - should have notify capabilities: NMI, LOG, PROC, etc */ /* @@ -255,20 +267,19 @@ enum mci_sysfs_status { * PS - I enjoyed writing all that about as much as you enjoyed reading it. */ - struct channel_info { int chan_idx; /* channel index */ u32 ce_count; /* Correctable Errors for this CHANNEL */ - char label[EDAC_MC_LABEL_LEN + 1]; /* DIMM label on motherboard */ + char label[EDAC_MC_LABEL_LEN + 1]; /* DIMM label on motherboard */ struct csrow_info *csrow; /* the parent */ }; - struct csrow_info { unsigned long first_page; /* first page number in dimm */ unsigned long last_page; /* last page number in dimm */ unsigned long page_mask; /* used for interleaving - - 0UL for non intlv */ + * 0UL for non intlv + */ u32 nr_pages; /* number of pages in csrow */ u32 grain; /* granularity of reported error in bytes */ int csrow_idx; /* the chip-select row */ @@ -280,29 +291,28 @@ struct csrow_info { struct mem_ctl_info *mci; /* the parent */ struct kobject kobj; /* sysfs kobject for this csrow */ + struct completion kobj_complete; /* FIXME the number of CHANNELs might need to become dynamic */ u32 nr_channels; struct channel_info *channels; }; - struct mem_ctl_info { struct list_head link; /* for global list of mem_ctl_info structs */ unsigned long mtype_cap; /* memory types supported by mc */ unsigned long edac_ctl_cap; /* Mem controller EDAC capabilities */ unsigned long edac_cap; /* configuration capabilities - this is - closely related to edac_ctl_cap. The - difference is that the controller - may be capable of s4ecd4ed which would - be listed in edac_ctl_cap, but if - channels aren't capable of s4ecd4ed then the - edac_cap would not have that capability. */ + * closely related to edac_ctl_cap. The + * difference is that the controller may be + * capable of s4ecd4ed which would be listed + * in edac_ctl_cap, but if channels aren't + * capable of s4ecd4ed then the edac_cap would + * not have that capability. 
+ */ unsigned long scrub_cap; /* chipset scrub capabilities */ enum scrub_type scrub_mode; /* current scrub mode */ - enum mci_sysfs_status sysfs_active; /* status of sysfs */ - /* pointer to edac checking routine */ void (*edac_check) (struct mem_ctl_info * mci); /* @@ -311,7 +321,7 @@ struct mem_ctl_info { */ /* FIXME - why not send the phys page to begin with? */ unsigned long (*ctl_page_to_phys) (struct mem_ctl_info * mci, - unsigned long page); + unsigned long page); int mc_idx; int nr_csrows; struct csrow_info *csrows; @@ -340,72 +350,69 @@ struct mem_ctl_info { /* edac sysfs device control */ struct kobject edac_mci_kobj; + struct completion kobj_complete; }; - - /* write all or some bits in a byte-register*/ -static inline void pci_write_bits8(struct pci_dev *pdev, int offset, - u8 value, u8 mask) +static inline void pci_write_bits8(struct pci_dev *pdev, int offset, u8 value, + u8 mask) { if (mask != 0xff) { u8 buf; + pci_read_config_byte(pdev, offset, &buf); value &= mask; buf &= ~mask; value |= buf; } + pci_write_config_byte(pdev, offset, value); } - /* write all or some bits in a word-register*/ static inline void pci_write_bits16(struct pci_dev *pdev, int offset, - u16 value, u16 mask) + u16 value, u16 mask) { if (mask != 0xffff) { u16 buf; + pci_read_config_word(pdev, offset, &buf); value &= mask; buf &= ~mask; value |= buf; } + pci_write_config_word(pdev, offset, value); } - /* write all or some bits in a dword-register*/ static inline void pci_write_bits32(struct pci_dev *pdev, int offset, - u32 value, u32 mask) + u32 value, u32 mask) { if (mask != 0xffff) { u32 buf; + pci_read_config_dword(pdev, offset, &buf); value &= mask; buf &= ~mask; value |= buf; } + pci_write_config_dword(pdev, offset, value); } - #ifdef CONFIG_EDAC_DEBUG void edac_mc_dump_channel(struct channel_info *chan); void edac_mc_dump_mci(struct mem_ctl_info *mci); void edac_mc_dump_csrow(struct csrow_info *csrow); -#endif /* CONFIG_EDAC_DEBUG */ +#endif /* CONFIG_EDAC_DEBUG */ extern int edac_mc_add_mc(struct mem_ctl_info *mci); -extern int edac_mc_del_mc(struct mem_ctl_info *mci); - +extern struct mem_ctl_info * edac_mc_del_mc(struct pci_dev *pdev); extern int edac_mc_find_csrow_by_page(struct mem_ctl_info *mci, - unsigned long page); - -extern struct mem_ctl_info *edac_mc_find_mci_by_pdev(struct pci_dev - *pdev); - -extern void edac_mc_scrub_block(unsigned long page, - unsigned long offset, u32 size); + unsigned long page); +extern void edac_mc_scrub_block(unsigned long page, unsigned long offset, + u32 size); /* * The no info errors are used when error overflows are reported. @@ -418,31 +425,25 @@ extern void edac_mc_scrub_block(unsigned long page, * statement clutter and extra function arguments. 
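The edac_printk(), edac_mc_printk() and edac_mc_chipset_printk() macros added to edac_mc.h above follow the usual variadic printk wrapper pattern: string-literal concatenation builds the "EDAC <prefix>:" banner at compile time, and ##arg swallows the trailing comma when no arguments follow the format string. A stripped-down illustration with made-up names (foo_printk is not part of the patch):

#include <linux/kernel.h>

#define foo_printk(level, prefix, fmt, arg...) \
	printk(level "FOO " prefix ": " fmt, ##arg)

static void foo_report(int err)
{
	/* expands to printk(KERN_ERR "FOO MC: probe failed: %d\n", err) */
	foo_printk(KERN_ERR, "MC", "probe failed: %d\n", err);

	/* ##arg drops the comma here, so a plain message also compiles */
	foo_printk(KERN_INFO, "MC", "ready\n");
}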
*/ extern void edac_mc_handle_ce(struct mem_ctl_info *mci, - unsigned long page_frame_number, - unsigned long offset_in_page, - unsigned long syndrome, - int row, int channel, const char *msg); - + unsigned long page_frame_number, unsigned long offset_in_page, + unsigned long syndrome, int row, int channel, + const char *msg); extern void edac_mc_handle_ce_no_info(struct mem_ctl_info *mci, - const char *msg); - + const char *msg); extern void edac_mc_handle_ue(struct mem_ctl_info *mci, - unsigned long page_frame_number, - unsigned long offset_in_page, - int row, const char *msg); - + unsigned long page_frame_number, unsigned long offset_in_page, + int row, const char *msg); extern void edac_mc_handle_ue_no_info(struct mem_ctl_info *mci, - const char *msg); + const char *msg); /* * This kmalloc's and initializes all the structures. * Can't be used if all structures don't have the same lifetime. */ -extern struct mem_ctl_info *edac_mc_alloc(unsigned sz_pvt, - unsigned nr_csrows, unsigned nr_chans); +extern struct mem_ctl_info *edac_mc_alloc(unsigned sz_pvt, unsigned nr_csrows, + unsigned nr_chans); /* Free an mc previously allocated by edac_mc_alloc() */ extern void edac_mc_free(struct mem_ctl_info *mci); - #endif /* _EDAC_MC_H_ */ diff --git a/drivers/edac/i82860_edac.c b/drivers/edac/i82860_edac.c index 52596e75f9c2..fd342163cf97 100644 --- a/drivers/edac/i82860_edac.c +++ b/drivers/edac/i82860_edac.c @@ -9,7 +9,6 @@ * by Thayne Harbaugh of Linux Networx. (http://lnxi.com) */ - #include <linux/config.h> #include <linux/module.h> #include <linux/init.h> @@ -18,6 +17,11 @@ #include <linux/slab.h> #include "edac_mc.h" +#define i82860_printk(level, fmt, arg...) \ + edac_printk(level, "i82860", fmt, ##arg) + +#define i82860_mc_printk(mci, level, fmt, arg...) 
\ + edac_mc_chipset_printk(mci, level, "i82860", fmt, ##arg) #ifndef PCI_DEVICE_ID_INTEL_82860_0 #define PCI_DEVICE_ID_INTEL_82860_0 0x2531 @@ -48,15 +52,15 @@ struct i82860_error_info { static const struct i82860_dev_info i82860_devs[] = { [I82860] = { - .ctl_name = "i82860"}, + .ctl_name = "i82860" + }, }; static struct pci_dev *mci_pdev = NULL; /* init dev: in case that AGP code - has already registered driver */ + * has already registered driver + */ -static int i82860_registered = 1; - -static void i82860_get_error_info (struct mem_ctl_info *mci, +static void i82860_get_error_info(struct mem_ctl_info *mci, struct i82860_error_info *info) { /* @@ -78,14 +82,15 @@ static void i82860_get_error_info (struct mem_ctl_info *mci, */ if (!(info->errsts2 & 0x0003)) return; + if ((info->errsts ^ info->errsts2) & 0x0003) { pci_read_config_dword(mci->pdev, I82860_EAP, &info->eap); pci_read_config_word(mci->pdev, I82860_DERRCTL_STS, - &info->derrsyn); + &info->derrsyn); } } -static int i82860_process_error_info (struct mem_ctl_info *mci, +static int i82860_process_error_info(struct mem_ctl_info *mci, struct i82860_error_info *info, int handle_errors) { int row; @@ -107,8 +112,8 @@ static int i82860_process_error_info (struct mem_ctl_info *mci, if (info->errsts & 0x0002) edac_mc_handle_ue(mci, info->eap, 0, row, "i82860 UE"); else - edac_mc_handle_ce(mci, info->eap, 0, info->derrsyn, row, - 0, "i82860 UE"); + edac_mc_handle_ce(mci, info->eap, 0, info->derrsyn, row, 0, + "i82860 UE"); return 1; } @@ -117,7 +122,7 @@ static void i82860_check(struct mem_ctl_info *mci) { struct i82860_error_info info; - debugf1("MC%d: " __FILE__ ": %s()\n", mci->mc_idx, __func__); + debugf1("MC%d: %s()\n", mci->mc_idx, __func__); i82860_get_error_info(mci, &info); i82860_process_error_info(mci, &info, 1); } @@ -128,6 +133,7 @@ static int i82860_probe1(struct pci_dev *pdev, int dev_idx) int index; struct mem_ctl_info *mci = NULL; unsigned long last_cumul_size; + struct i82860_error_info discard; u16 mchcfg_ddim; /* DRAM Data Integrity Mode 0=none,2=edac */ @@ -140,21 +146,20 @@ static int i82860_probe1(struct pci_dev *pdev, int dev_idx) going to make 1 channel for group. 
*/ mci = edac_mc_alloc(0, 16, 1); + if (!mci) return -ENOMEM; - debugf3("MC: " __FILE__ ": %s(): init mci\n", __func__); - + debugf3("%s(): init mci\n", __func__); mci->pdev = pdev; mci->mtype_cap = MEM_FLAG_DDR; - mci->edac_ctl_cap = EDAC_FLAG_NONE | EDAC_FLAG_SECDED; /* I"m not sure about this but I think that all RDRAM is SECDED */ mci->edac_cap = EDAC_FLAG_SECDED; /* adjust FLAGS */ - mci->mod_name = BS_MOD_STR; + mci->mod_name = EDAC_MOD_STR; mci->mod_ver = "$Revision: 1.1.2.6 $"; mci->ctl_name = i82860_devs[dev_idx].ctl_name; mci->edac_check = i82860_check; @@ -175,12 +180,13 @@ static int i82860_probe1(struct pci_dev *pdev, int dev_idx) struct csrow_info *csrow = &mci->csrows[index]; pci_read_config_word(mci->pdev, I82860_GBA + index * 2, - &value); + &value); cumul_size = (value & I82860_GBA_MASK) << (I82860_GBA_SHIFT - PAGE_SHIFT); - debugf3("MC: " __FILE__ ": %s(): (%d) cumul_size 0x%x\n", - __func__, index, cumul_size); + debugf3("%s(): (%d) cumul_size 0x%x\n", __func__, index, + cumul_size); + if (cumul_size == last_cumul_size) continue; /* not populated */ @@ -188,42 +194,43 @@ static int i82860_probe1(struct pci_dev *pdev, int dev_idx) csrow->last_page = cumul_size - 1; csrow->nr_pages = cumul_size - last_cumul_size; last_cumul_size = cumul_size; - csrow->grain = 1 << 12; /* I82860_EAP has 4KiB reolution */ + csrow->grain = 1 << 12; /* I82860_EAP has 4KiB reolution */ csrow->mtype = MEM_RMBS; csrow->dtype = DEV_UNKNOWN; csrow->edac_mode = mchcfg_ddim ? EDAC_SECDED : EDAC_NONE; } - /* clear counters */ - pci_write_bits16(mci->pdev, I82860_ERRSTS, 0x0003, 0x0003); + i82860_get_error_info(mci, &discard); /* clear counters */ if (edac_mc_add_mc(mci)) { - debugf3("MC: " __FILE__ - ": %s(): failed edac_mc_add_mc()\n", - __func__); + debugf3("%s(): failed edac_mc_add_mc()\n", __func__); edac_mc_free(mci); } else { /* get this far and it's successful */ - debugf3("MC: " __FILE__ ": %s(): success\n", __func__); + debugf3("%s(): success\n", __func__); rc = 0; } + return rc; } /* returns count (>= 0), or negative on error */ static int __devinit i82860_init_one(struct pci_dev *pdev, - const struct pci_device_id *ent) + const struct pci_device_id *ent) { int rc; - debugf0("MC: " __FILE__ ": %s()\n", __func__); + debugf0("%s()\n", __func__); + i82860_printk(KERN_INFO, "i82860 init one\n"); - printk(KERN_INFO "i82860 init one\n"); - if(pci_enable_device(pdev) < 0) + if (pci_enable_device(pdev) < 0) return -EIO; + rc = i82860_probe1(pdev, ent->driver_data); - if(rc == 0) + + if (rc == 0) mci_pdev = pci_dev_get(pdev); + return rc; } @@ -231,23 +238,28 @@ static void __devexit i82860_remove_one(struct pci_dev *pdev) { struct mem_ctl_info *mci; - debugf0(__FILE__ ": %s()\n", __func__); + debugf0("%s()\n", __func__); - mci = edac_mc_find_mci_by_pdev(pdev); - if ((mci != NULL) && (edac_mc_del_mc(mci) == 0)) - edac_mc_free(mci); + if ((mci = edac_mc_del_mc(pdev)) == NULL) + return; + + edac_mc_free(mci); } static const struct pci_device_id i82860_pci_tbl[] __devinitdata = { - {PCI_VEND_DEV(INTEL, 82860_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0, - I82860}, - {0,} /* 0 terminated list. */ + { + PCI_VEND_DEV(INTEL, 82860_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0, + I82860 + }, + { + 0, + } /* 0 terminated list. 
*/ }; MODULE_DEVICE_TABLE(pci, i82860_pci_tbl); static struct pci_driver i82860_driver = { - .name = BS_MOD_STR, + .name = EDAC_MOD_STR, .probe = i82860_init_one, .remove = __devexit_p(i82860_remove_one), .id_table = i82860_pci_tbl, @@ -257,43 +269,56 @@ static int __init i82860_init(void) { int pci_rc; - debugf3("MC: " __FILE__ ": %s()\n", __func__); + debugf3("%s()\n", __func__); + if ((pci_rc = pci_register_driver(&i82860_driver)) < 0) - return pci_rc; + goto fail0; if (!mci_pdev) { - i82860_registered = 0; mci_pdev = pci_get_device(PCI_VENDOR_ID_INTEL, - PCI_DEVICE_ID_INTEL_82860_0, NULL); + PCI_DEVICE_ID_INTEL_82860_0, NULL); + if (mci_pdev == NULL) { debugf0("860 pci_get_device fail\n"); - return -ENODEV; + pci_rc = -ENODEV; + goto fail1; } + pci_rc = i82860_init_one(mci_pdev, i82860_pci_tbl); + if (pci_rc < 0) { debugf0("860 init fail\n"); - pci_dev_put(mci_pdev); - return -ENODEV; + pci_rc = -ENODEV; + goto fail1; } } + return 0; + +fail1: + pci_unregister_driver(&i82860_driver); + +fail0: + if (mci_pdev != NULL) + pci_dev_put(mci_pdev); + + return pci_rc; } static void __exit i82860_exit(void) { - debugf3("MC: " __FILE__ ": %s()\n", __func__); + debugf3("%s()\n", __func__); pci_unregister_driver(&i82860_driver); - if (!i82860_registered) { - i82860_remove_one(mci_pdev); + + if (mci_pdev != NULL) pci_dev_put(mci_pdev); - } } module_init(i82860_init); module_exit(i82860_exit); MODULE_LICENSE("GPL"); -MODULE_AUTHOR - ("Red Hat Inc. (http://www.redhat.com.com) Ben Woodard <woodard@redhat.com>"); +MODULE_AUTHOR("Red Hat Inc. (http://www.redhat.com) " + "Ben Woodard <woodard@redhat.com>"); MODULE_DESCRIPTION("ECC support for Intel 82860 memory hub controllers"); diff --git a/drivers/edac/i82875p_edac.c b/drivers/edac/i82875p_edac.c index 1991f94af753..0aec92698f17 100644 --- a/drivers/edac/i82875p_edac.c +++ b/drivers/edac/i82875p_edac.c @@ -13,18 +13,19 @@ * Note: E7210 appears same as D82875P - zhenyu.z.wang at intel.com */ - #include <linux/config.h> #include <linux/module.h> #include <linux/init.h> - #include <linux/pci.h> #include <linux/pci_ids.h> - #include <linux/slab.h> - #include "edac_mc.h" +#define i82875p_printk(level, fmt, arg...) \ + edac_printk(level, "i82875p", fmt, ##arg) + +#define i82875p_mc_printk(mci, level, fmt, arg...) 
\ + edac_mc_chipset_printk(mci, level, "i82875p", fmt, ##arg) #ifndef PCI_DEVICE_ID_INTEL_82875_0 #define PCI_DEVICE_ID_INTEL_82875_0 0x2578 @@ -34,11 +35,9 @@ #define PCI_DEVICE_ID_INTEL_82875_6 0x257e #endif /* PCI_DEVICE_ID_INTEL_82875_6 */ - /* four csrows in dual channel, eight in single channel */ #define I82875P_NR_CSROWS(nr_chans) (8/(nr_chans)) - /* Intel 82875p register addresses - device 0 function 0 - DRAM Controller */ #define I82875P_EAP 0x58 /* Error Address Pointer (32b) * @@ -87,7 +86,6 @@ * 0 reserved */ - /* Intel 82875p register addresses - device 6 function 0 - DRAM Controller */ #define I82875P_PCICMD6 0x04 /* PCI Command Register (16b) * @@ -151,23 +149,19 @@ * 1:0 DRAM type 01=DDR */ - enum i82875p_chips { I82875P = 0, }; - struct i82875p_pvt { struct pci_dev *ovrfl_pdev; void __iomem *ovrfl_window; }; - struct i82875p_dev_info { const char *ctl_name; }; - struct i82875p_error_info { u16 errsts; u32 eap; @@ -176,17 +170,19 @@ struct i82875p_error_info { u16 errsts2; }; - static const struct i82875p_dev_info i82875p_devs[] = { [I82875P] = { - .ctl_name = "i82875p"}, + .ctl_name = "i82875p" + }, }; -static struct pci_dev *mci_pdev = NULL; /* init dev: in case that AGP code - has already registered driver */ +static struct pci_dev *mci_pdev = NULL; /* init dev: in case that AGP code has + * already registered driver + */ + static int i82875p_registered = 1; -static void i82875p_get_error_info (struct mem_ctl_info *mci, +static void i82875p_get_error_info(struct mem_ctl_info *mci, struct i82875p_error_info *info) { /* @@ -210,15 +206,16 @@ static void i82875p_get_error_info (struct mem_ctl_info *mci, */ if (!(info->errsts2 & 0x0081)) return; + if ((info->errsts ^ info->errsts2) & 0x0081) { pci_read_config_dword(mci->pdev, I82875P_EAP, &info->eap); pci_read_config_byte(mci->pdev, I82875P_DES, &info->des); pci_read_config_byte(mci->pdev, I82875P_DERRSYN, - &info->derrsyn); + &info->derrsyn); } } -static int i82875p_process_error_info (struct mem_ctl_info *mci, +static int i82875p_process_error_info(struct mem_ctl_info *mci, struct i82875p_error_info *info, int handle_errors) { int row, multi_chan; @@ -243,23 +240,21 @@ static int i82875p_process_error_info (struct mem_ctl_info *mci, edac_mc_handle_ue(mci, info->eap, 0, row, "i82875p UE"); else edac_mc_handle_ce(mci, info->eap, 0, info->derrsyn, row, - multi_chan ? (info->des & 0x1) : 0, - "i82875p CE"); + multi_chan ? 
(info->des & 0x1) : 0, + "i82875p CE"); return 1; } - static void i82875p_check(struct mem_ctl_info *mci) { struct i82875p_error_info info; - debugf1("MC%d: " __FILE__ ": %s()\n", mci->mc_idx, __func__); + debugf1("MC%d: %s()\n", mci->mc_idx, __func__); i82875p_get_error_info(mci, &info); i82875p_process_error_info(mci, &info, 1); } - #ifdef CONFIG_PROC_FS extern int pci_proc_attach_device(struct pci_dev *); #endif @@ -273,15 +268,14 @@ static int i82875p_probe1(struct pci_dev *pdev, int dev_idx) unsigned long last_cumul_size; struct pci_dev *ovrfl_pdev; void __iomem *ovrfl_window = NULL; - u32 drc; u32 drc_chan; /* Number of channels 0=1chan,1=2chan */ u32 nr_chans; u32 drc_ddim; /* DRAM Data Integrity Mode 0=none,2=edac */ + struct i82875p_error_info discard; - debugf0("MC: " __FILE__ ": %s()\n", __func__); - - ovrfl_pdev = pci_find_device(PCI_VEND_DEV(INTEL, 82875_6), NULL); + debugf0("%s()\n", __func__); + ovrfl_pdev = pci_get_device(PCI_VEND_DEV(INTEL, 82875_6), NULL); if (!ovrfl_pdev) { /* @@ -292,71 +286,69 @@ static int i82875p_probe1(struct pci_dev *pdev, int dev_idx) */ pci_write_bits8(pdev, 0xf4, 0x2, 0x2); ovrfl_pdev = - pci_scan_single_device(pdev->bus, PCI_DEVFN(6, 0)); + pci_scan_single_device(pdev->bus, PCI_DEVFN(6, 0)); + if (!ovrfl_pdev) - goto fail; + return -ENODEV; } + #ifdef CONFIG_PROC_FS if (!ovrfl_pdev->procent && pci_proc_attach_device(ovrfl_pdev)) { - printk(KERN_ERR "MC: " __FILE__ - ": %s(): Failed to attach overflow device\n", - __func__); - goto fail; + i82875p_printk(KERN_ERR, + "%s(): Failed to attach overflow device\n", __func__); + return -ENODEV; } -#endif /* CONFIG_PROC_FS */ +#endif + /* CONFIG_PROC_FS */ if (pci_enable_device(ovrfl_pdev)) { - printk(KERN_ERR "MC: " __FILE__ - ": %s(): Failed to enable overflow device\n", - __func__); - goto fail; + i82875p_printk(KERN_ERR, + "%s(): Failed to enable overflow device\n", __func__); + return -ENODEV; } if (pci_request_regions(ovrfl_pdev, pci_name(ovrfl_pdev))) { #ifdef CORRECT_BIOS - goto fail; + goto fail0; #endif } + /* cache is irrelevant for PCI bus reads/writes */ ovrfl_window = ioremap_nocache(pci_resource_start(ovrfl_pdev, 0), - pci_resource_len(ovrfl_pdev, 0)); + pci_resource_len(ovrfl_pdev, 0)); if (!ovrfl_window) { - printk(KERN_ERR "MC: " __FILE__ - ": %s(): Failed to ioremap bar6\n", __func__); - goto fail; + i82875p_printk(KERN_ERR, "%s(): Failed to ioremap bar6\n", + __func__); + goto fail1; } /* need to find out the number of channels */ drc = readl(ovrfl_window + I82875P_DRC); drc_chan = ((drc >> 21) & 0x1); nr_chans = drc_chan + 1; - drc_ddim = (drc >> 18) & 0x1; + drc_ddim = (drc >> 18) & 0x1; mci = edac_mc_alloc(sizeof(*pvt), I82875P_NR_CSROWS(nr_chans), - nr_chans); + nr_chans); if (!mci) { rc = -ENOMEM; - goto fail; + goto fail2; } - debugf3("MC: " __FILE__ ": %s(): init mci\n", __func__); - + debugf3("%s(): init mci\n", __func__); mci->pdev = pdev; mci->mtype_cap = MEM_FLAG_DDR; - mci->edac_ctl_cap = EDAC_FLAG_NONE | EDAC_FLAG_SECDED; mci->edac_cap = EDAC_FLAG_UNKNOWN; /* adjust FLAGS */ - mci->mod_name = BS_MOD_STR; + mci->mod_name = EDAC_MOD_STR; mci->mod_ver = "$Revision: 1.5.2.11 $"; mci->ctl_name = i82875p_devs[dev_idx].ctl_name; mci->edac_check = i82875p_check; mci->ctl_page_to_phys = NULL; - - debugf3("MC: " __FILE__ ": %s(): init pvt\n", __func__); - + debugf3("%s(): init pvt\n", __func__); pvt = (struct i82875p_pvt *) mci->pvt_info; pvt->ovrfl_pdev = ovrfl_pdev; pvt->ovrfl_window = ovrfl_window; @@ -374,8 +366,9 @@ static int i82875p_probe1(struct pci_dev *pdev, int dev_idx) 
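Two changes run through the i82875p probe rework in this area: pci_find_device() is replaced by pci_get_device(), which returns a counted reference that must eventually be dropped with pci_dev_put(), and the single catch-all fail: label is split into numbered labels so each failure point only unwinds what has already been set up. A rough sketch of that shape with placeholder IDs and labels, not the driver's exact code:

#include <linux/pci.h>
#include <linux/pci_ids.h>
#include <linux/errno.h>
#include <asm/io.h>

static int example_probe(struct pci_dev *pdev)
{
	struct pci_dev *companion;
	void __iomem *regs;
	int rc = -ENODEV;

	/* counted reference; must be released with pci_dev_put() */
	companion = pci_get_device(PCI_VENDOR_ID_INTEL, 0x1234, NULL);
	if (!companion)
		return -ENODEV;

	if (pci_enable_device(companion))
		goto put_companion;

	regs = ioremap(pci_resource_start(companion, 0),
		       pci_resource_len(companion, 0));
	if (!regs) {
		rc = -ENOMEM;
		goto disable_companion;
	}

	/* ... program the hardware; a real driver would normally keep
	 * regs and companion around until its remove() method ... */
	iounmap(regs);
	pci_dev_put(companion);
	return 0;

disable_companion:
	pci_disable_device(companion);
put_companion:
	pci_dev_put(companion);
	return rc;
}

Each label undoes exactly one earlier step, so a failure halfway through never releases a resource that was never acquired, which was easy to get wrong with the old single fail: label.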
value = readb(ovrfl_window + I82875P_DRB + index); cumul_size = value << (I82875P_DRB_SHIFT - PAGE_SHIFT); - debugf3("MC: " __FILE__ ": %s(): (%d) cumul_size 0x%x\n", - __func__, index, cumul_size); + debugf3("%s(): (%d) cumul_size 0x%x\n", __func__, index, + cumul_size); + if (cumul_size == last_cumul_size) continue; /* not populated */ @@ -383,71 +376,72 @@ static int i82875p_probe1(struct pci_dev *pdev, int dev_idx) csrow->last_page = cumul_size - 1; csrow->nr_pages = cumul_size - last_cumul_size; last_cumul_size = cumul_size; - csrow->grain = 1 << 12; /* I82875P_EAP has 4KiB reolution */ + csrow->grain = 1 << 12; /* I82875P_EAP has 4KiB reolution */ csrow->mtype = MEM_DDR; csrow->dtype = DEV_UNKNOWN; csrow->edac_mode = drc_ddim ? EDAC_SECDED : EDAC_NONE; } - /* clear counters */ - pci_write_bits16(mci->pdev, I82875P_ERRSTS, 0x0081, 0x0081); + i82875p_get_error_info(mci, &discard); /* clear counters */ if (edac_mc_add_mc(mci)) { - debugf3("MC: " __FILE__ - ": %s(): failed edac_mc_add_mc()\n", __func__); - goto fail; + debugf3("%s(): failed edac_mc_add_mc()\n", __func__); + goto fail3; } /* get this far and it's successful */ - debugf3("MC: " __FILE__ ": %s(): success\n", __func__); + debugf3("%s(): success\n", __func__); return 0; - fail: - if (mci) - edac_mc_free(mci); +fail3: + edac_mc_free(mci); - if (ovrfl_window) - iounmap(ovrfl_window); +fail2: + iounmap(ovrfl_window); - if (ovrfl_pdev) { - pci_release_regions(ovrfl_pdev); - pci_disable_device(ovrfl_pdev); - } +fail1: + pci_release_regions(ovrfl_pdev); +#ifdef CORRECT_BIOS +fail0: +#endif + pci_disable_device(ovrfl_pdev); /* NOTE: the ovrfl proc entry and pci_dev are intentionally left */ return rc; } - /* returns count (>= 0), or negative on error */ static int __devinit i82875p_init_one(struct pci_dev *pdev, - const struct pci_device_id *ent) + const struct pci_device_id *ent) { int rc; - debugf0("MC: " __FILE__ ": %s()\n", __func__); + debugf0("%s()\n", __func__); + i82875p_printk(KERN_INFO, "i82875p init one\n"); - printk(KERN_INFO "i82875p init one\n"); - if(pci_enable_device(pdev) < 0) + if (pci_enable_device(pdev) < 0) return -EIO; + rc = i82875p_probe1(pdev, ent->driver_data); + if (mci_pdev == NULL) mci_pdev = pci_dev_get(pdev); + return rc; } - static void __devexit i82875p_remove_one(struct pci_dev *pdev) { struct mem_ctl_info *mci; struct i82875p_pvt *pvt = NULL; - debugf0(__FILE__ ": %s()\n", __func__); + debugf0("%s()\n", __func__); - if ((mci = edac_mc_find_mci_by_pdev(pdev)) == NULL) + if ((mci = edac_mc_del_mc(pdev)) == NULL) return; pvt = (struct i82875p_pvt *) mci->pvt_info; + if (pvt->ovrfl_window) iounmap(pvt->ovrfl_window); @@ -459,74 +453,84 @@ static void __devexit i82875p_remove_one(struct pci_dev *pdev) pci_dev_put(pvt->ovrfl_pdev); } - if (edac_mc_del_mc(mci)) - return; - edac_mc_free(mci); } - static const struct pci_device_id i82875p_pci_tbl[] __devinitdata = { - {PCI_VEND_DEV(INTEL, 82875_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0, - I82875P}, - {0,} /* 0 terminated list. */ + { + PCI_VEND_DEV(INTEL, 82875_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0, + I82875P + }, + { + 0, + } /* 0 terminated list. 
*/ }; MODULE_DEVICE_TABLE(pci, i82875p_pci_tbl); - static struct pci_driver i82875p_driver = { - .name = BS_MOD_STR, + .name = EDAC_MOD_STR, .probe = i82875p_init_one, .remove = __devexit_p(i82875p_remove_one), .id_table = i82875p_pci_tbl, }; - static int __init i82875p_init(void) { int pci_rc; - debugf3("MC: " __FILE__ ": %s()\n", __func__); + debugf3("%s()\n", __func__); pci_rc = pci_register_driver(&i82875p_driver); + if (pci_rc < 0) - return pci_rc; + goto fail0; + if (mci_pdev == NULL) { - i82875p_registered = 0; - mci_pdev = - pci_get_device(PCI_VENDOR_ID_INTEL, - PCI_DEVICE_ID_INTEL_82875_0, NULL); + mci_pdev = pci_get_device(PCI_VENDOR_ID_INTEL, + PCI_DEVICE_ID_INTEL_82875_0, NULL); + if (!mci_pdev) { debugf0("875p pci_get_device fail\n"); - return -ENODEV; + pci_rc = -ENODEV; + goto fail1; } + pci_rc = i82875p_init_one(mci_pdev, i82875p_pci_tbl); + if (pci_rc < 0) { debugf0("875p init fail\n"); - pci_dev_put(mci_pdev); - return -ENODEV; + pci_rc = -ENODEV; + goto fail1; } } + return 0; -} +fail1: + pci_unregister_driver(&i82875p_driver); + +fail0: + if (mci_pdev != NULL) + pci_dev_put(mci_pdev); + + return pci_rc; +} static void __exit i82875p_exit(void) { - debugf3("MC: " __FILE__ ": %s()\n", __func__); + debugf3("%s()\n", __func__); pci_unregister_driver(&i82875p_driver); + if (!i82875p_registered) { i82875p_remove_one(mci_pdev); pci_dev_put(mci_pdev); } } - module_init(i82875p_init); module_exit(i82875p_exit); - MODULE_LICENSE("GPL"); MODULE_AUTHOR("Linux Networx (http://lnxi.com) Thayne Harbaugh"); MODULE_DESCRIPTION("MC support for Intel 82875 memory hub controllers"); diff --git a/drivers/edac/r82600_edac.c b/drivers/edac/r82600_edac.c index e90892831b90..2c29fafe67c7 100644 --- a/drivers/edac/r82600_edac.c +++ b/drivers/edac/r82600_edac.c @@ -18,14 +18,17 @@ #include <linux/config.h> #include <linux/module.h> #include <linux/init.h> - #include <linux/pci.h> #include <linux/pci_ids.h> - #include <linux/slab.h> - #include "edac_mc.h" +#define r82600_printk(level, fmt, arg...) \ + edac_printk(level, "r82600", fmt, ##arg) + +#define r82600_mc_printk(mci, level, fmt, arg...) \ + edac_mc_chipset_printk(mci, level, "r82600", fmt, ##arg) + /* Radisys say "The 82600 integrates a main memory SDRAM controller that * supports up to four banks of memory. The four banks can support a mix of * sizes of 64 bit wide (72 bits with ECC) Synchronous DRAM (SDRAM) DIMMs, @@ -126,10 +129,8 @@ struct r82600_error_info { u32 eapr; }; - static unsigned int disable_hardware_scrub = 0; - static void r82600_get_error_info (struct mem_ctl_info *mci, struct r82600_error_info *info) { @@ -138,17 +139,16 @@ static void r82600_get_error_info (struct mem_ctl_info *mci, if (info->eapr & BIT(0)) /* Clear error to allow next error to be reported [p.62] */ pci_write_bits32(mci->pdev, R82600_EAP, - ((u32) BIT(0) & (u32) BIT(1)), - ((u32) BIT(0) & (u32) BIT(1))); + ((u32) BIT(0) & (u32) BIT(1)), + ((u32) BIT(0) & (u32) BIT(1))); if (info->eapr & BIT(1)) /* Clear error to allow next error to be reported [p.62] */ pci_write_bits32(mci->pdev, R82600_EAP, - ((u32) BIT(0) & (u32) BIT(1)), - ((u32) BIT(0) & (u32) BIT(1))); + ((u32) BIT(0) & (u32) BIT(1)), + ((u32) BIT(0) & (u32) BIT(1))); } - static int r82600_process_error_info (struct mem_ctl_info *mci, struct r82600_error_info *info, int handle_errors) { @@ -167,26 +167,25 @@ static int r82600_process_error_info (struct mem_ctl_info *mci, * granularity (upper 19 bits only) */ page = eapaddr >> PAGE_SHIFT; - if (info->eapr & BIT(0)) { /* CE? 
*/ + if (info->eapr & BIT(0)) { /* CE? */ error_found = 1; if (handle_errors) - edac_mc_handle_ce( - mci, page, 0, /* not avail */ - syndrome, - edac_mc_find_csrow_by_page(mci, page), - 0, /* channel */ - mci->ctl_name); + edac_mc_handle_ce(mci, page, 0, /* not avail */ + syndrome, + edac_mc_find_csrow_by_page(mci, page), + 0, /* channel */ + mci->ctl_name); } - if (info->eapr & BIT(1)) { /* UE? */ + if (info->eapr & BIT(1)) { /* UE? */ error_found = 1; if (handle_errors) /* 82600 doesn't give enough info */ edac_mc_handle_ue(mci, page, 0, - edac_mc_find_csrow_by_page(mci, page), - mci->ctl_name); + edac_mc_find_csrow_by_page(mci, page), + mci->ctl_name); } return error_found; @@ -196,7 +195,7 @@ static void r82600_check(struct mem_ctl_info *mci) { struct r82600_error_info info; - debugf1("MC%d: " __FILE__ ": %s()\n", mci->mc_idx, __func__); + debugf1("MC%d: %s()\n", mci->mc_idx, __func__); r82600_get_error_info(mci, &info); r82600_process_error_info(mci, &info, 1); } @@ -213,25 +212,18 @@ static int r82600_probe1(struct pci_dev *pdev, int dev_idx) u32 scrub_disabled; u32 sdram_refresh_rate; u32 row_high_limit_last = 0; - u32 eap_init_bits; - - debugf0("MC: " __FILE__ ": %s()\n", __func__); - + struct r82600_error_info discard; + debugf0("%s()\n", __func__); pci_read_config_byte(pdev, R82600_DRAMC, &dramcr); pci_read_config_dword(pdev, R82600_EAP, &eapr); - ecc_on = dramcr & BIT(5); reg_sdram = dramcr & BIT(4); scrub_disabled = eapr & BIT(31); sdram_refresh_rate = dramcr & (BIT(0) | BIT(1)); - - debugf2("MC: " __FILE__ ": %s(): sdram refresh rate = %#0x\n", - __func__, sdram_refresh_rate); - - debugf2("MC: " __FILE__ ": %s(): DRAMC register = %#0x\n", __func__, - dramcr); - + debugf2("%s(): sdram refresh rate = %#0x\n", __func__, + sdram_refresh_rate); + debugf2("%s(): DRAMC register = %#0x\n", __func__, dramcr); mci = edac_mc_alloc(0, R82600_NR_CSROWS, R82600_NR_CHANS); if (mci == NULL) { @@ -239,29 +231,28 @@ static int r82600_probe1(struct pci_dev *pdev, int dev_idx) goto fail; } - debugf0("MC: " __FILE__ ": %s(): mci = %p\n", __func__, mci); - + debugf0("%s(): mci = %p\n", __func__, mci); mci->pdev = pdev; mci->mtype_cap = MEM_FLAG_RDDR | MEM_FLAG_DDR; - mci->edac_ctl_cap = EDAC_FLAG_NONE | EDAC_FLAG_EC | EDAC_FLAG_SECDED; - /* FIXME try to work out if the chip leads have been * - * used for COM2 instead on this board? [MA6?] MAYBE: */ + /* FIXME try to work out if the chip leads have been used for COM2 + * instead on this board? [MA6?] MAYBE: + */ /* On the R82600, the pins for memory bits 72:65 - i.e. the * * EC bits are shared with the pins for COM2 (!), so if COM2 * * is enabled, we assume COM2 is wired up, and thus no EDAC * * is possible. */ mci->edac_cap = EDAC_FLAG_NONE | EDAC_FLAG_EC | EDAC_FLAG_SECDED; + if (ecc_on) { if (scrub_disabled) - debugf3("MC: " __FILE__ ": %s(): mci = %p - " - "Scrubbing disabled! EAP: %#0x\n", __func__, - mci, eapr); + debugf3("%s(): mci = %p - Scrubbing disabled! 
EAP: " + "%#0x\n", __func__, mci, eapr); } else mci->edac_cap = EDAC_FLAG_NONE; - mci->mod_name = BS_MOD_STR; + mci->mod_name = EDAC_MOD_STR; mci->mod_ver = "$Revision: 1.1.2.6 $"; mci->ctl_name = "R82600"; mci->edac_check = r82600_check; @@ -276,23 +267,21 @@ static int r82600_probe1(struct pci_dev *pdev, int dev_idx) /* find the DRAM Chip Select Base address and mask */ pci_read_config_byte(mci->pdev, R82600_DRBA + index, &drbar); - debugf1("MC%d: " __FILE__ ": %s() Row=%d DRBA = %#0x\n", - mci->mc_idx, __func__, index, drbar); + debugf1("MC%d: %s() Row=%d DRBA = %#0x\n", mci->mc_idx, + __func__, index, drbar); row_high_limit = ((u32) drbar << 24); /* row_high_limit = ((u32)drbar << 24) | 0xffffffUL; */ - debugf1("MC%d: " __FILE__ ": %s() Row=%d, " - "Boundry Address=%#0x, Last = %#0x \n", - mci->mc_idx, __func__, index, row_high_limit, - row_high_limit_last); + debugf1("MC%d: %s() Row=%d, Boundry Address=%#0x, Last = " + "%#0x \n", mci->mc_idx, __func__, index, + row_high_limit, row_high_limit_last); /* Empty row [p.57] */ if (row_high_limit == row_high_limit_last) continue; row_base = row_high_limit_last; - csrow->first_page = row_base >> PAGE_SHIFT; csrow->last_page = (row_high_limit >> PAGE_SHIFT) - 1; csrow->nr_pages = csrow->last_page - csrow->first_page + 1; @@ -308,31 +297,22 @@ static int r82600_probe1(struct pci_dev *pdev, int dev_idx) row_high_limit_last = row_high_limit; } - /* clear counters */ - /* FIXME should we? */ + r82600_get_error_info(mci, &discard); /* clear counters */ if (edac_mc_add_mc(mci)) { - debugf3("MC: " __FILE__ - ": %s(): failed edac_mc_add_mc()\n", __func__); + debugf3("%s(): failed edac_mc_add_mc()\n", __func__); goto fail; } /* get this far and it's successful */ - /* Clear error flags to allow next error to be reported [p.62] */ - /* Test systems seem to always have the UE flag raised on boot */ - - eap_init_bits = BIT(0) & BIT(1); if (disable_hardware_scrub) { - eap_init_bits |= BIT(31); - debugf3("MC: " __FILE__ ": %s(): Disabling Hardware Scrub " - "(scrub on error)\n", __func__); + debugf3("%s(): Disabling Hardware Scrub (scrub on error)\n", + __func__); + pci_write_bits32(mci->pdev, R82600_EAP, BIT(31), BIT(31)); } - pci_write_bits32(mci->pdev, R82600_EAP, eap_init_bits, - eap_init_bits); - - debugf3("MC: " __FILE__ ": %s(): success\n", __func__); + debugf3("%s(): success\n", __func__); return 0; fail: @@ -344,62 +324,60 @@ fail: /* returns count (>= 0), or negative on error */ static int __devinit r82600_init_one(struct pci_dev *pdev, - const struct pci_device_id *ent) + const struct pci_device_id *ent) { - debugf0("MC: " __FILE__ ": %s()\n", __func__); + debugf0("%s()\n", __func__); /* don't need to call pci_device_enable() */ return r82600_probe1(pdev, ent->driver_data); } - static void __devexit r82600_remove_one(struct pci_dev *pdev) { struct mem_ctl_info *mci; - debugf0(__FILE__ ": %s()\n", __func__); + debugf0("%s()\n", __func__); - if (((mci = edac_mc_find_mci_by_pdev(pdev)) != NULL) && - !edac_mc_del_mc(mci)) - edac_mc_free(mci); -} + if ((mci = edac_mc_del_mc(pdev)) == NULL) + return; + edac_mc_free(mci); +} static const struct pci_device_id r82600_pci_tbl[] __devinitdata = { - {PCI_DEVICE(PCI_VENDOR_ID_RADISYS, R82600_BRIDGE_ID)}, - {0,} /* 0 terminated list. */ + { + PCI_DEVICE(PCI_VENDOR_ID_RADISYS, R82600_BRIDGE_ID) + }, + { + 0, + } /* 0 terminated list. 
*/ }; MODULE_DEVICE_TABLE(pci, r82600_pci_tbl); - static struct pci_driver r82600_driver = { - .name = BS_MOD_STR, + .name = EDAC_MOD_STR, .probe = r82600_init_one, .remove = __devexit_p(r82600_remove_one), .id_table = r82600_pci_tbl, }; - static int __init r82600_init(void) { return pci_register_driver(&r82600_driver); } - static void __exit r82600_exit(void) { pci_unregister_driver(&r82600_driver); } - module_init(r82600_init); module_exit(r82600_exit); - MODULE_LICENSE("GPL"); MODULE_AUTHOR("Tim Small <tim@buttersideup.com> - WPAD Ltd. " - "on behalf of EADS Astrium"); + "on behalf of EADS Astrium"); MODULE_DESCRIPTION("MC support for Radisys 82600 memory controllers"); module_param(disable_hardware_scrub, bool, 0644); diff --git a/drivers/firmware/dcdbas.c b/drivers/firmware/dcdbas.c index d6543fc4a923..339f405ff708 100644 --- a/drivers/firmware/dcdbas.c +++ b/drivers/firmware/dcdbas.c @@ -484,26 +484,15 @@ static void dcdbas_host_control(void) static int dcdbas_reboot_notify(struct notifier_block *nb, unsigned long code, void *unused) { - static unsigned int notify_cnt = 0; - switch (code) { case SYS_DOWN: case SYS_HALT: case SYS_POWER_OFF: if (host_control_on_shutdown) { /* firmware is going to perform host control action */ - if (++notify_cnt == 2) { - printk(KERN_WARNING - "Please wait for shutdown " - "action to complete...\n"); - dcdbas_host_control(); - } - /* - * register again and initiate the host control - * action on the second notification to allow - * everyone that registered to be notified - */ - register_reboot_notifier(nb); + printk(KERN_WARNING "Please wait for shutdown " + "action to complete...\n"); + dcdbas_host_control(); } break; } @@ -514,7 +503,7 @@ static int dcdbas_reboot_notify(struct notifier_block *nb, unsigned long code, static struct notifier_block dcdbas_reboot_nb = { .notifier_call = dcdbas_reboot_notify, .next = NULL, - .priority = 0 + .priority = INT_MIN }; static DCDBAS_BIN_ATTR_RW(smi_data); diff --git a/drivers/firmware/efivars.c b/drivers/firmware/efivars.c index 343379f23a53..9b7e4d52ffd4 100644 --- a/drivers/firmware/efivars.c +++ b/drivers/firmware/efivars.c @@ -568,20 +568,20 @@ systab_read(struct subsystem *entry, char *buf) if (!entry || !buf) return -EINVAL; - if (efi.mps) - str += sprintf(str, "MPS=0x%lx\n", __pa(efi.mps)); - if (efi.acpi20) - str += sprintf(str, "ACPI20=0x%lx\n", __pa(efi.acpi20)); - if (efi.acpi) - str += sprintf(str, "ACPI=0x%lx\n", __pa(efi.acpi)); - if (efi.smbios) - str += sprintf(str, "SMBIOS=0x%lx\n", __pa(efi.smbios)); - if (efi.hcdp) - str += sprintf(str, "HCDP=0x%lx\n", __pa(efi.hcdp)); - if (efi.boot_info) - str += sprintf(str, "BOOTINFO=0x%lx\n", __pa(efi.boot_info)); - if (efi.uga) - str += sprintf(str, "UGA=0x%lx\n", __pa(efi.uga)); + if (efi.mps != EFI_INVALID_TABLE_ADDR) + str += sprintf(str, "MPS=0x%lx\n", efi.mps); + if (efi.acpi20 != EFI_INVALID_TABLE_ADDR) + str += sprintf(str, "ACPI20=0x%lx\n", efi.acpi20); + if (efi.acpi != EFI_INVALID_TABLE_ADDR) + str += sprintf(str, "ACPI=0x%lx\n", efi.acpi); + if (efi.smbios != EFI_INVALID_TABLE_ADDR) + str += sprintf(str, "SMBIOS=0x%lx\n", efi.smbios); + if (efi.hcdp != EFI_INVALID_TABLE_ADDR) + str += sprintf(str, "HCDP=0x%lx\n", efi.hcdp); + if (efi.boot_info != EFI_INVALID_TABLE_ADDR) + str += sprintf(str, "BOOTINFO=0x%lx\n", efi.boot_info); + if (efi.uga != EFI_INVALID_TABLE_ADDR) + str += sprintf(str, "UGA=0x%lx\n", efi.uga); return str - buf; } diff --git a/drivers/firmware/pcdp.c b/drivers/firmware/pcdp.c index ae1fb45dbb40..c37baf9448bc 100644 --- 
a/drivers/firmware/pcdp.c +++ b/drivers/firmware/pcdp.c @@ -89,19 +89,20 @@ efi_setup_pcdp_console(char *cmdline) struct pcdp_uart *uart; struct pcdp_device *dev, *end; int i, serial = 0; + int rc = -ENODEV; - pcdp = efi.hcdp; - if (!pcdp) + if (efi.hcdp == EFI_INVALID_TABLE_ADDR) return -ENODEV; - printk(KERN_INFO "PCDP: v%d at 0x%lx\n", pcdp->rev, __pa(pcdp)); + pcdp = ioremap(efi.hcdp, 4096); + printk(KERN_INFO "PCDP: v%d at 0x%lx\n", pcdp->rev, efi.hcdp); if (strstr(cmdline, "console=hcdp")) { if (pcdp->rev < 3) serial = 1; } else if (strstr(cmdline, "console=")) { printk(KERN_INFO "Explicit \"console=\"; ignoring PCDP\n"); - return -ENODEV; + goto out; } if (pcdp->rev < 3 && efi_uart_console_only()) @@ -110,7 +111,8 @@ efi_setup_pcdp_console(char *cmdline) for (i = 0, uart = pcdp->uart; i < pcdp->num_uarts; i++, uart++) { if (uart->flags & PCDP_UART_PRIMARY_CONSOLE || serial) { if (uart->type == PCDP_CONSOLE_UART) { - return setup_serial_console(uart); + rc = setup_serial_console(uart); + goto out; } } } @@ -121,10 +123,13 @@ efi_setup_pcdp_console(char *cmdline) dev = (struct pcdp_device *) ((u8 *) dev + dev->length)) { if (dev->flags & PCDP_PRIMARY_CONSOLE) { if (dev->type == PCDP_CONSOLE_VGA) { - return setup_vga_console(dev); + rc = setup_vga_console(dev); + goto out; } } } - return -ENODEV; +out: + iounmap(pcdp); + return rc; } diff --git a/drivers/i2c/chips/Kconfig b/drivers/i2c/chips/Kconfig index f9fae28f5612..7aa5c38f0855 100644 --- a/drivers/i2c/chips/Kconfig +++ b/drivers/i2c/chips/Kconfig @@ -65,15 +65,6 @@ config SENSORS_PCF8591 This driver can also be built as a module. If so, the module will be called pcf8591. -config SENSORS_RTC8564 - tristate "Epson 8564 RTC chip" - depends on I2C && EXPERIMENTAL - help - If you say yes here you get support for the Epson 8564 RTC chip. - - This driver can also be built as a module. If so, the module - will be called i2c-rtc8564. - config ISP1301_OMAP tristate "Philips ISP1301 with OMAP OTG" depends on I2C && ARCH_OMAP_OTG @@ -126,13 +117,4 @@ config SENSORS_MAX6875 This driver can also be built as a module. If so, the module will be called max6875. -config RTC_X1205_I2C - tristate "Xicor X1205 RTC chip" - depends on I2C && EXPERIMENTAL - help - If you say yes here you get support for the Xicor X1205 RTC chip. - - This driver can also be built as a module. If so, the module - will be called x1205. 
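The pcdp.c hunk above stops treating efi.hcdp as a kernel pointer: it is now a physical table address that is compared against EFI_INVALID_TABLE_ADDR, mapped with ioremap(), and released on every exit path through a single out: label. A generic sketch of that access pattern (the 4096-byte mapping size mirrors the patch; the validation step is a stand-in, not real PCDP parsing):

#include <linux/efi.h>
#include <linux/errno.h>
#include <asm/io.h>

static int use_firmware_table(void)
{
	void __iomem *tbl;
	int rc = -ENODEV;

	if (efi.hcdp == EFI_INVALID_TABLE_ADDR)
		return -ENODEV;		/* firmware did not publish the table */

	tbl = ioremap(efi.hcdp, 4096);	/* map the physical address */
	if (!tbl)
		return -ENOMEM;

	if (readb(tbl) == 0xff)		/* stand-in for a real sanity check */
		goto out;		/* early exits still unmap */

	/* ... walk the table and pick a console here ... */
	rc = 0;
out:
	iounmap(tbl);
	return rc;
}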
- endmenu diff --git a/drivers/i2c/chips/Makefile b/drivers/i2c/chips/Makefile index 46178b57b1f1..779868ef2e26 100644 --- a/drivers/i2c/chips/Makefile +++ b/drivers/i2c/chips/Makefile @@ -10,10 +10,8 @@ obj-$(CONFIG_SENSORS_M41T00) += m41t00.o obj-$(CONFIG_SENSORS_PCA9539) += pca9539.o obj-$(CONFIG_SENSORS_PCF8574) += pcf8574.o obj-$(CONFIG_SENSORS_PCF8591) += pcf8591.o -obj-$(CONFIG_SENSORS_RTC8564) += rtc8564.o obj-$(CONFIG_ISP1301_OMAP) += isp1301_omap.o obj-$(CONFIG_TPS65010) += tps65010.o -obj-$(CONFIG_RTC_X1205_I2C) += x1205.o ifeq ($(CONFIG_I2C_DEBUG_CHIP),y) EXTRA_CFLAGS += -DDEBUG diff --git a/drivers/i2c/chips/rtc8564.c b/drivers/i2c/chips/rtc8564.c deleted file mode 100644 index 0d8699b3f488..000000000000 --- a/drivers/i2c/chips/rtc8564.c +++ /dev/null @@ -1,385 +0,0 @@ -/* - * linux/drivers/i2c/chips/rtc8564.c - * - * Copyright (C) 2002-2004 Stefan Eletzhofer - * - * based on linux/drivers/acron/char/pcf8583.c - * Copyright (C) 2000 Russell King - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. - * - * Driver for system3's EPSON RTC 8564 chip - */ -#include <linux/module.h> -#include <linux/kernel.h> -#include <linux/bcd.h> -#include <linux/i2c.h> -#include <linux/slab.h> -#include <linux/string.h> -#include <linux/rtc.h> /* get the user-level API */ -#include <linux/init.h> - -#include "rtc8564.h" - -#ifdef DEBUG -# define _DBG(x, fmt, args...) do{ if (debug>=x) printk(KERN_DEBUG"%s: " fmt "\n", __FUNCTION__, ##args); } while(0); -#else -# define _DBG(x, fmt, args...) do { } while(0); -#endif - -#define _DBGRTCTM(x, rtctm) if (debug>=x) printk("%s: secs=%d, mins=%d, hours=%d, mday=%d, " \ - "mon=%d, year=%d, wday=%d VL=%d\n", __FUNCTION__, \ - (rtctm).secs, (rtctm).mins, (rtctm).hours, (rtctm).mday, \ - (rtctm).mon, (rtctm).year, (rtctm).wday, (rtctm).vl); - -struct rtc8564_data { - struct i2c_client client; - u16 ctrl; -}; - -static inline u8 _rtc8564_ctrl1(struct i2c_client *client) -{ - struct rtc8564_data *data = i2c_get_clientdata(client); - return data->ctrl & 0xff; -} -static inline u8 _rtc8564_ctrl2(struct i2c_client *client) -{ - struct rtc8564_data *data = i2c_get_clientdata(client); - return (data->ctrl & 0xff00) >> 8; -} - -#define CTRL1(c) _rtc8564_ctrl1(c) -#define CTRL2(c) _rtc8564_ctrl2(c) - -static int debug; -module_param(debug, int, S_IRUGO | S_IWUSR); - -static struct i2c_driver rtc8564_driver; - -static unsigned short ignore[] = { I2C_CLIENT_END }; -static unsigned short normal_addr[] = { 0x51, I2C_CLIENT_END }; - -static struct i2c_client_address_data addr_data = { - .normal_i2c = normal_addr, - .probe = ignore, - .ignore = ignore, -}; - -static int rtc8564_read_mem(struct i2c_client *client, struct mem *mem); -static int rtc8564_write_mem(struct i2c_client *client, struct mem *mem); - -static int rtc8564_read(struct i2c_client *client, unsigned char adr, - unsigned char *buf, unsigned char len) -{ - int ret = -EIO; - unsigned char addr[1] = { adr }; - struct i2c_msg msgs[2] = { - {client->addr, 0, 1, addr}, - {client->addr, I2C_M_RD, len, buf} - }; - - _DBG(1, "client=%p, adr=%d, buf=%p, len=%d", client, adr, buf, len); - - if (!buf) { - ret = -EINVAL; - goto done; - } - - ret = i2c_transfer(client->adapter, msgs, 2); - if (ret == 2) { - ret = 0; - } - -done: - return ret; -} - -static int rtc8564_write(struct i2c_client *client, unsigned char adr, - unsigned char *data, unsigned char len) -{ - int ret = 0; - 
unsigned char _data[16]; - struct i2c_msg wr; - int i; - - if (!data || len > 15) { - ret = -EINVAL; - goto done; - } - - _DBG(1, "client=%p, adr=%d, buf=%p, len=%d", client, adr, data, len); - - _data[0] = adr; - for (i = 0; i < len; i++) { - _data[i + 1] = data[i]; - _DBG(5, "data[%d] = 0x%02x (%d)", i, data[i], data[i]); - } - - wr.addr = client->addr; - wr.flags = 0; - wr.len = len + 1; - wr.buf = _data; - - ret = i2c_transfer(client->adapter, &wr, 1); - if (ret == 1) { - ret = 0; - } - -done: - return ret; -} - -static int rtc8564_attach(struct i2c_adapter *adap, int addr, int kind) -{ - int ret; - struct i2c_client *new_client; - struct rtc8564_data *d; - unsigned char data[10]; - unsigned char ad[1] = { 0 }; - struct i2c_msg ctrl_wr[1] = { - {addr, 0, 2, data} - }; - struct i2c_msg ctrl_rd[2] = { - {addr, 0, 1, ad}, - {addr, I2C_M_RD, 2, data} - }; - - d = kzalloc(sizeof(struct rtc8564_data), GFP_KERNEL); - if (!d) { - ret = -ENOMEM; - goto done; - } - new_client = &d->client; - - strlcpy(new_client->name, "RTC8564", I2C_NAME_SIZE); - i2c_set_clientdata(new_client, d); - new_client->addr = addr; - new_client->adapter = adap; - new_client->driver = &rtc8564_driver; - - _DBG(1, "client=%p", new_client); - - /* init ctrl1 reg */ - data[0] = 0; - data[1] = 0; - ret = i2c_transfer(new_client->adapter, ctrl_wr, 1); - if (ret != 1) { - printk(KERN_INFO "rtc8564: cant init ctrl1\n"); - ret = -ENODEV; - goto done; - } - - /* read back ctrl1 and ctrl2 */ - ret = i2c_transfer(new_client->adapter, ctrl_rd, 2); - if (ret != 2) { - printk(KERN_INFO "rtc8564: cant read ctrl\n"); - ret = -ENODEV; - goto done; - } - - d->ctrl = data[0] | (data[1] << 8); - - _DBG(1, "RTC8564_REG_CTRL1=%02x, RTC8564_REG_CTRL2=%02x", - data[0], data[1]); - - ret = i2c_attach_client(new_client); -done: - if (ret) { - kfree(d); - } - return ret; -} - -static int rtc8564_probe(struct i2c_adapter *adap) -{ - return i2c_probe(adap, &addr_data, rtc8564_attach); -} - -static int rtc8564_detach(struct i2c_client *client) -{ - i2c_detach_client(client); - kfree(i2c_get_clientdata(client)); - return 0; -} - -static int rtc8564_get_datetime(struct i2c_client *client, struct rtc_tm *dt) -{ - int ret = -EIO; - unsigned char buf[15]; - - _DBG(1, "client=%p, dt=%p", client, dt); - - if (!dt) - return -EINVAL; - - memset(buf, 0, sizeof(buf)); - - ret = rtc8564_read(client, 0, buf, 15); - if (ret) - return ret; - - /* century stored in minute alarm reg */ - dt->year = BCD2BIN(buf[RTC8564_REG_YEAR]); - dt->year += 100 * BCD2BIN(buf[RTC8564_REG_AL_MIN] & 0x3f); - dt->mday = BCD2BIN(buf[RTC8564_REG_DAY] & 0x3f); - dt->wday = BCD2BIN(buf[RTC8564_REG_WDAY] & 7); - dt->mon = BCD2BIN(buf[RTC8564_REG_MON_CENT] & 0x1f); - - dt->secs = BCD2BIN(buf[RTC8564_REG_SEC] & 0x7f); - dt->vl = (buf[RTC8564_REG_SEC] & 0x80) == 0x80; - dt->mins = BCD2BIN(buf[RTC8564_REG_MIN] & 0x7f); - dt->hours = BCD2BIN(buf[RTC8564_REG_HR] & 0x3f); - - _DBGRTCTM(2, *dt); - - return 0; -} - -static int -rtc8564_set_datetime(struct i2c_client *client, struct rtc_tm *dt, int datetoo) -{ - int ret, len = 5; - unsigned char buf[15]; - - _DBG(1, "client=%p, dt=%p", client, dt); - - if (!dt) - return -EINVAL; - - _DBGRTCTM(2, *dt); - - buf[RTC8564_REG_CTRL1] = CTRL1(client) | RTC8564_CTRL1_STOP; - buf[RTC8564_REG_CTRL2] = CTRL2(client); - buf[RTC8564_REG_SEC] = BIN2BCD(dt->secs); - buf[RTC8564_REG_MIN] = BIN2BCD(dt->mins); - buf[RTC8564_REG_HR] = BIN2BCD(dt->hours); - - if (datetoo) { - len += 5; - buf[RTC8564_REG_DAY] = BIN2BCD(dt->mday); - buf[RTC8564_REG_WDAY] = 
BIN2BCD(dt->wday); - buf[RTC8564_REG_MON_CENT] = BIN2BCD(dt->mon) & 0x1f; - /* century stored in minute alarm reg */ - buf[RTC8564_REG_YEAR] = BIN2BCD(dt->year % 100); - buf[RTC8564_REG_AL_MIN] = BIN2BCD(dt->year / 100); - } - - ret = rtc8564_write(client, 0, buf, len); - if (ret) { - _DBG(1, "error writing data! %d", ret); - } - - buf[RTC8564_REG_CTRL1] = CTRL1(client); - ret = rtc8564_write(client, 0, buf, 1); - if (ret) { - _DBG(1, "error writing data! %d", ret); - } - - return ret; -} - -static int rtc8564_get_ctrl(struct i2c_client *client, unsigned int *ctrl) -{ - struct rtc8564_data *data = i2c_get_clientdata(client); - - if (!ctrl) - return -1; - - *ctrl = data->ctrl; - return 0; -} - -static int rtc8564_set_ctrl(struct i2c_client *client, unsigned int *ctrl) -{ - struct rtc8564_data *data = i2c_get_clientdata(client); - unsigned char buf[2]; - - if (!ctrl) - return -1; - - buf[0] = *ctrl & 0xff; - buf[1] = (*ctrl & 0xff00) >> 8; - data->ctrl = *ctrl; - - return rtc8564_write(client, 0, buf, 2); -} - -static int rtc8564_read_mem(struct i2c_client *client, struct mem *mem) -{ - - if (!mem) - return -EINVAL; - - return rtc8564_read(client, mem->loc, mem->data, mem->nr); -} - -static int rtc8564_write_mem(struct i2c_client *client, struct mem *mem) -{ - - if (!mem) - return -EINVAL; - - return rtc8564_write(client, mem->loc, mem->data, mem->nr); -} - -static int -rtc8564_command(struct i2c_client *client, unsigned int cmd, void *arg) -{ - - _DBG(1, "cmd=%d", cmd); - - switch (cmd) { - case RTC_GETDATETIME: - return rtc8564_get_datetime(client, arg); - - case RTC_SETTIME: - return rtc8564_set_datetime(client, arg, 0); - - case RTC_SETDATETIME: - return rtc8564_set_datetime(client, arg, 1); - - case RTC_GETCTRL: - return rtc8564_get_ctrl(client, arg); - - case RTC_SETCTRL: - return rtc8564_set_ctrl(client, arg); - - case MEM_READ: - return rtc8564_read_mem(client, arg); - - case MEM_WRITE: - return rtc8564_write_mem(client, arg); - - default: - return -EINVAL; - } -} - -static struct i2c_driver rtc8564_driver = { - .driver = { - .name = "RTC8564", - }, - .id = I2C_DRIVERID_RTC8564, - .attach_adapter = rtc8564_probe, - .detach_client = rtc8564_detach, - .command = rtc8564_command -}; - -static __init int rtc8564_init(void) -{ - return i2c_add_driver(&rtc8564_driver); -} - -static __exit void rtc8564_exit(void) -{ - i2c_del_driver(&rtc8564_driver); -} - -MODULE_AUTHOR("Stefan Eletzhofer <Stefan.Eletzhofer@eletztrick.de>"); -MODULE_DESCRIPTION("EPSON RTC8564 Driver"); -MODULE_LICENSE("GPL"); - -module_init(rtc8564_init); -module_exit(rtc8564_exit); diff --git a/drivers/i2c/chips/rtc8564.h b/drivers/i2c/chips/rtc8564.h deleted file mode 100644 index e5342d10b8fa..000000000000 --- a/drivers/i2c/chips/rtc8564.h +++ /dev/null @@ -1,78 +0,0 @@ -/* - * linux/drivers/i2c/chips/rtc8564.h - * - * Copyright (C) 2002-2004 Stefan Eletzhofer - * - * based on linux/drivers/acron/char/pcf8583.h - * Copyright (C) 2000 Russell King - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. 
- */ -struct rtc_tm { - unsigned char secs; - unsigned char mins; - unsigned char hours; - unsigned char mday; - unsigned char mon; - unsigned short year; /* xxxx 4 digits :) */ - unsigned char wday; - unsigned char vl; -}; - -struct mem { - unsigned int loc; - unsigned int nr; - unsigned char *data; -}; - -#define RTC_GETDATETIME 0 -#define RTC_SETTIME 1 -#define RTC_SETDATETIME 2 -#define RTC_GETCTRL 3 -#define RTC_SETCTRL 4 -#define MEM_READ 5 -#define MEM_WRITE 6 - -#define RTC8564_REG_CTRL1 0x0 /* T 0 S 0 | T 0 0 0 */ -#define RTC8564_REG_CTRL2 0x1 /* 0 0 0 TI/TP | AF TF AIE TIE */ -#define RTC8564_REG_SEC 0x2 /* VL 4 2 1 | 8 4 2 1 */ -#define RTC8564_REG_MIN 0x3 /* x 4 2 1 | 8 4 2 1 */ -#define RTC8564_REG_HR 0x4 /* x x 2 1 | 8 4 2 1 */ -#define RTC8564_REG_DAY 0x5 /* x x 2 1 | 8 4 2 1 */ -#define RTC8564_REG_WDAY 0x6 /* x x x x | x 4 2 1 */ -#define RTC8564_REG_MON_CENT 0x7 /* C x x 1 | 8 4 2 1 */ -#define RTC8564_REG_YEAR 0x8 /* 8 4 2 1 | 8 4 2 1 */ -#define RTC8564_REG_AL_MIN 0x9 /* AE 4 2 1 | 8 4 2 1 */ -#define RTC8564_REG_AL_HR 0xa /* AE 4 2 1 | 8 4 2 1 */ -#define RTC8564_REG_AL_DAY 0xb /* AE x 2 1 | 8 4 2 1 */ -#define RTC8564_REG_AL_WDAY 0xc /* AE x x x | x 4 2 1 */ -#define RTC8564_REG_CLKOUT 0xd /* FE x x x | x x FD1 FD0 */ -#define RTC8564_REG_TCTL 0xe /* TE x x x | x x FD1 FD0 */ -#define RTC8564_REG_TIMER 0xf /* 8 bit binary */ - -/* Control reg */ -#define RTC8564_CTRL1_TEST1 (1<<3) -#define RTC8564_CTRL1_STOP (1<<5) -#define RTC8564_CTRL1_TEST2 (1<<7) - -#define RTC8564_CTRL2_TIE (1<<0) -#define RTC8564_CTRL2_AIE (1<<1) -#define RTC8564_CTRL2_TF (1<<2) -#define RTC8564_CTRL2_AF (1<<3) -#define RTC8564_CTRL2_TI_TP (1<<4) - -/* CLKOUT frequencies */ -#define RTC8564_FD_32768HZ (0x0) -#define RTC8564_FD_1024HZ (0x1) -#define RTC8564_FD_32 (0x2) -#define RTC8564_FD_1HZ (0x3) - -/* Timer CTRL */ -#define RTC8564_TD_4096HZ (0x0) -#define RTC8564_TD_64HZ (0x1) -#define RTC8564_TD_1HZ (0x2) -#define RTC8564_TD_1_60HZ (0x3) - -#define I2C_DRIVERID_RTC8564 0xf000 diff --git a/drivers/ide/ide-cd.c b/drivers/ide/ide-cd.c index c7671e188017..b4a41d6d0714 100644 --- a/drivers/ide/ide-cd.c +++ b/drivers/ide/ide-cd.c @@ -2143,6 +2143,7 @@ static int cdrom_read_capacity(ide_drive_t *drive, unsigned long *capacity, req.cmd[0] = GPCMD_READ_CDVD_CAPACITY; req.data = (char *)&capbuf; req.data_len = sizeof(capbuf); + req.flags |= REQ_QUIET; stat = cdrom_queue_packet_command(drive, &req); if (stat == 0) { diff --git a/drivers/ide/ide-tape.c b/drivers/ide/ide-tape.c index ebc59064b475..f04791a58df0 100644 --- a/drivers/ide/ide-tape.c +++ b/drivers/ide/ide-tape.c @@ -433,6 +433,7 @@ #include <linux/timer.h> #include <linux/mm.h> #include <linux/interrupt.h> +#include <linux/jiffies.h> #include <linux/major.h> #include <linux/devfs_fs_kernel.h> #include <linux/errno.h> @@ -2336,7 +2337,7 @@ static ide_startstop_t idetape_rw_callback (ide_drive_t *drive) } if (time_after(jiffies, tape->insert_time)) tape->insert_speed = tape->insert_size / 1024 * HZ / (jiffies - tape->insert_time); - if (jiffies - tape->avg_time >= HZ) { + if (time_after_eq(jiffies, tape->avg_time + HZ)) { tape->avg_speed = tape->avg_size * HZ / (jiffies - tape->avg_time) / 1024; tape->avg_size = 0; tape->avg_time = jiffies; @@ -2497,7 +2498,7 @@ static ide_startstop_t idetape_do_request(ide_drive_t *drive, } else { return ide_do_reset(drive); } - } else if (jiffies - tape->dsc_polling_start > IDETAPE_DSC_MA_THRESHOLD) + } else if (time_after(jiffies, tape->dsc_polling_start + IDETAPE_DSC_MA_THRESHOLD)) 
tape->dsc_polling_frequency = IDETAPE_DSC_MA_SLOW; idetape_postpone_request(drive); return ide_stopped; diff --git a/drivers/ide/pci/amd74xx.c b/drivers/ide/pci/amd74xx.c index 21965e5ef25e..b22ee5462318 100644 --- a/drivers/ide/pci/amd74xx.c +++ b/drivers/ide/pci/amd74xx.c @@ -347,10 +347,8 @@ static unsigned int __devinit init_chipset_amd74xx(struct pci_dev *dev, const ch break; case AMD_UDMA_66: - pci_read_config_dword(dev, AMD_UDMA_TIMING, &u); - for (i = 24; i >= 0; i -= 8) - if ((u >> i) & 4) - amd_80w |= (1 << (1 - (i >> 4))); + /* no host side cable detection */ + amd_80w = 0x03; break; } @@ -386,8 +384,6 @@ static unsigned int __devinit init_chipset_amd74xx(struct pci_dev *dev, const ch if (amd_clock < 20000 || amd_clock > 50000) { printk(KERN_WARNING "%s: User given PCI clock speed impossible (%d), using 33 MHz instead.\n", amd_chipset->name, amd_clock); - printk(KERN_WARNING "%s: Use ide0=ata66 if you want to assume 80-wire cable\n", - amd_chipset->name); amd_clock = 33333; } diff --git a/drivers/ide/pci/generic.c b/drivers/ide/pci/generic.c index 6e3ab0c38c4d..f82e82109728 100644 --- a/drivers/ide/pci/generic.c +++ b/drivers/ide/pci/generic.c @@ -41,14 +41,15 @@ static int ide_generic_all; /* Set to claim all devices */ +#ifndef MODULE static int __init ide_generic_all_on(char *unused) { ide_generic_all = 1; printk(KERN_INFO "IDE generic will claim all unknown PCI IDE storage controllers.\n"); return 1; } - __setup("all-generic-ide", ide_generic_all_on); +#endif static void __devinit init_hwif_generic (ide_hwif_t *hwif) { diff --git a/drivers/ide/pci/sis5513.c b/drivers/ide/pci/sis5513.c index 75a2253a3e68..8e9d87701ce2 100644 --- a/drivers/ide/pci/sis5513.c +++ b/drivers/ide/pci/sis5513.c @@ -112,6 +112,7 @@ static const struct { { "SiS5596", PCI_DEVICE_ID_SI_5596, ATA_16 }, { "SiS5571", PCI_DEVICE_ID_SI_5571, ATA_16 }, + { "SiS5517", PCI_DEVICE_ID_SI_5517, ATA_16 }, { "SiS551x", PCI_DEVICE_ID_SI_5511, ATA_16 }, }; @@ -524,6 +525,7 @@ static void config_art_rwp_pio (ide_drive_t *drive, u8 pio) case 3: test1 = 0x30|0x03; break; case 2: test1 = 0x40|0x04; break; case 1: test1 = 0x60|0x07; break; + case 0: test1 = 0x00; break; default: break; } pci_write_config_byte(dev, drive_pci, test1); diff --git a/drivers/ieee1394/highlevel.c b/drivers/ieee1394/highlevel.c index 734b121a0554..491e6032bdec 100644 --- a/drivers/ieee1394/highlevel.c +++ b/drivers/ieee1394/highlevel.c @@ -306,8 +306,7 @@ u64 hpsb_allocate_and_register_addrspace(struct hpsb_highlevel *hl, u64 align_mask = ~(alignment - 1); if ((alignment & 3) || (alignment > 0x800000000000ULL) || - ((hweight32(alignment >> 32) + - hweight32(alignment & 0xffffffff) != 1))) { + (hweight64(alignment) != 1)) { HPSB_ERR("%s called with invalid alignment: 0x%048llx", __FUNCTION__, (unsigned long long)alignment); return retval; diff --git a/drivers/input/input.c b/drivers/input/input.c index 4fe3da3c667a..f8af0945964e 100644 --- a/drivers/input/input.c +++ b/drivers/input/input.c @@ -923,7 +923,7 @@ void input_unregister_handler(struct input_handler *handler) static int input_open_file(struct inode *inode, struct file *file) { struct input_handler *handler = input_table[iminor(inode) >> 5]; - struct file_operations *old_fops, *new_fops = NULL; + const struct file_operations *old_fops, *new_fops = NULL; int err; /* No load-on-demand here? 
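The ide-tape.c hunks above replace open-coded jiffies arithmetic with the time_after()/time_after_eq() helpers from linux/jiffies.h. Those macros compare two timestamps through a signed difference, so they stay correct when the jiffies counter wraps, and they make the direction of the comparison explicit. A small illustration of the idiom; the deadline logic is invented, not taken from ide-tape:

#include <linux/jiffies.h>
#include <linux/delay.h>
#include <linux/errno.h>

/* poll a flag, but give up two seconds after we started */
static int wait_for_flag(volatile int *flag)
{
	unsigned long deadline = jiffies + 2 * HZ;

	while (!*flag) {
		if (time_after(jiffies, deadline))
			return -ETIMEDOUT;	/* wrap-safe comparison */
		msleep(10);
	}
	return 0;
}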
*/ diff --git a/drivers/input/serio/hp_sdc_mlc.c b/drivers/input/serio/hp_sdc_mlc.c index 1c9426fd5205..aa4a8a4ccfdb 100644 --- a/drivers/input/serio/hp_sdc_mlc.c +++ b/drivers/input/serio/hp_sdc_mlc.c @@ -270,9 +270,10 @@ static void hp_sdc_mlc_out (hil_mlc *mlc) { do_control: priv->emtestmode = mlc->opacket & HIL_CTRL_TEST; - if ((mlc->opacket & (HIL_CTRL_APE | HIL_CTRL_IPF)) == HIL_CTRL_APE) { - BUG(); /* we cannot emulate this, it should not be used. */ - } + + /* we cannot emulate this, it should not be used. */ + BUG_ON((mlc->opacket & (HIL_CTRL_APE | HIL_CTRL_IPF)) == HIL_CTRL_APE); + if ((mlc->opacket & HIL_CTRL_ONLY) == HIL_CTRL_ONLY) goto control_only; if (mlc->opacket & HIL_CTRL_APE) { BUG(); /* Should not send command/data after engaging APE */ diff --git a/drivers/input/touchscreen/ads7846.c b/drivers/input/touchscreen/ads7846.c index a81f987978c8..46d1fec2cfd8 100644 --- a/drivers/input/touchscreen/ads7846.c +++ b/drivers/input/touchscreen/ads7846.c @@ -23,7 +23,7 @@ #include <linux/slab.h> #include <linux/spi/spi.h> #include <linux/spi/ads7846.h> -#include <linux/interrupt.h> +#include <asm/irq.h> #ifdef CONFIG_ARM #include <asm/mach-types.h> diff --git a/drivers/isdn/Makefile b/drivers/isdn/Makefile index 03d8ccd51955..988142c30a6d 100644 --- a/drivers/isdn/Makefile +++ b/drivers/isdn/Makefile @@ -13,3 +13,4 @@ obj-$(CONFIG_ISDN_DRV_SC) += sc/ obj-$(CONFIG_ISDN_DRV_LOOP) += isdnloop/ obj-$(CONFIG_ISDN_DRV_ACT2000) += act2000/ obj-$(CONFIG_HYSDN) += hysdn/ +obj-$(CONFIG_ISDN_DRV_GIGASET) += gigaset/ diff --git a/drivers/isdn/capi/capi.c b/drivers/isdn/capi/capi.c index 623adbb0d13a..9b493f0becc4 100644 --- a/drivers/isdn/capi/capi.c +++ b/drivers/isdn/capi/capi.c @@ -1485,6 +1485,7 @@ static int __init capi_init(void) { char *p; char *compileinfo; + int major_ret; if ((p = strchr(revision, ':')) != 0 && p[1]) { strlcpy(rev, p + 2, sizeof(rev)); @@ -1493,11 +1494,12 @@ static int __init capi_init(void) } else strcpy(rev, "1.0"); - if (register_chrdev(capi_major, "capi20", &capi_fops)) { + major_ret = register_chrdev(capi_major, "capi20", &capi_fops); + if (major_ret < 0) { printk(KERN_ERR "capi20: unable to get major %d\n", capi_major); - return -EIO; + return major_ret; } - + capi_major = major_ret; capi_class = class_create(THIS_MODULE, "capi"); if (IS_ERR(capi_class)) { unregister_chrdev(capi_major, "capi20"); diff --git a/drivers/isdn/capi/kcapi_proc.c b/drivers/isdn/capi/kcapi_proc.c index 2cc8b27e4c3b..ca9dc00a45c4 100644 --- a/drivers/isdn/capi/kcapi_proc.c +++ b/drivers/isdn/capi/kcapi_proc.c @@ -233,7 +233,7 @@ static struct file_operations proc_applstats_ops = { }; static void -create_seq_entry(char *name, mode_t mode, struct file_operations *f) +create_seq_entry(char *name, mode_t mode, const struct file_operations *f) { struct proc_dir_entry *entry; entry = create_proc_entry(name, mode, NULL); diff --git a/drivers/isdn/gigaset/Kconfig b/drivers/isdn/gigaset/Kconfig new file mode 100644 index 000000000000..53c4fb62ed85 --- /dev/null +++ b/drivers/isdn/gigaset/Kconfig @@ -0,0 +1,42 @@ +menu "Siemens Gigaset" + depends on ISDN_I4L + +config ISDN_DRV_GIGASET + tristate "Siemens Gigaset support (isdn)" + depends on ISDN_I4L && m +# depends on ISDN_I4L && MODULES + help + Say m here if you have a Gigaset or Sinus isdn device. + +if ISDN_DRV_GIGASET!=n + +config GIGASET_BASE + tristate "Gigaset base station support" + depends on ISDN_DRV_GIGASET && USB + help + Say m here if you need to communicate with the base + directly via USB. 
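# Note: judging from the sources added below, GIGASET_DEBUG only gates
# the CONFIG_GIGASET_DEBUG #ifdef blocks in asyncdata.c and
# bas-gigaset.c (extra dbg() traces and consistency checks); it does
# not add functionality of its own.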
+ +config GIGASET_M105 + tristate "Gigaset M105 support" + depends on ISDN_DRV_GIGASET && USB + help + Say m here if you need the driver for the Gigaset M105 device. + +config GIGASET_DEBUG + bool "Gigaset debugging" + help + This enables debugging code in the Gigaset drivers. + If in doubt, say yes. + +config GIGASET_UNDOCREQ + bool "Support for undocumented USB requests" + help + This enables support for USB requests we only know from + reverse engineering (currently M105 only). If you need + features like configuration mode of M105, say yes. If you + care about your device, say no. + +endif + +endmenu diff --git a/drivers/isdn/gigaset/Makefile b/drivers/isdn/gigaset/Makefile new file mode 100644 index 000000000000..9b9acf1a21ad --- /dev/null +++ b/drivers/isdn/gigaset/Makefile @@ -0,0 +1,6 @@ +gigaset-y := common.o interface.o proc.o ev-layer.o i4l.o +usb_gigaset-y := usb-gigaset.o asyncdata.o +bas_gigaset-y := bas-gigaset.o isocdata.o + +obj-$(CONFIG_GIGASET_M105) += usb_gigaset.o gigaset.o +obj-$(CONFIG_GIGASET_BASE) += bas_gigaset.o gigaset.o diff --git a/drivers/isdn/gigaset/asyncdata.c b/drivers/isdn/gigaset/asyncdata.c new file mode 100644 index 000000000000..171f8b703d61 --- /dev/null +++ b/drivers/isdn/gigaset/asyncdata.c @@ -0,0 +1,597 @@ +/* + * Common data handling layer for ser_gigaset and usb_gigaset + * + * Copyright (c) 2005 by Tilman Schmidt <tilman@imap.cc>, + * Hansjoerg Lipp <hjlipp@web.de>, + * Stefan Eilers <Eilers.Stefan@epost.de>. + * + * ===================================================================== + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of + * the License, or (at your option) any later version. + * ===================================================================== + * ToDo: ... + * ===================================================================== + * Version: $Id: asyncdata.c,v 1.2.2.7 2005/11/13 23:05:18 hjlipp Exp $ + * ===================================================================== + */ + +#include "gigaset.h" +#include <linux/crc-ccitt.h> + +//#define GIG_M10x_STUFF_VOICE_DATA + +/* check if byte must be stuffed/escaped + * I'm not sure which data should be encoded. + * Therefore I will go the hard way and decode every value + * less than 0x20, the flag sequence and the control escape char. 
+ */ +static inline int muststuff(unsigned char c) +{ + if (c < PPP_TRANS) return 1; + if (c == PPP_FLAG) return 1; + if (c == PPP_ESCAPE) return 1; + /* other possible candidates: */ + /* 0x91: XON with parity set */ + /* 0x93: XOFF with parity set */ + return 0; +} + +/* == data input =========================================================== */ + +/* process a block of received bytes in command mode (modem response) + * Return value: + * number of processed bytes + */ +static inline int cmd_loop(unsigned char c, unsigned char *src, int numbytes, + struct inbuf_t *inbuf) +{ + struct cardstate *cs = inbuf->cs; + unsigned cbytes = cs->cbytes; + int inputstate = inbuf->inputstate; + int startbytes = numbytes; + + for (;;) { + cs->respdata[cbytes] = c; + if (c == 10 || c == 13) { + dbg(DEBUG_TRANSCMD, "%s: End of Command (%d Bytes)", + __func__, cbytes); + cs->cbytes = cbytes; + gigaset_handle_modem_response(cs); /* can change cs->dle */ + cbytes = 0; + + if (cs->dle && + !(inputstate & INS_DLE_command)) { + inputstate &= ~INS_command; + break; + } + } else { + /* advance in line buffer, checking for overflow */ + if (cbytes < MAX_RESP_SIZE - 1) + cbytes++; + else + warn("response too large"); + } + + if (!numbytes) + break; + c = *src++; + --numbytes; + if (c == DLE_FLAG && + (cs->dle || inputstate & INS_DLE_command)) { + inputstate |= INS_DLE_char; + break; + } + } + + cs->cbytes = cbytes; + inbuf->inputstate = inputstate; + + return startbytes - numbytes; +} + +/* process a block of received bytes in lock mode (tty i/f) + * Return value: + * number of processed bytes + */ +static inline int lock_loop(unsigned char *src, int numbytes, + struct inbuf_t *inbuf) +{ + struct cardstate *cs = inbuf->cs; + + gigaset_dbg_buffer(DEBUG_LOCKCMD, "received response", numbytes, src, 0); + gigaset_if_receive(cs, src, numbytes); + + return numbytes; +} + +/* process a block of received bytes in HDLC data mode + * Collect HDLC frames, undoing byte stuffing and watching for DLE escapes. + * When a frame is complete, check the FCS and pass valid frames to the LL. + * If DLE is encountered, return immediately to let the caller handle it. 
+ * Return value: + * number of processed bytes + * numbytes (all bytes processed) on error --FIXME + */ +static inline int hdlc_loop(unsigned char c, unsigned char *src, int numbytes, + struct inbuf_t *inbuf) +{ + struct cardstate *cs = inbuf->cs; + struct bc_state *bcs = inbuf->bcs; + int inputstate; + __u16 fcs; + struct sk_buff *skb; + unsigned char error; + struct sk_buff *compskb; + int startbytes = numbytes; + int l; + + IFNULLRETVAL(bcs, numbytes); + inputstate = bcs->inputstate; + fcs = bcs->fcs; + skb = bcs->skb; + IFNULLRETVAL(skb, numbytes); + + if (unlikely(inputstate & INS_byte_stuff)) { + inputstate &= ~INS_byte_stuff; + goto byte_stuff; + } + for (;;) { + if (unlikely(c == PPP_ESCAPE)) { + if (unlikely(!numbytes)) { + inputstate |= INS_byte_stuff; + break; + } + c = *src++; + --numbytes; + if (unlikely(c == DLE_FLAG && + (cs->dle || + inbuf->inputstate & INS_DLE_command))) { + inbuf->inputstate |= INS_DLE_char; + inputstate |= INS_byte_stuff; + break; + } +byte_stuff: + c ^= PPP_TRANS; +#ifdef CONFIG_GIGASET_DEBUG + if (unlikely(!muststuff(c))) + dbg(DEBUG_HDLC, + "byte stuffed: 0x%02x", c); +#endif + } else if (unlikely(c == PPP_FLAG)) { + if (unlikely(inputstate & INS_skip_frame)) { + if (!(inputstate & INS_have_data)) { /* 7E 7E */ + //dbg(DEBUG_HDLC, "(7e)7e------------------------"); +#ifdef CONFIG_GIGASET_DEBUG + ++bcs->emptycount; +#endif + } else + dbg(DEBUG_HDLC, + "7e----------------------------"); + + /* end of frame */ + error = 1; + gigaset_rcv_error(NULL, cs, bcs); + } else if (!(inputstate & INS_have_data)) { /* 7E 7E */ + //dbg(DEBUG_HDLC, "(7e)7e------------------------"); +#ifdef CONFIG_GIGASET_DEBUG + ++bcs->emptycount; +#endif + break; + } else { + dbg(DEBUG_HDLC, + "7e----------------------------"); + + /* end of frame */ + error = 0; + + if (unlikely(fcs != PPP_GOODFCS)) { + err("Packet checksum at %lu failed, " + "packet is corrupted (%u bytes)!", + bcs->rcvbytes, skb->len); + compskb = NULL; + gigaset_rcv_error(compskb, cs, bcs); + error = 1; + } else { + if (likely((l = skb->len) > 2)) { + skb->tail -= 2; + skb->len -= 2; + } else { + dev_kfree_skb(skb); + skb = NULL; + inputstate |= INS_skip_frame; + if (l == 1) { + err("invalid packet size (1)!"); + error = 1; + gigaset_rcv_error(NULL, cs, bcs); + } + } + if (likely(!(error || + (inputstate & + INS_skip_frame)))) { + gigaset_rcv_skb(skb, cs, bcs); + } + } + } + + if (unlikely(error)) + if (skb) + dev_kfree_skb(skb); + + fcs = PPP_INITFCS; + inputstate &= ~(INS_have_data | INS_skip_frame); + if (unlikely(bcs->ignore)) { + inputstate |= INS_skip_frame; + skb = NULL; + } else if (likely((skb = dev_alloc_skb(SBUFSIZE + HW_HDR_LEN)) != NULL)) { + skb_reserve(skb, HW_HDR_LEN); + } else { + warn("could not allocate new skb"); + inputstate |= INS_skip_frame; + } + + break; +#ifdef CONFIG_GIGASET_DEBUG + } else if (unlikely(muststuff(c))) { + /* Should not happen. Possible after ZDLE=1<CR><LF>. 
*/ + dbg(DEBUG_HDLC, "not byte stuffed: 0x%02x", c); +#endif + } + + /* add character */ + +#ifdef CONFIG_GIGASET_DEBUG + if (unlikely(!(inputstate & INS_have_data))) { + dbg(DEBUG_HDLC, + "7e (%d x) ================", bcs->emptycount); + bcs->emptycount = 0; + } +#endif + + inputstate |= INS_have_data; + + if (likely(!(inputstate & INS_skip_frame))) { + if (unlikely(skb->len == SBUFSIZE)) { + warn("received packet too long"); + dev_kfree_skb_any(skb); + skb = NULL; + inputstate |= INS_skip_frame; + break; + } + *gigaset_skb_put_quick(skb, 1) = c; + /* *__skb_put (skb, 1) = c; */ + fcs = crc_ccitt_byte(fcs, c); + } + + if (unlikely(!numbytes)) + break; + c = *src++; + --numbytes; + if (unlikely(c == DLE_FLAG && + (cs->dle || + inbuf->inputstate & INS_DLE_command))) { + inbuf->inputstate |= INS_DLE_char; + break; + } + } + bcs->inputstate = inputstate; + bcs->fcs = fcs; + bcs->skb = skb; + return startbytes - numbytes; +} + +/* process a block of received bytes in transparent data mode + * Invert bytes, undoing byte stuffing and watching for DLE escapes. + * If DLE is encountered, return immediately to let the caller handle it. + * Return value: + * number of processed bytes + * numbytes (all bytes processed) on error --FIXME + */ +static inline int iraw_loop(unsigned char c, unsigned char *src, int numbytes, + struct inbuf_t *inbuf) +{ + struct cardstate *cs = inbuf->cs; + struct bc_state *bcs = inbuf->bcs; + int inputstate; + struct sk_buff *skb; + int startbytes = numbytes; + + IFNULLRETVAL(bcs, numbytes); + inputstate = bcs->inputstate; + skb = bcs->skb; + IFNULLRETVAL(skb, numbytes); + + for (;;) { + /* add character */ + inputstate |= INS_have_data; + + if (likely(!(inputstate & INS_skip_frame))) { + if (unlikely(skb->len == SBUFSIZE)) { + //FIXME just pass skb up and allocate a new one + warn("received packet too long"); + dev_kfree_skb_any(skb); + skb = NULL; + inputstate |= INS_skip_frame; + break; + } + *gigaset_skb_put_quick(skb, 1) = gigaset_invtab[c]; + } + + if (unlikely(!numbytes)) + break; + c = *src++; + --numbytes; + if (unlikely(c == DLE_FLAG && + (cs->dle || + inbuf->inputstate & INS_DLE_command))) { + inbuf->inputstate |= INS_DLE_char; + break; + } + } + + /* pass data up */ + if (likely(inputstate & INS_have_data)) { + if (likely(!(inputstate & INS_skip_frame))) { + gigaset_rcv_skb(skb, cs, bcs); + } + inputstate &= ~(INS_have_data | INS_skip_frame); + if (unlikely(bcs->ignore)) { + inputstate |= INS_skip_frame; + skb = NULL; + } else if (likely((skb = dev_alloc_skb(SBUFSIZE + HW_HDR_LEN)) + != NULL)) { + skb_reserve(skb, HW_HDR_LEN); + } else { + warn("could not allocate new skb"); + inputstate |= INS_skip_frame; + } + } + + bcs->inputstate = inputstate; + bcs->skb = skb; + return startbytes - numbytes; +} + +/* process a block of data received from the device + */ +void gigaset_m10x_input(struct inbuf_t *inbuf) +{ + struct cardstate *cs; + unsigned tail, head, numbytes; + unsigned char *src, c; + int procbytes; + + head = atomic_read(&inbuf->head); + tail = atomic_read(&inbuf->tail); + dbg(DEBUG_INTR, "buffer state: %u -> %u", head, tail); + + if (head != tail) { + cs = inbuf->cs; + src = inbuf->data + head; + numbytes = (head > tail ? 
RBUFSIZE : tail) - head; + dbg(DEBUG_INTR, "processing %u bytes", numbytes); + + while (numbytes) { + if (atomic_read(&cs->mstate) == MS_LOCKED) { + procbytes = lock_loop(src, numbytes, inbuf); + src += procbytes; + numbytes -= procbytes; + } else { + c = *src++; + --numbytes; + if (c == DLE_FLAG && (cs->dle || + inbuf->inputstate & INS_DLE_command)) { + if (!(inbuf->inputstate & INS_DLE_char)) { + inbuf->inputstate |= INS_DLE_char; + goto nextbyte; + } + /* <DLE> <DLE> => <DLE> in data stream */ + inbuf->inputstate &= ~INS_DLE_char; + } + + if (!(inbuf->inputstate & INS_DLE_char)) { + + /* FIXME Einfach je nach Modus Funktionszeiger in cs setzen [hier+hdlc_loop]? */ + /* FIXME Spart folgendes "if" und ermoeglicht andere Protokolle */ + if (inbuf->inputstate & INS_command) + procbytes = cmd_loop(c, src, numbytes, inbuf); + else if (inbuf->bcs->proto2 == ISDN_PROTO_L2_HDLC) + procbytes = hdlc_loop(c, src, numbytes, inbuf); + else + procbytes = iraw_loop(c, src, numbytes, inbuf); + + src += procbytes; + numbytes -= procbytes; + } else { /* DLE-char */ + inbuf->inputstate &= ~INS_DLE_char; + switch (c) { + case 'X': /*begin of command*/ +#ifdef CONFIG_GIGASET_DEBUG + if (inbuf->inputstate & INS_command) + err("received <DLE> 'X' in command mode"); +#endif + inbuf->inputstate |= + INS_command | INS_DLE_command; + break; + case '.': /*end of command*/ +#ifdef CONFIG_GIGASET_DEBUG + if (!(inbuf->inputstate & INS_command)) + err("received <DLE> '.' in hdlc mode"); +#endif + inbuf->inputstate &= cs->dle ? + ~(INS_DLE_command|INS_command) + : ~INS_DLE_command; + break; + //case DLE_FLAG: /*DLE_FLAG in data stream*/ /* schon oben behandelt! */ + default: + err("received 0x10 0x%02x!", (int) c); + /* FIXME: reset driver?? */ + } + } + } +nextbyte: + if (!numbytes) { + /* end of buffer, check for wrap */ + if (head > tail) { + head = 0; + src = inbuf->data; + numbytes = tail; + } else { + head = tail; + break; + } + } + } + + dbg(DEBUG_INTR, "setting head to %u", head); + atomic_set(&inbuf->head, head); + } +} + + +/* == data output ========================================================== */ + +/* Encoding of a PPP packet into an octet stuffed HDLC frame + * with FCS, opening and closing flags. + * parameters: + * skb skb containing original packet (freed upon return) + * head number of headroom bytes to allocate in result skb + * tail number of tailroom bytes to allocate in result skb + * Return value: + * pointer to newly allocated skb containing the result frame + */ +static struct sk_buff *HDLC_Encode(struct sk_buff *skb, int head, int tail) +{ + struct sk_buff *hdlc_skb; + __u16 fcs; + unsigned char c; + unsigned char *cp; + int len; + unsigned int stuf_cnt; + + stuf_cnt = 0; + fcs = PPP_INITFCS; + cp = skb->data; + len = skb->len; + while (len--) { + if (muststuff(*cp)) + stuf_cnt++; + fcs = crc_ccitt_byte(fcs, *cp++); + } + fcs ^= 0xffff; /* complement */ + + /* size of new buffer: original size + number of stuffing bytes + * + 2 bytes FCS + 2 stuffing bytes for FCS (if needed) + 2 flag bytes + */ + hdlc_skb = dev_alloc_skb(skb->len + stuf_cnt + 6 + tail + head); + if (!hdlc_skb) { + err("unable to allocate memory for HDLC encoding!"); + dev_kfree_skb(skb); + return NULL; + } + skb_reserve(hdlc_skb, head); + + /* Copy acknowledge request into new skb */ + memcpy(hdlc_skb->head, skb->head, 2); + + /* Add flag sequence in front of everything.. */ + *(skb_put(hdlc_skb, 1)) = PPP_FLAG; + + /* Perform byte stuffing while copying data. 
*/ + while (skb->len--) { + if (muststuff(*skb->data)) { + *(skb_put(hdlc_skb, 1)) = PPP_ESCAPE; + *(skb_put(hdlc_skb, 1)) = (*skb->data++) ^ PPP_TRANS; + } else + *(skb_put(hdlc_skb, 1)) = *skb->data++; + } + + /* Finally add FCS (byte stuffed) and flag sequence */ + c = (fcs & 0x00ff); /* least significant byte first */ + if (muststuff(c)) { + *(skb_put(hdlc_skb, 1)) = PPP_ESCAPE; + c ^= PPP_TRANS; + } + *(skb_put(hdlc_skb, 1)) = c; + + c = ((fcs >> 8) & 0x00ff); + if (muststuff(c)) { + *(skb_put(hdlc_skb, 1)) = PPP_ESCAPE; + c ^= PPP_TRANS; + } + *(skb_put(hdlc_skb, 1)) = c; + + *(skb_put(hdlc_skb, 1)) = PPP_FLAG; + + dev_kfree_skb(skb); + return hdlc_skb; +} + +/* Encoding of a raw packet into an octet stuffed bit inverted frame + * parameters: + * skb skb containing original packet (freed upon return) + * head number of headroom bytes to allocate in result skb + * tail number of tailroom bytes to allocate in result skb + * Return value: + * pointer to newly allocated skb containing the result frame + */ +static struct sk_buff *iraw_encode(struct sk_buff *skb, int head, int tail) +{ + struct sk_buff *iraw_skb; + unsigned char c; + unsigned char *cp; + int len; + + /* worst case: every byte must be stuffed */ + iraw_skb = dev_alloc_skb(2*skb->len + tail + head); + if (!iraw_skb) { + err("unable to allocate memory for HDLC encoding!"); + dev_kfree_skb(skb); + return NULL; + } + skb_reserve(iraw_skb, head); + + cp = skb->data; + len = skb->len; + while (len--) { + c = gigaset_invtab[*cp++]; + if (c == DLE_FLAG) + *(skb_put(iraw_skb, 1)) = c; + *(skb_put(iraw_skb, 1)) = c; + } + dev_kfree_skb(skb); + return iraw_skb; +} + +/* gigaset_send_skb + * called by common.c to queue an skb for sending + * and start transmission if necessary + * parameters: + * B Channel control structure + * skb + * Return value: + * number of bytes accepted for sending + * (skb->len if ok, 0 if out of buffer space) + * or error code (< 0, eg. -EINVAL) + */ +int gigaset_m10x_send_skb(struct bc_state *bcs, struct sk_buff *skb) +{ + unsigned len; + + IFNULLRETVAL(bcs, -EFAULT); + IFNULLRETVAL(skb, -EFAULT); + len = skb->len; + + if (bcs->proto2 == ISDN_PROTO_L2_HDLC) + skb = HDLC_Encode(skb, HW_HDR_LEN, 0); + else + skb = iraw_encode(skb, HW_HDR_LEN, 0); + if (!skb) + return -ENOMEM; + + skb_queue_tail(&bcs->squeue, skb); + tasklet_schedule(&bcs->cs->write_tasklet); + + return len; /* ok so far */ +} diff --git a/drivers/isdn/gigaset/bas-gigaset.c b/drivers/isdn/gigaset/bas-gigaset.c new file mode 100644 index 000000000000..31f0f07832bc --- /dev/null +++ b/drivers/isdn/gigaset/bas-gigaset.c @@ -0,0 +1,2365 @@ +/* + * USB driver for Gigaset 307x base via direct USB connection. + * + * Copyright (c) 2001 by Hansjoerg Lipp <hjlipp@web.de>, + * Tilman Schmidt <tilman@imap.cc>, + * Stefan Eilers <Eilers.Stefan@epost.de>. + * + * Based on usb-gigaset.c. + * + * ===================================================================== + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of + * the License, or (at your option) any later version. + * ===================================================================== + * ToDo: ... 
+ * ===================================================================== + * Version: $Id: bas-gigaset.c,v 1.52.4.19 2006/02/04 18:28:16 hjlipp Exp $ + * ===================================================================== + */ + +#include "gigaset.h" + +#include <linux/errno.h> +#include <linux/init.h> +#include <linux/slab.h> +#include <linux/timer.h> +#include <linux/usb.h> +#include <linux/module.h> +#include <linux/moduleparam.h> + +/* Version Information */ +#define DRIVER_AUTHOR "Tilman Schmidt <tilman@imap.cc>, Hansjoerg Lipp <hjlipp@web.de>, Stefan Eilers <Eilers.Stefan@epost.de>" +#define DRIVER_DESC "USB Driver for Gigaset 307x" + + +/* Module parameters */ + +static int startmode = SM_ISDN; +static int cidmode = 1; + +module_param(startmode, int, S_IRUGO); +module_param(cidmode, int, S_IRUGO); +MODULE_PARM_DESC(startmode, "start in isdn4linux mode"); +MODULE_PARM_DESC(cidmode, "Call-ID mode"); + +#define GIGASET_MINORS 1 +#define GIGASET_MINOR 16 +#define GIGASET_MODULENAME "bas_gigaset" +#define GIGASET_DEVFSNAME "gig/bas/" +#define GIGASET_DEVNAME "ttyGB" + +#define IF_WRITEBUF 256 //FIXME + +/* Values for the Gigaset 307x */ +#define USB_GIGA_VENDOR_ID 0x0681 +#define USB_GIGA_PRODUCT_ID 0x0001 +#define USB_4175_PRODUCT_ID 0x0002 +#define USB_SX303_PRODUCT_ID 0x0021 +#define USB_SX353_PRODUCT_ID 0x0022 + +/* table of devices that work with this driver */ +static struct usb_device_id gigaset_table [] = { + { USB_DEVICE(USB_GIGA_VENDOR_ID, USB_GIGA_PRODUCT_ID) }, + { USB_DEVICE(USB_GIGA_VENDOR_ID, USB_4175_PRODUCT_ID) }, + { USB_DEVICE(USB_GIGA_VENDOR_ID, USB_SX303_PRODUCT_ID) }, + { USB_DEVICE(USB_GIGA_VENDOR_ID, USB_SX353_PRODUCT_ID) }, + { } /* Terminating entry */ +}; + +MODULE_DEVICE_TABLE(usb, gigaset_table); + +/* Get a minor range for your devices from the usb maintainer */ +#define USB_SKEL_MINOR_BASE 200 + +/*======================= local function prototypes =============================*/ + +/* This function is called if a new device is connected to the USB port. It + * checks whether this new device belongs to this driver. 
+ */ +static int gigaset_probe(struct usb_interface *interface, + const struct usb_device_id *id); + +/* Function will be called if the device is unplugged */ +static void gigaset_disconnect(struct usb_interface *interface); + + +/*==============================================================================*/ + +struct bas_cardstate { + struct usb_device *udev; /* USB device pointer */ + struct usb_interface *interface; /* interface for this device */ + unsigned char minor; /* starting minor number */ + + struct urb *urb_ctrl; /* control pipe default URB */ + struct usb_ctrlrequest dr_ctrl; + struct timer_list timer_ctrl; /* control request timeout */ + + struct timer_list timer_atrdy; /* AT command ready timeout */ + struct urb *urb_cmd_out; /* for sending AT commands */ + struct usb_ctrlrequest dr_cmd_out; + int retry_cmd_out; + + struct urb *urb_cmd_in; /* for receiving AT replies */ + struct usb_ctrlrequest dr_cmd_in; + struct timer_list timer_cmd_in; /* receive request timeout */ + unsigned char *rcvbuf; /* AT reply receive buffer */ + + struct urb *urb_int_in; /* URB for interrupt pipe */ + unsigned char int_in_buf[3]; + + spinlock_t lock; /* locks all following */ + atomic_t basstate; /* bitmap (BS_*) */ + int pending; /* uncompleted base request */ + int rcvbuf_size; /* size of AT receive buffer */ + /* 0: no receive in progress */ + int retry_cmd_in; /* receive req retry count */ +}; + +/* status of direct USB connection to 307x base (bits in basstate) */ +#define BS_ATOPEN 0x001 +#define BS_B1OPEN 0x002 +#define BS_B2OPEN 0x004 +#define BS_ATREADY 0x008 +#define BS_INIT 0x010 +#define BS_ATTIMER 0x020 + + +static struct gigaset_driver *driver = NULL; +static struct cardstate *cardstate = NULL; + +/* usb specific object needed to register this driver with the usb subsystem */ +static struct usb_driver gigaset_usb_driver = { + .name = GIGASET_MODULENAME, + .probe = gigaset_probe, + .disconnect = gigaset_disconnect, + .id_table = gigaset_table, +}; + +/* get message text for USB status code + */ +static char *get_usb_statmsg(int status) +{ + static char unkmsg[28]; + + switch (status) { + case 0: + return "success"; + case -ENOENT: + return "canceled"; + case -ECONNRESET: + return "canceled (async)"; + case -EINPROGRESS: + return "pending"; + case -EPROTO: + return "bit stuffing or unknown USB error"; + case -EILSEQ: + return "Illegal byte sequence (CRC mismatch)"; + case -EPIPE: + return "babble detect or endpoint stalled"; + case -ENOSR: + return "buffer error"; + case -ETIMEDOUT: + return "timed out"; + case -ENODEV: + return "device not present"; + case -EREMOTEIO: + return "short packet detected"; + case -EXDEV: + return "partial isochronous transfer"; + case -EINVAL: + return "invalid argument"; + case -ENXIO: + return "URB already queued"; + case -EAGAIN: + return "isochronous start frame too early or too much scheduled"; + case -EFBIG: + return "too many isochronous frames requested"; + case -EMSGSIZE: + return "endpoint message size zero"; + case -ESHUTDOWN: + return "endpoint shutdown"; + case -EBUSY: + return "another request pending"; + default: + snprintf(unkmsg, sizeof(unkmsg), "unknown error %d", status); + return unkmsg; + } +} + +/* usb_pipetype_str + * retrieve string representation of USB pipe type + */ +static inline char *usb_pipetype_str(int pipe) +{ + if (usb_pipeisoc(pipe)) + return "Isoc"; + if (usb_pipeint(pipe)) + return "Int"; + if (usb_pipecontrol(pipe)) + return "Ctrl"; + if (usb_pipebulk(pipe)) + return "Bulk"; + return "?"; +} + +/* dump_urb + * 
write content of URB to syslog for debugging + */ +static inline void dump_urb(enum debuglevel level, const char *tag, + struct urb *urb) +{ +#ifdef CONFIG_GIGASET_DEBUG + int i; + IFNULLRET(tag); + dbg(level, "%s urb(0x%08lx)->{", tag, (unsigned long) urb); + if (urb) { + dbg(level, + " dev=0x%08lx, pipe=%s:EP%d/DV%d:%s, " + "status=%d, hcpriv=0x%08lx, transfer_flags=0x%x,", + (unsigned long) urb->dev, + usb_pipetype_str(urb->pipe), + usb_pipeendpoint(urb->pipe), usb_pipedevice(urb->pipe), + usb_pipein(urb->pipe) ? "in" : "out", + urb->status, (unsigned long) urb->hcpriv, + urb->transfer_flags); + dbg(level, + " transfer_buffer=0x%08lx[%d], actual_length=%d, " + "bandwidth=%d, setup_packet=0x%08lx,", + (unsigned long) urb->transfer_buffer, + urb->transfer_buffer_length, urb->actual_length, + urb->bandwidth, (unsigned long) urb->setup_packet); + dbg(level, + " start_frame=%d, number_of_packets=%d, interval=%d, " + "error_count=%d,", + urb->start_frame, urb->number_of_packets, urb->interval, + urb->error_count); + dbg(level, + " context=0x%08lx, complete=0x%08lx, iso_frame_desc[]={", + (unsigned long) urb->context, + (unsigned long) urb->complete); + for (i = 0; i < urb->number_of_packets; i++) { + struct usb_iso_packet_descriptor *pifd = &urb->iso_frame_desc[i]; + dbg(level, + " {offset=%u, length=%u, actual_length=%u, " + "status=%u}", + pifd->offset, pifd->length, pifd->actual_length, + pifd->status); + } + } + dbg(level, "}}"); +#endif +} + +/* read/set modem control bits etc. (m10x only) */ +static int gigaset_set_modem_ctrl(struct cardstate *cs, unsigned old_state, + unsigned new_state) +{ + return -EINVAL; +} + +static int gigaset_baud_rate(struct cardstate *cs, unsigned cflag) +{ + return -EINVAL; +} + +static int gigaset_set_line_ctrl(struct cardstate *cs, unsigned cflag) +{ + return -EINVAL; +} + +/* error_hangup + * hang up any existing connection because of an unrecoverable error + * This function may be called from any context and takes care of scheduling + * the necessary actions for execution outside of interrupt context. + * argument: + * B channel control structure + */ +static inline void error_hangup(struct bc_state *bcs) +{ + struct cardstate *cs = bcs->cs; + + dbg(DEBUG_ANY, + "%s: scheduling HUP for channel %d", __func__, bcs->channel); + + if (!gigaset_add_event(cs, &bcs->at_state, EV_HUP, NULL, 0, NULL)) { + //FIXME what should we do? + return; + } + + gigaset_schedule_event(cs); +} + +/* error_reset + * reset Gigaset device because of an unrecoverable error + * This function may be called from any context and takes care of scheduling + * the necessary actions for execution outside of interrupt context. 
+ * argument: + * controller state structure + */ +static inline void error_reset(struct cardstate *cs) +{ + //FIXME try to recover without bothering the user + err("unrecoverable error - please disconnect the Gigaset base to reset"); +} + +/* check_pending + * check for completion of pending control request + * parameter: + * urb USB request block of completed request + * urb->context = hardware specific controller state structure + */ +static void check_pending(struct bas_cardstate *ucs) +{ + unsigned long flags; + + IFNULLRET(ucs); + IFNULLRET(cardstate); + + spin_lock_irqsave(&ucs->lock, flags); + switch (ucs->pending) { + case 0: + break; + case HD_OPEN_ATCHANNEL: + if (atomic_read(&ucs->basstate) & BS_ATOPEN) + ucs->pending = 0; + break; + case HD_OPEN_B1CHANNEL: + if (atomic_read(&ucs->basstate) & BS_B1OPEN) + ucs->pending = 0; + break; + case HD_OPEN_B2CHANNEL: + if (atomic_read(&ucs->basstate) & BS_B2OPEN) + ucs->pending = 0; + break; + case HD_CLOSE_ATCHANNEL: + if (!(atomic_read(&ucs->basstate) & BS_ATOPEN)) + ucs->pending = 0; + //wake_up_interruptible(cs->initwait); + //FIXME need own wait queue? + break; + case HD_CLOSE_B1CHANNEL: + if (!(atomic_read(&ucs->basstate) & BS_B1OPEN)) + ucs->pending = 0; + break; + case HD_CLOSE_B2CHANNEL: + if (!(atomic_read(&ucs->basstate) & BS_B2OPEN)) + ucs->pending = 0; + break; + case HD_DEVICE_INIT_ACK: /* no reply expected */ + ucs->pending = 0; + break; + /* HD_READ_ATMESSAGE, HD_WRITE_ATMESSAGE, HD_RESET_INTERRUPTPIPE + * are handled separately and should never end up here + */ + default: + warn("unknown pending request 0x%02x cleared", ucs->pending); + ucs->pending = 0; + } + + if (!ucs->pending) + del_timer(&ucs->timer_ctrl); + + spin_unlock_irqrestore(&ucs->lock, flags); +} + +/* cmd_in_timeout + * timeout routine for command input request + * argument: + * controller state structure + */ +static void cmd_in_timeout(unsigned long data) +{ + struct cardstate *cs = (struct cardstate *) data; + struct bas_cardstate *ucs; + unsigned long flags; + + IFNULLRET(cs); + ucs = cs->hw.bas; + IFNULLRET(ucs); + + spin_lock_irqsave(&cs->lock, flags); + if (!atomic_read(&cs->connected)) { + dbg(DEBUG_USBREQ, "%s: disconnected", __func__); + spin_unlock_irqrestore(&cs->lock, flags); + return; + } + if (!ucs->rcvbuf_size) { + dbg(DEBUG_USBREQ, "%s: no receive in progress", __func__); + spin_unlock_irqrestore(&cs->lock, flags); + return; + } + spin_unlock_irqrestore(&cs->lock, flags); + + err("timeout reading AT response"); + error_reset(cs); //FIXME retry? 
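	/*
	 * Illustrative note, not part of the driver itself: the request
	 * timers in this file take their timeout argument in tenths of a
	 * second and convert it to jiffies as
	 *
	 *     expires = jiffies + timeout * HZ / 10;
	 *
	 * so with a hypothetical HZ of 250 and a timeout of 30 (3 seconds)
	 * the timer fires 30 * 250 / 10 = 750 jiffies after submission.
	 * This handler then proceeds to error_reset() only if the guarded
	 * AT read is still in progress, i.e. ucs->rcvbuf_size is non-zero.
	 */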
+} + + +static void read_ctrl_callback(struct urb *urb, struct pt_regs *regs); + +/* atread_submit + * submit an HD_READ_ATMESSAGE command URB + * parameters: + * cs controller state structure + * timeout timeout in 1/10 sec., 0: none + * return value: + * 0 on success + * -EINVAL if a NULL pointer is encountered somewhere + * -EBUSY if another request is pending + * any URB submission error code + */ +static int atread_submit(struct cardstate *cs, int timeout) +{ + struct bas_cardstate *ucs; + int ret; + + IFNULLRETVAL(cs, -EINVAL); + ucs = cs->hw.bas; + IFNULLRETVAL(ucs, -EINVAL); + IFNULLRETVAL(ucs->urb_cmd_in, -EINVAL); + + dbg(DEBUG_USBREQ, "-------> HD_READ_ATMESSAGE (%d)", ucs->rcvbuf_size); + + if (ucs->urb_cmd_in->status == -EINPROGRESS) { + err("could not submit HD_READ_ATMESSAGE: URB busy"); + return -EBUSY; + } + + ucs->dr_cmd_in.bRequestType = IN_VENDOR_REQ; + ucs->dr_cmd_in.bRequest = HD_READ_ATMESSAGE; + ucs->dr_cmd_in.wValue = 0; + ucs->dr_cmd_in.wIndex = 0; + ucs->dr_cmd_in.wLength = cpu_to_le16(ucs->rcvbuf_size); + usb_fill_control_urb(ucs->urb_cmd_in, ucs->udev, + usb_rcvctrlpipe(ucs->udev, 0), + (unsigned char*) & ucs->dr_cmd_in, + ucs->rcvbuf, ucs->rcvbuf_size, + read_ctrl_callback, cs->inbuf); + + if ((ret = usb_submit_urb(ucs->urb_cmd_in, SLAB_ATOMIC)) != 0) { + err("could not submit HD_READ_ATMESSAGE: %s", + get_usb_statmsg(ret)); + return ret; + } + + if (timeout > 0) { + dbg(DEBUG_USBREQ, "setting timeout of %d/10 secs", timeout); + ucs->timer_cmd_in.expires = jiffies + timeout * HZ / 10; + ucs->timer_cmd_in.data = (unsigned long) cs; + ucs->timer_cmd_in.function = cmd_in_timeout; + add_timer(&ucs->timer_cmd_in); + } + return 0; +} + +static void stopurbs(struct bas_bc_state *); +static int start_cbsend(struct cardstate *); + +/* set/clear bits in base connection state + */ +inline static void update_basstate(struct bas_cardstate *ucs, + int set, int clear) +{ + unsigned long flags; + int state; + + spin_lock_irqsave(&ucs->lock, flags); + state = atomic_read(&ucs->basstate); + state &= ~clear; + state |= set; + atomic_set(&ucs->basstate, state); + spin_unlock_irqrestore(&ucs->lock, flags); +} + + +/* read_int_callback + * USB completion handler for interrupt pipe input + * called by the USB subsystem in interrupt context + * parameter: + * urb USB request block + * urb->context = controller state structure + */ +static void read_int_callback(struct urb *urb, struct pt_regs *regs) +{ + struct cardstate *cs; + struct bas_cardstate *ucs; + struct bc_state *bcs; + unsigned long flags; + int status; + unsigned l; + int channel; + + IFNULLRET(urb); + cs = (struct cardstate *) urb->context; + IFNULLRET(cs); + ucs = cs->hw.bas; + IFNULLRET(ucs); + + if (unlikely(!atomic_read(&cs->connected))) { + warn("%s: disconnected", __func__); + return; + } + + switch (urb->status) { + case 0: /* success */ + break; + case -ENOENT: /* canceled */ + case -ECONNRESET: /* canceled (async) */ + case -EINPROGRESS: /* pending */ + /* ignore silently */ + dbg(DEBUG_USBREQ, + "%s: %s", __func__, get_usb_statmsg(urb->status)); + return; + default: /* severe trouble */ + warn("interrupt read: %s", get_usb_statmsg(urb->status)); + //FIXME corrective action? resubmission always ok? 
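	/*
	 * Annotation (assumption drawn from the code in this function, not
	 * stated elsewhere in the patch): -ENOENT, -ECONNRESET and
	 * -EINPROGRESS mean the URB was cancelled or is still pending and
	 * are returned from silently above, while any other non-zero
	 * status is logged and falls through here to "resubmit" so that
	 * interrupt polling of the base continues.  The three interrupt
	 * bytes decoded just below carry an event code followed by a
	 * 16-bit little-endian length, roughly:
	 *
	 *     // minimal model, hypothetical helper name
	 *     unsigned gig_int_len(const unsigned char m[3])
	 *     {
	 *         return (unsigned)m[1] | ((unsigned)m[2] << 8);
	 *     }
	 */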
+ goto resubmit; + } + + l = (unsigned) ucs->int_in_buf[1] + + (((unsigned) ucs->int_in_buf[2]) << 8); + + dbg(DEBUG_USBREQ, + "<-------%d: 0x%02x (%u [0x%02x 0x%02x])", urb->actual_length, + (int)ucs->int_in_buf[0], l, + (int)ucs->int_in_buf[1], (int)ucs->int_in_buf[2]); + + channel = 0; + + switch (ucs->int_in_buf[0]) { + case HD_DEVICE_INIT_OK: + update_basstate(ucs, BS_INIT, 0); + break; + + case HD_READY_SEND_ATDATA: + del_timer(&ucs->timer_atrdy); + update_basstate(ucs, BS_ATREADY, BS_ATTIMER); + start_cbsend(cs); + break; + + case HD_OPEN_B2CHANNEL_ACK: + ++channel; + case HD_OPEN_B1CHANNEL_ACK: + bcs = cs->bcs + channel; + update_basstate(ucs, BS_B1OPEN << channel, 0); + gigaset_bchannel_up(bcs); + break; + + case HD_OPEN_ATCHANNEL_ACK: + update_basstate(ucs, BS_ATOPEN, 0); + start_cbsend(cs); + break; + + case HD_CLOSE_B2CHANNEL_ACK: + ++channel; + case HD_CLOSE_B1CHANNEL_ACK: + bcs = cs->bcs + channel; + update_basstate(ucs, 0, BS_B1OPEN << channel); + stopurbs(bcs->hw.bas); + gigaset_bchannel_down(bcs); + break; + + case HD_CLOSE_ATCHANNEL_ACK: + update_basstate(ucs, 0, BS_ATOPEN); + break; + + case HD_B2_FLOW_CONTROL: + ++channel; + case HD_B1_FLOW_CONTROL: + bcs = cs->bcs + channel; + atomic_add((l - BAS_NORMFRAME) * BAS_CORRFRAMES, + &bcs->hw.bas->corrbytes); + dbg(DEBUG_ISO, + "Flow control (channel %d, sub %d): 0x%02x => %d", + channel, bcs->hw.bas->numsub, l, + atomic_read(&bcs->hw.bas->corrbytes)); + break; + + case HD_RECEIVEATDATA_ACK: /* AT response ready to be received */ + if (!l) { + warn("HD_RECEIVEATDATA_ACK with length 0 ignored"); + break; + } + spin_lock_irqsave(&cs->lock, flags); + if (ucs->rcvbuf_size) { + spin_unlock_irqrestore(&cs->lock, flags); + err("receive AT data overrun, %d bytes lost", l); + error_reset(cs); //FIXME reschedule + break; + } + if ((ucs->rcvbuf = kmalloc(l, GFP_ATOMIC)) == NULL) { + spin_unlock_irqrestore(&cs->lock, flags); + err("%s: out of memory, %d bytes lost", __func__, l); + error_reset(cs); //FIXME reschedule + break; + } + ucs->rcvbuf_size = l; + ucs->retry_cmd_in = 0; + if ((status = atread_submit(cs, BAS_TIMEOUT)) < 0) { + kfree(ucs->rcvbuf); + ucs->rcvbuf = NULL; + ucs->rcvbuf_size = 0; + error_reset(cs); //FIXME reschedule + } + spin_unlock_irqrestore(&cs->lock, flags); + break; + + case HD_RESET_INTERRUPT_PIPE_ACK: + dbg(DEBUG_USBREQ, "HD_RESET_INTERRUPT_PIPE_ACK"); + break; + + case HD_SUSPEND_END: + dbg(DEBUG_USBREQ, "HD_SUSPEND_END"); + break; + + default: + warn("unknown Gigaset signal 0x%02x (%u) ignored", + (int) ucs->int_in_buf[0], l); + } + + check_pending(ucs); + +resubmit: + status = usb_submit_urb(urb, SLAB_ATOMIC); + if (unlikely(status)) { + err("could not resubmit interrupt URB: %s", + get_usb_statmsg(status)); + error_reset(cs); + } +} + +/* read_ctrl_callback + * USB completion handler for control pipe input + * called by the USB subsystem in interrupt context + * parameter: + * urb USB request block + * urb->context = inbuf structure for controller state + */ +static void read_ctrl_callback(struct urb *urb, struct pt_regs *regs) +{ + struct cardstate *cs; + struct bas_cardstate *ucs; + unsigned numbytes; + unsigned long flags; + struct inbuf_t *inbuf; + int have_data = 0; + + IFNULLRET(urb); + inbuf = (struct inbuf_t *) urb->context; + IFNULLRET(inbuf); + cs = inbuf->cs; + IFNULLRET(cs); + ucs = cs->hw.bas; + IFNULLRET(ucs); + + spin_lock_irqsave(&cs->lock, flags); + if (!atomic_read(&cs->connected)) { + warn("%s: disconnected", __func__); + spin_unlock_irqrestore(&cs->lock, flags); + return; + } + + if 
(!ucs->rcvbuf_size) { + warn("%s: no receive in progress", __func__); + spin_unlock_irqrestore(&cs->lock, flags); + return; + } + + del_timer(&ucs->timer_cmd_in); + + switch (urb->status) { + case 0: /* normal completion */ + numbytes = urb->actual_length; + if (unlikely(numbytes == 0)) { + warn("control read: empty block received"); + goto retry; + } + if (unlikely(numbytes != ucs->rcvbuf_size)) { + warn("control read: received %d chars, expected %d", + numbytes, ucs->rcvbuf_size); + if (numbytes > ucs->rcvbuf_size) + numbytes = ucs->rcvbuf_size; + } + + /* copy received bytes to inbuf */ + have_data = gigaset_fill_inbuf(inbuf, ucs->rcvbuf, numbytes); + + if (unlikely(numbytes < ucs->rcvbuf_size)) { + /* incomplete - resubmit for remaining bytes */ + ucs->rcvbuf_size -= numbytes; + ucs->retry_cmd_in = 0; + goto retry; + } + break; + + case -ENOENT: /* canceled */ + case -ECONNRESET: /* canceled (async) */ + case -EINPROGRESS: /* pending */ + /* no action necessary */ + dbg(DEBUG_USBREQ, + "%s: %s", __func__, get_usb_statmsg(urb->status)); + break; + + default: /* severe trouble */ + warn("control read: %s", get_usb_statmsg(urb->status)); + retry: + if (ucs->retry_cmd_in++ < BAS_RETRY) { + notice("control read: retry %d", ucs->retry_cmd_in); + if (atread_submit(cs, BAS_TIMEOUT) >= 0) { + /* resubmitted - bypass regular exit block */ + spin_unlock_irqrestore(&cs->lock, flags); + return; + } + } else { + err("control read: giving up after %d tries", + ucs->retry_cmd_in); + } + error_reset(cs); + } + + kfree(ucs->rcvbuf); + ucs->rcvbuf = NULL; + ucs->rcvbuf_size = 0; + spin_unlock_irqrestore(&cs->lock, flags); + if (have_data) { + dbg(DEBUG_INTR, "%s-->BH", __func__); + gigaset_schedule_event(cs); + } +} + +/* read_iso_callback + * USB completion handler for B channel isochronous input + * called by the USB subsystem in interrupt context + * parameter: + * urb USB request block of completed request + * urb->context = bc_state structure + */ +static void read_iso_callback(struct urb *urb, struct pt_regs *regs) +{ + struct bc_state *bcs; + struct bas_bc_state *ubc; + unsigned long flags; + int i, rc; + + IFNULLRET(urb); + IFNULLRET(urb->context); + IFNULLRET(cardstate); + + /* status codes not worth bothering the tasklet with */ + if (unlikely(urb->status == -ENOENT || urb->status == -ECONNRESET || + urb->status == -EINPROGRESS)) { + dbg(DEBUG_ISO, + "%s: %s", __func__, get_usb_statmsg(urb->status)); + return; + } + + bcs = (struct bc_state *) urb->context; + ubc = bcs->hw.bas; + IFNULLRET(ubc); + + spin_lock_irqsave(&ubc->isoinlock, flags); + if (likely(ubc->isoindone == NULL)) { + /* pass URB to tasklet */ + ubc->isoindone = urb; + tasklet_schedule(&ubc->rcvd_tasklet); + } else { + /* tasklet still busy, drop data and resubmit URB */ + ubc->loststatus = urb->status; + for (i = 0; i < BAS_NUMFRAMES; i++) { + ubc->isoinlost += urb->iso_frame_desc[i].actual_length; + if (unlikely(urb->iso_frame_desc[i].status != 0 && + urb->iso_frame_desc[i].status != -EINPROGRESS)) { + ubc->loststatus = urb->iso_frame_desc[i].status; + } + urb->iso_frame_desc[i].status = 0; + urb->iso_frame_desc[i].actual_length = 0; + } + if (likely(atomic_read(&ubc->running))) { + urb->dev = bcs->cs->hw.bas->udev; /* clobbered by USB subsystem */ + urb->transfer_flags = URB_ISO_ASAP; + urb->number_of_packets = BAS_NUMFRAMES; + dbg(DEBUG_ISO, "%s: isoc read overrun/resubmit", __func__); + rc = usb_submit_urb(urb, SLAB_ATOMIC); + if (unlikely(rc != 0)) { + err("could not resubmit isochronous read URB: %s", + 
get_usb_statmsg(rc)); + dump_urb(DEBUG_ISO, "isoc read", urb); + error_hangup(bcs); + } + } + } + spin_unlock_irqrestore(&ubc->isoinlock, flags); +} + +/* write_iso_callback + * USB completion handler for B channel isochronous output + * called by the USB subsystem in interrupt context + * parameter: + * urb USB request block of completed request + * urb->context = isow_urbctx_t structure + */ +static void write_iso_callback(struct urb *urb, struct pt_regs *regs) +{ + struct isow_urbctx_t *ucx; + struct bas_bc_state *ubc; + unsigned long flags; + + IFNULLRET(urb); + IFNULLRET(urb->context); + IFNULLRET(cardstate); + + /* status codes not worth bothering the tasklet with */ + if (unlikely(urb->status == -ENOENT || urb->status == -ECONNRESET || + urb->status == -EINPROGRESS)) { + dbg(DEBUG_ISO, + "%s: %s", __func__, get_usb_statmsg(urb->status)); + return; + } + + /* pass URB context to tasklet */ + ucx = (struct isow_urbctx_t *) urb->context; + IFNULLRET(ucx->bcs); + ubc = ucx->bcs->hw.bas; + IFNULLRET(ubc); + + spin_lock_irqsave(&ubc->isooutlock, flags); + ubc->isooutovfl = ubc->isooutdone; + ubc->isooutdone = ucx; + spin_unlock_irqrestore(&ubc->isooutlock, flags); + tasklet_schedule(&ubc->sent_tasklet); +} + +/* starturbs + * prepare and submit USB request blocks for isochronous input and output + * argument: + * B channel control structure + * return value: + * 0 on success + * < 0 on error (no URBs submitted) + */ +static int starturbs(struct bc_state *bcs) +{ + struct urb *urb; + struct bas_bc_state *ubc; + int j, k; + int rc; + + IFNULLRETVAL(bcs, -EFAULT); + ubc = bcs->hw.bas; + IFNULLRETVAL(ubc, -EFAULT); + + /* initialize L2 reception */ + if (bcs->proto2 == ISDN_PROTO_L2_HDLC) + bcs->inputstate |= INS_flag_hunt; + + /* submit all isochronous input URBs */ + atomic_set(&ubc->running, 1); + for (k = 0; k < BAS_INURBS; k++) { + urb = ubc->isoinurbs[k]; + if (!urb) { + err("isoinurbs[%d]==NULL", k); + rc = -EFAULT; + goto error; + } + + urb->dev = bcs->cs->hw.bas->udev; + urb->pipe = usb_rcvisocpipe(urb->dev, 3 + 2 * bcs->channel); + urb->transfer_flags = URB_ISO_ASAP; + urb->transfer_buffer = ubc->isoinbuf + k * BAS_INBUFSIZE; + urb->transfer_buffer_length = BAS_INBUFSIZE; + urb->number_of_packets = BAS_NUMFRAMES; + urb->interval = BAS_FRAMETIME; + urb->complete = read_iso_callback; + urb->context = bcs; + for (j = 0; j < BAS_NUMFRAMES; j++) { + urb->iso_frame_desc[j].offset = j * BAS_MAXFRAME; + urb->iso_frame_desc[j].length = BAS_MAXFRAME; + urb->iso_frame_desc[j].status = 0; + urb->iso_frame_desc[j].actual_length = 0; + } + + dump_urb(DEBUG_ISO, "Initial isoc read", urb); + if ((rc = usb_submit_urb(urb, SLAB_ATOMIC)) != 0) { + err("could not submit isochronous read URB %d: %s", + k, get_usb_statmsg(rc)); + goto error; + } + } + + /* initialize L2 transmission */ + gigaset_isowbuf_init(ubc->isooutbuf, PPP_FLAG); + + /* set up isochronous output URBs for flag idling */ + for (k = 0; k < BAS_OUTURBS; ++k) { + urb = ubc->isoouturbs[k].urb; + if (!urb) { + err("isoouturbs[%d].urb==NULL", k); + rc = -EFAULT; + goto error; + } + urb->dev = bcs->cs->hw.bas->udev; + urb->pipe = usb_sndisocpipe(urb->dev, 4 + 2 * bcs->channel); + urb->transfer_flags = URB_ISO_ASAP; + urb->transfer_buffer = ubc->isooutbuf->data; + urb->transfer_buffer_length = sizeof(ubc->isooutbuf->data); + urb->number_of_packets = BAS_NUMFRAMES; + urb->interval = BAS_FRAMETIME; + urb->complete = write_iso_callback; + urb->context = &ubc->isoouturbs[k]; + for (j = 0; j < BAS_NUMFRAMES; ++j) { + urb->iso_frame_desc[j].offset 
= BAS_OUTBUFSIZE; + urb->iso_frame_desc[j].length = BAS_NORMFRAME; + urb->iso_frame_desc[j].status = 0; + urb->iso_frame_desc[j].actual_length = 0; + } + ubc->isoouturbs[k].limit = -1; + } + + /* submit two URBs, keep third one */ + for (k = 0; k < 2; ++k) { + dump_urb(DEBUG_ISO, "Initial isoc write", urb); + rc = usb_submit_urb(ubc->isoouturbs[k].urb, SLAB_ATOMIC); + if (rc != 0) { + err("could not submit isochronous write URB %d: %s", + k, get_usb_statmsg(rc)); + goto error; + } + } + dump_urb(DEBUG_ISO, "Initial isoc write (free)", urb); + ubc->isooutfree = &ubc->isoouturbs[2]; + ubc->isooutdone = ubc->isooutovfl = NULL; + return 0; + error: + stopurbs(ubc); + return rc; +} + +/* stopurbs + * cancel the USB request blocks for isochronous input and output + * errors are silently ignored + * argument: + * B channel control structure + */ +static void stopurbs(struct bas_bc_state *ubc) +{ + int k, rc; + + IFNULLRET(ubc); + + atomic_set(&ubc->running, 0); + + for (k = 0; k < BAS_INURBS; ++k) { + rc = usb_unlink_urb(ubc->isoinurbs[k]); + dbg(DEBUG_ISO, "%s: isoc input URB %d unlinked, result = %d", + __func__, k, rc); + } + + for (k = 0; k < BAS_OUTURBS; ++k) { + rc = usb_unlink_urb(ubc->isoouturbs[k].urb); + dbg(DEBUG_ISO, "%s: isoc output URB %d unlinked, result = %d", + __func__, k, rc); + } +} + +/* Isochronous Write - Bottom Half */ +/* =============================== */ + +/* submit_iso_write_urb + * fill and submit the next isochronous write URB + * parameters: + * bcs B channel state structure + * return value: + * number of frames submitted in URB + * 0 if URB not submitted because no data available (isooutbuf busy) + * error code < 0 on error + */ +static int submit_iso_write_urb(struct isow_urbctx_t *ucx) +{ + struct urb *urb; + struct bas_bc_state *ubc; + struct usb_iso_packet_descriptor *ifd; + int corrbytes, nframe, rc; + + IFNULLRETVAL(ucx, -EFAULT); + urb = ucx->urb; + IFNULLRETVAL(urb, -EFAULT); + IFNULLRETVAL(ucx->bcs, -EFAULT); + ubc = ucx->bcs->hw.bas; + IFNULLRETVAL(ubc, -EFAULT); + + urb->dev = ucx->bcs->cs->hw.bas->udev; /* clobbered by USB subsystem */ + urb->transfer_flags = URB_ISO_ASAP; + urb->transfer_buffer = ubc->isooutbuf->data; + urb->transfer_buffer_length = sizeof(ubc->isooutbuf->data); + + for (nframe = 0; nframe < BAS_NUMFRAMES; nframe++) { + ifd = &urb->iso_frame_desc[nframe]; + + /* compute frame length according to flow control */ + ifd->length = BAS_NORMFRAME; + if ((corrbytes = atomic_read(&ubc->corrbytes)) != 0) { + dbg(DEBUG_ISO, "%s: corrbytes=%d", __func__, corrbytes); + if (corrbytes > BAS_HIGHFRAME - BAS_NORMFRAME) + corrbytes = BAS_HIGHFRAME - BAS_NORMFRAME; + else if (corrbytes < BAS_LOWFRAME - BAS_NORMFRAME) + corrbytes = BAS_LOWFRAME - BAS_NORMFRAME; + ifd->length += corrbytes; + atomic_add(-corrbytes, &ubc->corrbytes); + } + //dbg(DEBUG_ISO, "%s: frame %d length=%d", __func__, nframe, ifd->length); + + /* retrieve block of data to send */ + ifd->offset = gigaset_isowbuf_getbytes(ubc->isooutbuf, ifd->length); + if (ifd->offset < 0) { + if (ifd->offset == -EBUSY) { + dbg(DEBUG_ISO, "%s: buffer busy at frame %d", + __func__, nframe); + /* tasklet will be restarted from gigaset_send_skb() */ + } else { + err("%s: buffer error %d at frame %d", + __func__, ifd->offset, nframe); + return ifd->offset; + } + break; + } + ucx->limit = atomic_read(&ubc->isooutbuf->nextread); + ifd->status = 0; + ifd->actual_length = 0; + } + if ((urb->number_of_packets = nframe) > 0) { + if ((rc = usb_submit_urb(urb, SLAB_ATOMIC)) != 0) { + err("could not submit 
isochronous write URB: %s", + get_usb_statmsg(rc)); + dump_urb(DEBUG_ISO, "isoc write", urb); + return rc; + } + ++ubc->numsub; + } + return nframe; +} + +/* write_iso_tasklet + * tasklet scheduled when an isochronous output URB from the Gigaset device + * has completed + * parameter: + * data B channel state structure + */ +static void write_iso_tasklet(unsigned long data) +{ + struct bc_state *bcs; + struct bas_bc_state *ubc; + struct cardstate *cs; + struct isow_urbctx_t *done, *next, *ovfl; + struct urb *urb; + struct usb_iso_packet_descriptor *ifd; + int offset; + unsigned long flags; + int i; + struct sk_buff *skb; + int len; + + bcs = (struct bc_state *) data; + IFNULLRET(bcs); + ubc = bcs->hw.bas; + IFNULLRET(ubc); + cs = bcs->cs; + IFNULLRET(cs); + + /* loop while completed URBs arrive in time */ + for (;;) { + if (unlikely(!atomic_read(&cs->connected))) { + warn("%s: disconnected", __func__); + return; + } + + if (unlikely(!(atomic_read(&ubc->running)))) { + dbg(DEBUG_ISO, "%s: not running", __func__); + return; + } + + /* retrieve completed URBs */ + spin_lock_irqsave(&ubc->isooutlock, flags); + done = ubc->isooutdone; + ubc->isooutdone = NULL; + ovfl = ubc->isooutovfl; + ubc->isooutovfl = NULL; + spin_unlock_irqrestore(&ubc->isooutlock, flags); + if (ovfl) { + err("isochronous write buffer underrun - buy a faster machine :-)"); + error_hangup(bcs); + break; + } + if (!done) + break; + + /* submit free URB if available */ + spin_lock_irqsave(&ubc->isooutlock, flags); + next = ubc->isooutfree; + ubc->isooutfree = NULL; + spin_unlock_irqrestore(&ubc->isooutlock, flags); + if (next) { + if (submit_iso_write_urb(next) <= 0) { + /* could not submit URB, put it back */ + spin_lock_irqsave(&ubc->isooutlock, flags); + if (ubc->isooutfree == NULL) { + ubc->isooutfree = next; + next = NULL; + } + spin_unlock_irqrestore(&ubc->isooutlock, flags); + if (next) { + /* couldn't put it back */ + err("losing isochronous write URB"); + error_hangup(bcs); + } + } + } + + /* process completed URB */ + urb = done->urb; + switch (urb->status) { + case 0: /* normal completion */ + break; + case -EXDEV: /* inspect individual frames */ + /* assumptions (for lack of documentation): + * - actual_length bytes of the frame in error are successfully sent + * - all following frames are not sent at all + */ + dbg(DEBUG_ISO, "%s: URB partially completed", __func__); + offset = done->limit; /* just in case */ + for (i = 0; i < BAS_NUMFRAMES; i++) { + ifd = &urb->iso_frame_desc[i]; + if (ifd->status || + ifd->actual_length != ifd->length) { + warn("isochronous write: frame %d: %s, " + "only %d of %d bytes sent", + i, get_usb_statmsg(ifd->status), + ifd->actual_length, ifd->length); + offset = (ifd->offset + + ifd->actual_length) + % BAS_OUTBUFSIZE; + break; + } + } +#ifdef CONFIG_GIGASET_DEBUG + /* check assumption on remaining frames */ + for (; i < BAS_NUMFRAMES; i++) { + ifd = &urb->iso_frame_desc[i]; + if (ifd->status != -EINPROGRESS + || ifd->actual_length != 0) { + warn("isochronous write: frame %d: %s, " + "%d of %d bytes sent", + i, get_usb_statmsg(ifd->status), + ifd->actual_length, ifd->length); + offset = (ifd->offset + + ifd->actual_length) + % BAS_OUTBUFSIZE; + break; + } + } +#endif + break; + case -EPIPE: //FIXME is this the code for "underrun"? 
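	/*
	 * Worked example for the -EXDEV bookkeeping above, with purely
	 * hypothetical numbers (BAS_OUTBUFSIZE taken as 4096 for
	 * illustration): if the frame that failed started at buffer offset
	 * 4000 and actual_length is 200, then
	 *
	 *     offset = (4000 + 200) % 4096 = 104
	 *
	 * i.e. the position just past the last byte known to have been
	 * sent, wrapped around the isochronous output ring buffer.
	 */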
+ err("isochronous write stalled"); + error_hangup(bcs); + break; + default: /* severe trouble */ + warn("isochronous write: %s", + get_usb_statmsg(urb->status)); + } + + /* mark the write buffer area covered by this URB as free */ + if (done->limit >= 0) + atomic_set(&ubc->isooutbuf->read, done->limit); + + /* mark URB as free */ + spin_lock_irqsave(&ubc->isooutlock, flags); + next = ubc->isooutfree; + ubc->isooutfree = done; + spin_unlock_irqrestore(&ubc->isooutlock, flags); + if (next) { + /* only one URB still active - resubmit one */ + if (submit_iso_write_urb(next) <= 0) { + /* couldn't submit */ + error_hangup(bcs); + } + } + } + + /* process queued SKBs */ + while ((skb = skb_dequeue(&bcs->squeue))) { + /* copy to output buffer, doing L2 encapsulation */ + len = skb->len; + if (gigaset_isoc_buildframe(bcs, skb->data, len) == -EAGAIN) { + /* insufficient buffer space, push back onto queue */ + skb_queue_head(&bcs->squeue, skb); + dbg(DEBUG_ISO, "%s: skb requeued, qlen=%d", + __func__, skb_queue_len(&bcs->squeue)); + break; + } + skb_pull(skb, len); + gigaset_skb_sent(bcs, skb); + dev_kfree_skb_any(skb); + } +} + +/* Isochronous Read - Bottom Half */ +/* ============================== */ + +/* read_iso_tasklet + * tasklet scheduled when an isochronous input URB from the Gigaset device + * has completed + * parameter: + * data B channel state structure + */ +static void read_iso_tasklet(unsigned long data) +{ + struct bc_state *bcs; + struct bas_bc_state *ubc; + struct cardstate *cs; + struct urb *urb; + char *rcvbuf; + unsigned long flags; + int totleft, numbytes, offset, frame, rc; + + bcs = (struct bc_state *) data; + IFNULLRET(bcs); + ubc = bcs->hw.bas; + IFNULLRET(ubc); + cs = bcs->cs; + IFNULLRET(cs); + + /* loop while more completed URBs arrive in the meantime */ + for (;;) { + if (!atomic_read(&cs->connected)) { + warn("%s: disconnected", __func__); + return; + } + + /* retrieve URB */ + spin_lock_irqsave(&ubc->isoinlock, flags); + if (!(urb = ubc->isoindone)) { + spin_unlock_irqrestore(&ubc->isoinlock, flags); + return; + } + ubc->isoindone = NULL; + if (unlikely(ubc->loststatus != -EINPROGRESS)) { + warn("isochronous read overrun, dropped URB with status: %s, %d bytes lost", + get_usb_statmsg(ubc->loststatus), ubc->isoinlost); + ubc->loststatus = -EINPROGRESS; + } + spin_unlock_irqrestore(&ubc->isoinlock, flags); + + if (unlikely(!(atomic_read(&ubc->running)))) { + dbg(DEBUG_ISO, "%s: channel not running, dropped URB with status: %s", + __func__, get_usb_statmsg(urb->status)); + return; + } + + switch (urb->status) { + case 0: /* normal completion */ + break; + case -EXDEV: /* inspect individual frames (we do that anyway) */ + dbg(DEBUG_ISO, "%s: URB partially completed", __func__); + break; + case -ENOENT: + case -ECONNRESET: + dbg(DEBUG_ISO, "%s: URB canceled", __func__); + continue; /* -> skip */ + case -EINPROGRESS: /* huh? 
*/ + dbg(DEBUG_ISO, "%s: URB still pending", __func__); + continue; /* -> skip */ + case -EPIPE: + err("isochronous read stalled"); + error_hangup(bcs); + continue; /* -> skip */ + default: /* severe trouble */ + warn("isochronous read: %s", + get_usb_statmsg(urb->status)); + goto error; + } + + rcvbuf = urb->transfer_buffer; + totleft = urb->actual_length; + for (frame = 0; totleft > 0 && frame < BAS_NUMFRAMES; frame++) { + if (unlikely(urb->iso_frame_desc[frame].status)) { + warn("isochronous read: frame %d: %s", + frame, get_usb_statmsg(urb->iso_frame_desc[frame].status)); + break; + } + numbytes = urb->iso_frame_desc[frame].actual_length; + if (unlikely(numbytes > BAS_MAXFRAME)) { + warn("isochronous read: frame %d: numbytes (%d) > BAS_MAXFRAME", + frame, numbytes); + break; + } + if (unlikely(numbytes > totleft)) { + warn("isochronous read: frame %d: numbytes (%d) > totleft (%d)", + frame, numbytes, totleft); + break; + } + offset = urb->iso_frame_desc[frame].offset; + if (unlikely(offset + numbytes > BAS_INBUFSIZE)) { + warn("isochronous read: frame %d: offset (%d) + numbytes (%d) > BAS_INBUFSIZE", + frame, offset, numbytes); + break; + } + gigaset_isoc_receive(rcvbuf + offset, numbytes, bcs); + totleft -= numbytes; + } + if (unlikely(totleft > 0)) + warn("isochronous read: %d data bytes missing", + totleft); + + error: + /* URB processed, resubmit */ + for (frame = 0; frame < BAS_NUMFRAMES; frame++) { + urb->iso_frame_desc[frame].status = 0; + urb->iso_frame_desc[frame].actual_length = 0; + } + urb->dev = bcs->cs->hw.bas->udev; /* clobbered by USB subsystem */ + urb->transfer_flags = URB_ISO_ASAP; + urb->number_of_packets = BAS_NUMFRAMES; + if ((rc = usb_submit_urb(urb, SLAB_ATOMIC)) != 0) { + err("could not resubmit isochronous read URB: %s", + get_usb_statmsg(rc)); + dump_urb(DEBUG_ISO, "resubmit iso read", urb); + error_hangup(bcs); + } + } +} + +/* Channel Operations */ +/* ================== */ + +/* req_timeout + * timeout routine for control output request + * argument: + * B channel control structure + */ +static void req_timeout(unsigned long data) +{ + struct bc_state *bcs = (struct bc_state *) data; + struct bas_cardstate *ucs; + int pending; + unsigned long flags; + + IFNULLRET(bcs); + IFNULLRET(bcs->cs); + ucs = bcs->cs->hw.bas; + IFNULLRET(ucs); + + check_pending(ucs); + + spin_lock_irqsave(&ucs->lock, flags); + pending = ucs->pending; + ucs->pending = 0; + spin_unlock_irqrestore(&ucs->lock, flags); + + switch (pending) { + case 0: /* no pending request */ + dbg(DEBUG_USBREQ, "%s: no request pending", __func__); + break; + + case HD_OPEN_ATCHANNEL: + err("timeout opening AT channel"); + error_reset(bcs->cs); + break; + + case HD_OPEN_B2CHANNEL: + case HD_OPEN_B1CHANNEL: + err("timeout opening channel %d", bcs->channel + 1); + error_hangup(bcs); + break; + + case HD_CLOSE_ATCHANNEL: + err("timeout closing AT channel"); + //wake_up_interruptible(cs->initwait); + //FIXME need own wait queue? 
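	/*
	 * Summary of the control request bookkeeping, as far as it can be
	 * read from this file: req_submit() stores the request code in
	 * ucs->pending and arms timer_ctrl; check_pending() (called from
	 * read_int_callback() and from this timeout handler) clears
	 * ucs->pending and the timer once the matching BS_* bit in
	 * basstate reflects the new state; if that never happens, this
	 * handler reports the request that was still outstanding.
	 */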
+ break; + + case HD_CLOSE_B2CHANNEL: + case HD_CLOSE_B1CHANNEL: + err("timeout closing channel %d", bcs->channel + 1); + break; + + default: + warn("request 0x%02x timed out, clearing", pending); + } +} + +/* write_ctrl_callback + * USB completion handler for control pipe output + * called by the USB subsystem in interrupt context + * parameter: + * urb USB request block of completed request + * urb->context = hardware specific controller state structure + */ +static void write_ctrl_callback(struct urb *urb, struct pt_regs *regs) +{ + struct bas_cardstate *ucs; + unsigned long flags; + + IFNULLRET(urb); + IFNULLRET(urb->context); + IFNULLRET(cardstate); + + ucs = (struct bas_cardstate *) urb->context; + spin_lock_irqsave(&ucs->lock, flags); + if (urb->status && ucs->pending) { + err("control request 0x%02x failed: %s", + ucs->pending, get_usb_statmsg(urb->status)); + del_timer(&ucs->timer_ctrl); + ucs->pending = 0; + } + /* individual handling of specific request types */ + switch (ucs->pending) { + case HD_DEVICE_INIT_ACK: /* no reply expected */ + ucs->pending = 0; + break; + } + spin_unlock_irqrestore(&ucs->lock, flags); +} + +/* req_submit + * submit a control output request without message buffer to the Gigaset base + * and optionally start a timeout + * parameters: + * bcs B channel control structure + * req control request code (HD_*) + * val control request parameter value (set to 0 if unused) + * timeout timeout in seconds (0: no timeout) + * return value: + * 0 on success + * -EINVAL if a NULL pointer is encountered somewhere + * -EBUSY if another request is pending + * any URB submission error code + */ +static int req_submit(struct bc_state *bcs, int req, int val, int timeout) +{ + struct bas_cardstate *ucs; + int ret; + unsigned long flags; + + IFNULLRETVAL(bcs, -EINVAL); + IFNULLRETVAL(bcs->cs, -EINVAL); + ucs = bcs->cs->hw.bas; + IFNULLRETVAL(ucs, -EINVAL); + IFNULLRETVAL(ucs->urb_ctrl, -EINVAL); + + dbg(DEBUG_USBREQ, "-------> 0x%02x (%d)", req, val); + + spin_lock_irqsave(&ucs->lock, flags); + if (ucs->pending) { + spin_unlock_irqrestore(&ucs->lock, flags); + err("submission of request 0x%02x failed: request 0x%02x still pending", + req, ucs->pending); + return -EBUSY; + } + if (ucs->urb_ctrl->status == -EINPROGRESS) { + spin_unlock_irqrestore(&ucs->lock, flags); + err("could not submit request 0x%02x: URB busy", req); + return -EBUSY; + } + + ucs->dr_ctrl.bRequestType = OUT_VENDOR_REQ; + ucs->dr_ctrl.bRequest = req; + ucs->dr_ctrl.wValue = cpu_to_le16(val); + ucs->dr_ctrl.wIndex = 0; + ucs->dr_ctrl.wLength = 0; + usb_fill_control_urb(ucs->urb_ctrl, ucs->udev, + usb_sndctrlpipe(ucs->udev, 0), + (unsigned char*) &ucs->dr_ctrl, NULL, 0, + write_ctrl_callback, ucs); + if ((ret = usb_submit_urb(ucs->urb_ctrl, SLAB_ATOMIC)) != 0) { + err("could not submit request 0x%02x: %s", + req, get_usb_statmsg(ret)); + spin_unlock_irqrestore(&ucs->lock, flags); + return ret; + } + ucs->pending = req; + + if (timeout > 0) { + dbg(DEBUG_USBREQ, "setting timeout of %d/10 secs", timeout); + ucs->timer_ctrl.expires = jiffies + timeout * HZ / 10; + ucs->timer_ctrl.data = (unsigned long) bcs; + ucs->timer_ctrl.function = req_timeout; + add_timer(&ucs->timer_ctrl); + } + + spin_unlock_irqrestore(&ucs->lock, flags); + return 0; +} + +/* gigaset_init_bchannel + * called by common.c to connect a B channel + * initialize isochronous I/O and tell the Gigaset base to open the channel + * argument: + * B channel control structure + * return value: + * 0 on success, error code < 0 on error + */ +static 
int gigaset_init_bchannel(struct bc_state *bcs) +{ + int req, ret; + + IFNULLRETVAL(bcs, -EINVAL); + + if ((ret = starturbs(bcs)) < 0) { + err("could not start isochronous I/O for channel %d", + bcs->channel + 1); + error_hangup(bcs); + return ret; + } + + req = bcs->channel ? HD_OPEN_B2CHANNEL : HD_OPEN_B1CHANNEL; + if ((ret = req_submit(bcs, req, 0, BAS_TIMEOUT)) < 0) { + err("could not open channel %d: %s", + bcs->channel + 1, get_usb_statmsg(ret)); + stopurbs(bcs->hw.bas); + error_hangup(bcs); + } + return ret; +} + +/* gigaset_close_bchannel + * called by common.c to disconnect a B channel + * tell the Gigaset base to close the channel + * stopping isochronous I/O and LL notification will be done when the + * acknowledgement for the close arrives + * argument: + * B channel control structure + * return value: + * 0 on success, error code < 0 on error + */ +static int gigaset_close_bchannel(struct bc_state *bcs) +{ + int req, ret; + + IFNULLRETVAL(bcs, -EINVAL); + + if (!(atomic_read(&bcs->cs->hw.bas->basstate) & + (bcs->channel ? BS_B2OPEN : BS_B1OPEN))) { + /* channel not running: just signal common.c */ + gigaset_bchannel_down(bcs); + return 0; + } + + req = bcs->channel ? HD_CLOSE_B2CHANNEL : HD_CLOSE_B1CHANNEL; + if ((ret = req_submit(bcs, req, 0, BAS_TIMEOUT)) < 0) + err("could not submit HD_CLOSE_BxCHANNEL request: %s", + get_usb_statmsg(ret)); + return ret; +} + +/* Device Operations */ +/* ================= */ + +/* complete_cb + * unqueue first command buffer from queue, waking any sleepers + * must be called with cs->cmdlock held + * parameter: + * cs controller state structure + */ +static void complete_cb(struct cardstate *cs) +{ + struct cmdbuf_t *cb; + + IFNULLRET(cs); + cb = cs->cmdbuf; + IFNULLRET(cb); + + /* unqueue completed buffer */ + cs->cmdbytes -= cs->curlen; + dbg(DEBUG_TRANSCMD | DEBUG_LOCKCMD, + "write_command: sent %u bytes, %u left", + cs->curlen, cs->cmdbytes); + if ((cs->cmdbuf = cb->next) != NULL) { + cs->cmdbuf->prev = NULL; + cs->curlen = cs->cmdbuf->len; + } else { + cs->lastcmdbuf = NULL; + cs->curlen = 0; + } + + if (cb->wake_tasklet) + tasklet_schedule(cb->wake_tasklet); + + kfree(cb); +} + +static int atwrite_submit(struct cardstate *cs, unsigned char *buf, int len); + +/* write_command_callback + * USB completion handler for AT command transmission + * called by the USB subsystem in interrupt context + * parameter: + * urb USB request block of completed request + * urb->context = controller state structure + */ +static void write_command_callback(struct urb *urb, struct pt_regs *regs) +{ + struct cardstate *cs; + unsigned long flags; + struct bas_cardstate *ucs; + + IFNULLRET(urb); + cs = (struct cardstate *) urb->context; + IFNULLRET(cs); + ucs = cs->hw.bas; + IFNULLRET(ucs); + + /* check status */ + switch (urb->status) { + case 0: /* normal completion */ + break; + case -ENOENT: /* canceled */ + case -ECONNRESET: /* canceled (async) */ + case -EINPROGRESS: /* pending */ + /* ignore silently */ + dbg(DEBUG_USBREQ, + "%s: %s", __func__, get_usb_statmsg(urb->status)); + return; + default: /* any failure */ + if (++ucs->retry_cmd_out > BAS_RETRY) { + warn("command write: %s, giving up after %d retries", + get_usb_statmsg(urb->status), ucs->retry_cmd_out); + break; + } + if (cs->cmdbuf == NULL) { + warn("command write: %s, cannot retry - cmdbuf gone", + get_usb_statmsg(urb->status)); + break; + } + notice("command write: %s, retry %d", + get_usb_statmsg(urb->status), ucs->retry_cmd_out); + if (atwrite_submit(cs, cs->cmdbuf->buf, cs->cmdbuf->len) >= 
0) + /* resubmitted - bypass regular exit block */ + return; + /* command send failed, assume base still waiting */ + update_basstate(ucs, BS_ATREADY, 0); + } + + spin_lock_irqsave(&cs->cmdlock, flags); + if (cs->cmdbuf != NULL) + complete_cb(cs); + spin_unlock_irqrestore(&cs->cmdlock, flags); +} + +/* atrdy_timeout + * timeout routine for AT command transmission + * argument: + * controller state structure + */ +static void atrdy_timeout(unsigned long data) +{ + struct cardstate *cs = (struct cardstate *) data; + struct bas_cardstate *ucs; + + IFNULLRET(cs); + ucs = cs->hw.bas; + IFNULLRET(ucs); + + warn("timeout waiting for HD_READY_SEND_ATDATA"); + + /* fake the missing signal - what else can I do? */ + update_basstate(ucs, BS_ATREADY, BS_ATTIMER); + start_cbsend(cs); +} + +/* atwrite_submit + * submit an HD_WRITE_ATMESSAGE command URB + * parameters: + * cs controller state structure + * buf buffer containing command to send + * len length of command to send + * return value: + * 0 on success + * -EFAULT if a NULL pointer is encountered somewhere + * -EBUSY if another request is pending + * any URB submission error code + */ +static int atwrite_submit(struct cardstate *cs, unsigned char *buf, int len) +{ + struct bas_cardstate *ucs; + int ret; + + IFNULLRETVAL(cs, -EFAULT); + ucs = cs->hw.bas; + IFNULLRETVAL(ucs, -EFAULT); + IFNULLRETVAL(ucs->urb_cmd_out, -EFAULT); + + dbg(DEBUG_USBREQ, "-------> HD_WRITE_ATMESSAGE (%d)", len); + + if (ucs->urb_cmd_out->status == -EINPROGRESS) { + err("could not submit HD_WRITE_ATMESSAGE: URB busy"); + return -EBUSY; + } + + ucs->dr_cmd_out.bRequestType = OUT_VENDOR_REQ; + ucs->dr_cmd_out.bRequest = HD_WRITE_ATMESSAGE; + ucs->dr_cmd_out.wValue = 0; + ucs->dr_cmd_out.wIndex = 0; + ucs->dr_cmd_out.wLength = cpu_to_le16(len); + usb_fill_control_urb(ucs->urb_cmd_out, ucs->udev, + usb_sndctrlpipe(ucs->udev, 0), + (unsigned char*) &ucs->dr_cmd_out, buf, len, + write_command_callback, cs); + + if ((ret = usb_submit_urb(ucs->urb_cmd_out, SLAB_ATOMIC)) != 0) { + err("could not submit HD_WRITE_ATMESSAGE: %s", + get_usb_statmsg(ret)); + return ret; + } + + /* submitted successfully */ + update_basstate(ucs, 0, BS_ATREADY); + + /* start timeout if necessary */ + if (!(atomic_read(&ucs->basstate) & BS_ATTIMER)) { + dbg(DEBUG_OUTPUT, + "setting ATREADY timeout of %d/10 secs", ATRDY_TIMEOUT); + ucs->timer_atrdy.expires = jiffies + ATRDY_TIMEOUT * HZ / 10; + ucs->timer_atrdy.data = (unsigned long) cs; + ucs->timer_atrdy.function = atrdy_timeout; + add_timer(&ucs->timer_atrdy); + update_basstate(ucs, BS_ATTIMER, 0); + } + return 0; +} + +/* start_cbsend + * start transmission of AT command queue if necessary + * parameter: + * cs controller state structure + * return value: + * 0 on success + * error code < 0 on error + */ +static int start_cbsend(struct cardstate *cs) +{ + struct cmdbuf_t *cb; + struct bas_cardstate *ucs; + unsigned long flags; + int rc; + int retval = 0; + + IFNULLRETVAL(cs, -EFAULT); + ucs = cs->hw.bas; + IFNULLRETVAL(ucs, -EFAULT); + + /* check if AT channel is open */ + if (!(atomic_read(&ucs->basstate) & BS_ATOPEN)) { + dbg(DEBUG_TRANSCMD | DEBUG_LOCKCMD, "AT channel not open"); + rc = req_submit(cs->bcs, HD_OPEN_ATCHANNEL, 0, BAS_TIMEOUT); + if (rc < 0) { + err("could not open AT channel"); + /* flush command queue */ + spin_lock_irqsave(&cs->cmdlock, flags); + while (cs->cmdbuf != NULL) + complete_cb(cs); + spin_unlock_irqrestore(&cs->cmdlock, flags); + } + return rc; + } + + /* try to send first command in queue */ + 
spin_lock_irqsave(&cs->cmdlock, flags); + + while ((cb = cs->cmdbuf) != NULL && + atomic_read(&ucs->basstate) & BS_ATREADY) { + ucs->retry_cmd_out = 0; + rc = atwrite_submit(cs, cb->buf, cb->len); + if (unlikely(rc)) { + retval = rc; + complete_cb(cs); + } + } + + spin_unlock_irqrestore(&cs->cmdlock, flags); + return retval; +} + +/* gigaset_write_cmd + * This function is called by the device independent part of the driver + * to transmit an AT command string to the Gigaset device. + * It encapsulates the device specific method for transmission over the + * direct USB connection to the base. + * The command string is added to the queue of commands to send, and + * USB transmission is started if necessary. + * parameters: + * cs controller state structure + * buf command string to send + * len number of bytes to send (max. IF_WRITEBUF) + * wake_tasklet tasklet to run when transmission is completed (NULL if none) + * return value: + * number of bytes queued on success + * error code < 0 on error + */ +static int gigaset_write_cmd(struct cardstate *cs, + const unsigned char *buf, int len, + struct tasklet_struct *wake_tasklet) +{ + struct cmdbuf_t *cb; + unsigned long flags; + int status; + + gigaset_dbg_buffer(atomic_read(&cs->mstate) != MS_LOCKED ? + DEBUG_TRANSCMD : DEBUG_LOCKCMD, + "CMD Transmit", len, buf, 0); + + if (!atomic_read(&cs->connected)) { + err("%s: not connected", __func__); + return -ENODEV; + } + + if (len <= 0) + return 0; /* nothing to do */ + + if (len > IF_WRITEBUF) + len = IF_WRITEBUF; + if (!(cb = kmalloc(sizeof(struct cmdbuf_t) + len, GFP_ATOMIC))) { + err("%s: out of memory", __func__); + return -ENOMEM; + } + + memcpy(cb->buf, buf, len); + cb->len = len; + cb->offset = 0; + cb->next = NULL; + cb->wake_tasklet = wake_tasklet; + + spin_lock_irqsave(&cs->cmdlock, flags); + cb->prev = cs->lastcmdbuf; + if (cs->lastcmdbuf) + cs->lastcmdbuf->next = cb; + else { + cs->cmdbuf = cb; + cs->curlen = len; + } + cs->cmdbytes += len; + cs->lastcmdbuf = cb; + spin_unlock_irqrestore(&cs->cmdlock, flags); + + status = start_cbsend(cs); + + return status < 0 ? 
status : len; +} + +/* gigaset_write_room + * tty_driver.write_room interface routine + * return number of characters the driver will accept to be written via gigaset_write_cmd + * parameter: + * controller state structure + * return value: + * number of characters + */ +static int gigaset_write_room(struct cardstate *cs) +{ + return IF_WRITEBUF; +} + +/* gigaset_chars_in_buffer + * tty_driver.chars_in_buffer interface routine + * return number of characters waiting to be sent + * parameter: + * controller state structure + * return value: + * number of characters + */ +static int gigaset_chars_in_buffer(struct cardstate *cs) +{ + unsigned long flags; + unsigned bytes; + + spin_lock_irqsave(&cs->cmdlock, flags); + bytes = cs->cmdbytes; + spin_unlock_irqrestore(&cs->cmdlock, flags); + + return bytes; +} + +/* gigaset_brkchars + * implementation of ioctl(GIGASET_BRKCHARS) + * parameter: + * controller state structure + * return value: + * -EINVAL (unimplemented function) + */ +static int gigaset_brkchars(struct cardstate *cs, const unsigned char buf[6]) +{ + return -EINVAL; +} + + +/* Device Initialization/Shutdown */ +/* ============================== */ + +/* Free hardware dependent part of the B channel structure + * parameter: + * bcs B channel structure + * return value: + * !=0 on success + */ +static int gigaset_freebcshw(struct bc_state *bcs) +{ + if (!bcs->hw.bas) + return 0; + + if (bcs->hw.bas->isooutbuf) + kfree(bcs->hw.bas->isooutbuf); + kfree(bcs->hw.bas); + bcs->hw.bas = NULL; + return 1; +} + +/* Initialize hardware dependent part of the B channel structure + * parameter: + * bcs B channel structure + * return value: + * !=0 on success + */ +static int gigaset_initbcshw(struct bc_state *bcs) +{ + int i; + struct bas_bc_state *ubc; + + bcs->hw.bas = ubc = kmalloc(sizeof(struct bas_bc_state), GFP_KERNEL); + if (!ubc) { + err("could not allocate bas_bc_state"); + return 0; + } + + atomic_set(&ubc->running, 0); + atomic_set(&ubc->corrbytes, 0); + spin_lock_init(&ubc->isooutlock); + for (i = 0; i < BAS_OUTURBS; ++i) { + ubc->isoouturbs[i].urb = NULL; + ubc->isoouturbs[i].bcs = bcs; + } + ubc->isooutdone = ubc->isooutfree = ubc->isooutovfl = NULL; + ubc->numsub = 0; + if (!(ubc->isooutbuf = kmalloc(sizeof(struct isowbuf_t), GFP_KERNEL))) { + err("could not allocate isochronous output buffer"); + kfree(ubc); + bcs->hw.bas = NULL; + return 0; + } + tasklet_init(&ubc->sent_tasklet, + &write_iso_tasklet, (unsigned long) bcs); + + spin_lock_init(&ubc->isoinlock); + for (i = 0; i < BAS_INURBS; ++i) + ubc->isoinurbs[i] = NULL; + ubc->isoindone = NULL; + ubc->loststatus = -EINPROGRESS; + ubc->isoinlost = 0; + ubc->seqlen = 0; + ubc->inbyte = 0; + ubc->inbits = 0; + ubc->goodbytes = 0; + ubc->alignerrs = 0; + ubc->fcserrs = 0; + ubc->frameerrs = 0; + ubc->giants = 0; + ubc->runts = 0; + ubc->aborts = 0; + ubc->shared0s = 0; + ubc->stolen0s = 0; + tasklet_init(&ubc->rcvd_tasklet, + &read_iso_tasklet, (unsigned long) bcs); + return 1; +} + +static void gigaset_reinitbcshw(struct bc_state *bcs) +{ + struct bas_bc_state *ubc = bcs->hw.bas; + + atomic_set(&bcs->hw.bas->running, 0); + atomic_set(&bcs->hw.bas->corrbytes, 0); + bcs->hw.bas->numsub = 0; + spin_lock_init(&ubc->isooutlock); + spin_lock_init(&ubc->isoinlock); + ubc->loststatus = -EINPROGRESS; +} + +static void gigaset_freecshw(struct cardstate *cs) +{ + struct bas_cardstate *ucs = cs->hw.bas; + + del_timer(&ucs->timer_ctrl); + del_timer(&ucs->timer_atrdy); + del_timer(&ucs->timer_cmd_in); + + kfree(cs->hw.bas); +} + +static int 
gigaset_initcshw(struct cardstate *cs) +{ + struct bas_cardstate *ucs; + + cs->hw.bas = ucs = kmalloc(sizeof *ucs, GFP_KERNEL); + if (!ucs) + return 0; + + ucs->urb_cmd_in = NULL; + ucs->urb_cmd_out = NULL; + ucs->rcvbuf = NULL; + ucs->rcvbuf_size = 0; + + spin_lock_init(&ucs->lock); + ucs->pending = 0; + + atomic_set(&ucs->basstate, 0); + init_timer(&ucs->timer_ctrl); + init_timer(&ucs->timer_atrdy); + init_timer(&ucs->timer_cmd_in); + + return 1; +} + +/* freeurbs + * unlink and deallocate all URBs unconditionally + * caller must make sure that no commands are still in progress + * parameter: + * cs controller state structure + */ +static void freeurbs(struct cardstate *cs) +{ + struct bas_cardstate *ucs; + struct bas_bc_state *ubc; + int i, j; + + IFNULLRET(cs); + ucs = cs->hw.bas; + IFNULLRET(ucs); + + for (j = 0; j < 2; ++j) { + ubc = cs->bcs[j].hw.bas; + IFNULLCONT(ubc); + for (i = 0; i < BAS_OUTURBS; ++i) + if (ubc->isoouturbs[i].urb) { + usb_kill_urb(ubc->isoouturbs[i].urb); + dbg(DEBUG_INIT, + "%s: isoc output URB %d/%d unlinked", + __func__, j, i); + usb_free_urb(ubc->isoouturbs[i].urb); + ubc->isoouturbs[i].urb = NULL; + } + for (i = 0; i < BAS_INURBS; ++i) + if (ubc->isoinurbs[i]) { + usb_kill_urb(ubc->isoinurbs[i]); + dbg(DEBUG_INIT, + "%s: isoc input URB %d/%d unlinked", + __func__, j, i); + usb_free_urb(ubc->isoinurbs[i]); + ubc->isoinurbs[i] = NULL; + } + } + if (ucs->urb_int_in) { + usb_kill_urb(ucs->urb_int_in); + dbg(DEBUG_INIT, "%s: interrupt input URB unlinked", __func__); + usb_free_urb(ucs->urb_int_in); + ucs->urb_int_in = NULL; + } + if (ucs->urb_cmd_out) { + usb_kill_urb(ucs->urb_cmd_out); + dbg(DEBUG_INIT, "%s: command output URB unlinked", __func__); + usb_free_urb(ucs->urb_cmd_out); + ucs->urb_cmd_out = NULL; + } + if (ucs->urb_cmd_in) { + usb_kill_urb(ucs->urb_cmd_in); + dbg(DEBUG_INIT, "%s: command input URB unlinked", __func__); + usb_free_urb(ucs->urb_cmd_in); + ucs->urb_cmd_in = NULL; + } + if (ucs->urb_ctrl) { + usb_kill_urb(ucs->urb_ctrl); + dbg(DEBUG_INIT, "%s: control output URB unlinked", __func__); + usb_free_urb(ucs->urb_ctrl); + ucs->urb_ctrl = NULL; + } +} + +/* gigaset_probe + * This function is called when a new USB device is connected. + * It checks whether the new device is handled by this driver. + */ +static int gigaset_probe(struct usb_interface *interface, + const struct usb_device_id *id) +{ + struct usb_host_interface *hostif; + struct usb_device *udev = interface_to_usbdev(interface); + struct cardstate *cs = NULL; + struct bas_cardstate *ucs = NULL; + struct bas_bc_state *ubc; + struct usb_endpoint_descriptor *endpoint; + int i, j; + int ret; + + IFNULLRETVAL(udev, -ENODEV); + + dbg(DEBUG_ANY, + "%s: Check if device matches .. 
(Vendor: 0x%x, Product: 0x%x)", + __func__, le16_to_cpu(udev->descriptor.idVendor), + le16_to_cpu(udev->descriptor.idProduct)); + + /* See if the device offered us matches what we can accept */ + if ((le16_to_cpu(udev->descriptor.idVendor) != USB_GIGA_VENDOR_ID) || + (le16_to_cpu(udev->descriptor.idProduct) != USB_GIGA_PRODUCT_ID && + le16_to_cpu(udev->descriptor.idProduct) != USB_4175_PRODUCT_ID && + le16_to_cpu(udev->descriptor.idProduct) != USB_SX303_PRODUCT_ID && + le16_to_cpu(udev->descriptor.idProduct) != USB_SX353_PRODUCT_ID)) { + dbg(DEBUG_ANY, "%s: unmatched ID - exiting", __func__); + return -ENODEV; + } + + /* set required alternate setting */ + hostif = interface->cur_altsetting; + if (hostif->desc.bAlternateSetting != 3) { + dbg(DEBUG_ANY, + "%s: wrong alternate setting %d - trying to switch", + __func__, hostif->desc.bAlternateSetting); + if (usb_set_interface(udev, hostif->desc.bInterfaceNumber, 3) < 0) { + warn("usb_set_interface failed, device %d interface %d altsetting %d", + udev->devnum, hostif->desc.bInterfaceNumber, + hostif->desc.bAlternateSetting); + return -ENODEV; + } + hostif = interface->cur_altsetting; + } + + /* Reject application specific interfaces + */ + if (hostif->desc.bInterfaceClass != 255) { + warn("%s: bInterfaceClass == %d", + __func__, hostif->desc.bInterfaceClass); + return -ENODEV; + } + + info("%s: Device matched (Vendor: 0x%x, Product: 0x%x)", + __func__, le16_to_cpu(udev->descriptor.idVendor), + le16_to_cpu(udev->descriptor.idProduct)); + + cs = gigaset_getunassignedcs(driver); + if (!cs) { + err("%s: no free cardstate", __func__); + return -ENODEV; + } + ucs = cs->hw.bas; + ucs->udev = udev; + ucs->interface = interface; + + /* allocate URBs: + * - one for the interrupt pipe + * - three for the different uses of the default control pipe + * - three for each isochronous pipe + */ + ucs->urb_int_in = usb_alloc_urb(0, SLAB_KERNEL); + if (!ucs->urb_int_in) { + err("No free urbs available"); + goto error; + } + ucs->urb_cmd_in = usb_alloc_urb(0, SLAB_KERNEL); + if (!ucs->urb_cmd_in) { + err("No free urbs available"); + goto error; + } + ucs->urb_cmd_out = usb_alloc_urb(0, SLAB_KERNEL); + if (!ucs->urb_cmd_out) { + err("No free urbs available"); + goto error; + } + ucs->urb_ctrl = usb_alloc_urb(0, SLAB_KERNEL); + if (!ucs->urb_ctrl) { + err("No free urbs available"); + goto error; + } + + for (j = 0; j < 2; ++j) { + ubc = cs->bcs[j].hw.bas; + for (i = 0; i < BAS_OUTURBS; ++i) { + ubc->isoouturbs[i].urb = + usb_alloc_urb(BAS_NUMFRAMES, SLAB_KERNEL); + if (!ubc->isoouturbs[i].urb) { + err("No free urbs available"); + goto error; + } + } + for (i = 0; i < BAS_INURBS; ++i) { + ubc->isoinurbs[i] = + usb_alloc_urb(BAS_NUMFRAMES, SLAB_KERNEL); + if (!ubc->isoinurbs[i]) { + err("No free urbs available"); + goto error; + } + } + } + + ucs->rcvbuf = NULL; + ucs->rcvbuf_size = 0; + + /* Fill the interrupt urb and send it to the core */ + endpoint = &hostif->endpoint[0].desc; + usb_fill_int_urb(ucs->urb_int_in, udev, + usb_rcvintpipe(udev, + (endpoint->bEndpointAddress) & 0x0f), + ucs->int_in_buf, 3, read_int_callback, cs, + endpoint->bInterval); + ret = usb_submit_urb(ucs->urb_int_in, SLAB_KERNEL); + if (ret) { + err("could not submit interrupt URB: %s", get_usb_statmsg(ret)); + goto error; + } + + /* tell the device that the driver is ready */ + if ((ret = req_submit(cs->bcs, HD_DEVICE_INIT_ACK, 0, 0)) != 0) + goto error; + + /* tell common part that the device is ready */ + if (startmode == SM_LOCKED) + atomic_set(&cs->mstate, MS_LOCKED); + if 
(!gigaset_start(cs)) + goto error; + + /* save address of controller structure */ + usb_set_intfdata(interface, cs); + + /* set up device sysfs */ + gigaset_init_dev_sysfs(interface); + return 0; + +error: + freeurbs(cs); + gigaset_unassign(cs); + return -ENODEV; +} + +/* gigaset_disconnect + * This function is called when the Gigaset base is unplugged. + */ +static void gigaset_disconnect(struct usb_interface *interface) +{ + struct cardstate *cs; + struct bas_cardstate *ucs; + + /* clear device sysfs */ + gigaset_free_dev_sysfs(interface); + + cs = usb_get_intfdata(interface); + usb_set_intfdata(interface, NULL); + + IFNULLRET(cs); + ucs = cs->hw.bas; + IFNULLRET(ucs); + + info("disconnecting GigaSet base"); + gigaset_stop(cs); + freeurbs(cs); + kfree(ucs->rcvbuf); + ucs->rcvbuf = NULL; + ucs->rcvbuf_size = 0; + atomic_set(&ucs->basstate, 0); + gigaset_unassign(cs); +} + +static struct gigaset_ops gigops = { + gigaset_write_cmd, + gigaset_write_room, + gigaset_chars_in_buffer, + gigaset_brkchars, + gigaset_init_bchannel, + gigaset_close_bchannel, + gigaset_initbcshw, + gigaset_freebcshw, + gigaset_reinitbcshw, + gigaset_initcshw, + gigaset_freecshw, + gigaset_set_modem_ctrl, + gigaset_baud_rate, + gigaset_set_line_ctrl, + gigaset_isoc_send_skb, + gigaset_isoc_input, +}; + +/* bas_gigaset_init + * This function is called after the kernel module is loaded. + */ +static int __init bas_gigaset_init(void) +{ + int result; + + /* allocate memory for our driver state and intialize it */ + if ((driver = gigaset_initdriver(GIGASET_MINOR, GIGASET_MINORS, + GIGASET_MODULENAME, GIGASET_DEVNAME, + GIGASET_DEVFSNAME, &gigops, + THIS_MODULE)) == NULL) + goto error; + + /* allocate memory for our device state and intialize it */ + cardstate = gigaset_initcs(driver, 2, 0, 0, cidmode, GIGASET_MODULENAME); + if (!cardstate) + goto error; + + /* register this driver with the USB subsystem */ + result = usb_register(&gigaset_usb_driver); + if (result < 0) { + err("usb_register failed (error %d)", -result); + goto error; + } + + info(DRIVER_AUTHOR); + info(DRIVER_DESC); + return 0; + +error: if (cardstate) + gigaset_freecs(cardstate); + cardstate = NULL; + if (driver) + gigaset_freedriver(driver); + driver = NULL; + return -1; +} + +/* bas_gigaset_exit + * This function is called before the kernel module is unloaded. + */ +static void __exit bas_gigaset_exit(void) +{ + gigaset_blockdriver(driver); /* => probe will fail + * => no gigaset_start any more + */ + + gigaset_shutdown(cardstate); + /* from now on, no isdn callback should be possible */ + + if (atomic_read(&cardstate->hw.bas->basstate) & BS_ATOPEN) { + dbg(DEBUG_ANY, "closing AT channel"); + if (req_submit(cardstate->bcs, + HD_CLOSE_ATCHANNEL, 0, BAS_TIMEOUT) >= 0) { + /* successfully submitted - wait for completion */ + //wait_event_interruptible(cs->initwait, !cs->hw.bas->pending); + //FIXME need own wait queue? wakeup? 
+ } + } + + /* deregister this driver with the USB subsystem */ + usb_deregister(&gigaset_usb_driver); + /* this will call the disconnect-callback */ + /* from now on, no disconnect/probe callback should be running */ + + gigaset_freecs(cardstate); + cardstate = NULL; + gigaset_freedriver(driver); + driver = NULL; +} + + +module_init(bas_gigaset_init); +module_exit(bas_gigaset_exit); + +MODULE_AUTHOR(DRIVER_AUTHOR); +MODULE_DESCRIPTION(DRIVER_DESC); +MODULE_LICENSE("GPL"); diff --git a/drivers/isdn/gigaset/common.c b/drivers/isdn/gigaset/common.c new file mode 100644 index 000000000000..64371995c1a9 --- /dev/null +++ b/drivers/isdn/gigaset/common.c @@ -0,0 +1,1203 @@ +/* + * Stuff used by all variants of the driver + * + * Copyright (c) 2001 by Stefan Eilers <Eilers.Stefan@epost.de>, + * Hansjoerg Lipp <hjlipp@web.de>, + * Tilman Schmidt <tilman@imap.cc>. + * + * ===================================================================== + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of + * the License, or (at your option) any later version. + * ===================================================================== + * ToDo: ... + * ===================================================================== + * Version: $Id: common.c,v 1.104.4.22 2006/02/04 18:28:16 hjlipp Exp $ + * ===================================================================== + */ + +#include "gigaset.h" +#include <linux/ctype.h> +#include <linux/module.h> +#include <linux/moduleparam.h> + +/* Version Information */ +#define DRIVER_AUTHOR "Hansjoerg Lipp <hjlipp@web.de>, Tilman Schmidt <tilman@imap.cc>, Stefan Eilers <Eilers.Stefan@epost.de>" +#define DRIVER_DESC "Driver for Gigaset 307x" + +/* Module parameters */ +int gigaset_debuglevel = DEBUG_DEFAULT; +EXPORT_SYMBOL_GPL(gigaset_debuglevel); +module_param_named(debug, gigaset_debuglevel, int, S_IRUGO|S_IWUSR); +MODULE_PARM_DESC(debug, "debug level"); + +/*====================================================================== + Prototypes of internal functions + */ + +//static void gigaset_process_response(int resp_code, int parameter, +// struct at_state_t *at_state, +// unsigned char ** pstring); +static struct cardstate *alloc_cs(struct gigaset_driver *drv); +static void free_cs(struct cardstate *cs); +static void make_valid(struct cardstate *cs, unsigned mask); +static void make_invalid(struct cardstate *cs, unsigned mask); + +#define VALID_MINOR 0x01 +#define VALID_ID 0x02 +#define ASSIGNED 0x04 + +/* bitwise byte inversion table */ +__u8 gigaset_invtab[256] = { + 0x00, 0x80, 0x40, 0xc0, 0x20, 0xa0, 0x60, 0xe0, + 0x10, 0x90, 0x50, 0xd0, 0x30, 0xb0, 0x70, 0xf0, + 0x08, 0x88, 0x48, 0xc8, 0x28, 0xa8, 0x68, 0xe8, + 0x18, 0x98, 0x58, 0xd8, 0x38, 0xb8, 0x78, 0xf8, + 0x04, 0x84, 0x44, 0xc4, 0x24, 0xa4, 0x64, 0xe4, + 0x14, 0x94, 0x54, 0xd4, 0x34, 0xb4, 0x74, 0xf4, + 0x0c, 0x8c, 0x4c, 0xcc, 0x2c, 0xac, 0x6c, 0xec, + 0x1c, 0x9c, 0x5c, 0xdc, 0x3c, 0xbc, 0x7c, 0xfc, + 0x02, 0x82, 0x42, 0xc2, 0x22, 0xa2, 0x62, 0xe2, + 0x12, 0x92, 0x52, 0xd2, 0x32, 0xb2, 0x72, 0xf2, + 0x0a, 0x8a, 0x4a, 0xca, 0x2a, 0xaa, 0x6a, 0xea, + 0x1a, 0x9a, 0x5a, 0xda, 0x3a, 0xba, 0x7a, 0xfa, + 0x06, 0x86, 0x46, 0xc6, 0x26, 0xa6, 0x66, 0xe6, + 0x16, 0x96, 0x56, 0xd6, 0x36, 0xb6, 0x76, 0xf6, + 0x0e, 0x8e, 0x4e, 0xce, 0x2e, 0xae, 0x6e, 0xee, + 0x1e, 0x9e, 0x5e, 0xde, 0x3e, 0xbe, 0x7e, 0xfe, + 0x01, 0x81, 0x41, 0xc1, 0x21, 0xa1, 0x61, 0xe1, + 0x11, 0x91, 0x51, 0xd1, 
0x31, 0xb1, 0x71, 0xf1, + 0x09, 0x89, 0x49, 0xc9, 0x29, 0xa9, 0x69, 0xe9, + 0x19, 0x99, 0x59, 0xd9, 0x39, 0xb9, 0x79, 0xf9, + 0x05, 0x85, 0x45, 0xc5, 0x25, 0xa5, 0x65, 0xe5, + 0x15, 0x95, 0x55, 0xd5, 0x35, 0xb5, 0x75, 0xf5, + 0x0d, 0x8d, 0x4d, 0xcd, 0x2d, 0xad, 0x6d, 0xed, + 0x1d, 0x9d, 0x5d, 0xdd, 0x3d, 0xbd, 0x7d, 0xfd, + 0x03, 0x83, 0x43, 0xc3, 0x23, 0xa3, 0x63, 0xe3, + 0x13, 0x93, 0x53, 0xd3, 0x33, 0xb3, 0x73, 0xf3, + 0x0b, 0x8b, 0x4b, 0xcb, 0x2b, 0xab, 0x6b, 0xeb, + 0x1b, 0x9b, 0x5b, 0xdb, 0x3b, 0xbb, 0x7b, 0xfb, + 0x07, 0x87, 0x47, 0xc7, 0x27, 0xa7, 0x67, 0xe7, + 0x17, 0x97, 0x57, 0xd7, 0x37, 0xb7, 0x77, 0xf7, + 0x0f, 0x8f, 0x4f, 0xcf, 0x2f, 0xaf, 0x6f, 0xef, + 0x1f, 0x9f, 0x5f, 0xdf, 0x3f, 0xbf, 0x7f, 0xff +}; +EXPORT_SYMBOL_GPL(gigaset_invtab); + +void gigaset_dbg_buffer(enum debuglevel level, const unsigned char *msg, + size_t len, const unsigned char *buf, int from_user) +{ + unsigned char outbuf[80]; + unsigned char inbuf[80 - 1]; + size_t numin; + const unsigned char *in; + size_t space = sizeof outbuf - 1; + unsigned char *out = outbuf; + + if (!from_user) { + in = buf; + numin = len; + } else { + numin = len < sizeof inbuf ? len : sizeof inbuf; + in = inbuf; + if (copy_from_user(inbuf, (const unsigned char __user *) buf, numin)) { + strncpy(inbuf, "<FAULT>", sizeof inbuf); + numin = sizeof "<FAULT>" - 1; + } + } + + for (; numin && space; --numin, ++in) { + --space; + if (*in >= 32) + *out++ = *in; + else { + *out++ = '^'; + if (space) { + *out++ = '@' + *in; + --space; + } + } + } + *out = 0; + + dbg(level, "%s (%u bytes): %s", msg, (unsigned) len, outbuf); +} +EXPORT_SYMBOL_GPL(gigaset_dbg_buffer); + +static int setflags(struct cardstate *cs, unsigned flags, unsigned delay) +{ + int r; + + r = cs->ops->set_modem_ctrl(cs, cs->control_state, flags); + cs->control_state = flags; + if (r < 0) + return r; + + if (delay) { + set_current_state(TASK_INTERRUPTIBLE); + schedule_timeout(delay * HZ / 1000); + } + + return 0; +} + +int gigaset_enterconfigmode(struct cardstate *cs) +{ + int i, r; + + if (!atomic_read(&cs->connected)) { + err("not connected!"); + return -1; + } + + cs->control_state = TIOCM_RTS; //FIXME + + r = setflags(cs, TIOCM_DTR, 200); + if (r < 0) + goto error; + r = setflags(cs, 0, 200); + if (r < 0) + goto error; + for (i = 0; i < 5; ++i) { + r = setflags(cs, TIOCM_RTS, 100); + if (r < 0) + goto error; + r = setflags(cs, 0, 100); + if (r < 0) + goto error; + } + r = setflags(cs, TIOCM_RTS|TIOCM_DTR, 800); + if (r < 0) + goto error; + + return 0; + +error: + err("error %d on setuartbits!\n", -r); + cs->control_state = TIOCM_RTS|TIOCM_DTR; // FIXME is this a good value? + cs->ops->set_modem_ctrl(cs, 0, TIOCM_RTS|TIOCM_DTR); + + return -1; //r +} + +static int test_timeout(struct at_state_t *at_state) +{ + if (!at_state->timer_expires) + return 0; + + if (--at_state->timer_expires) { + dbg(DEBUG_MCMD, "decreased timer of %p to %lu", + at_state, at_state->timer_expires); + return 0; + } + + if (!gigaset_add_event(at_state->cs, at_state, EV_TIMEOUT, NULL, + atomic_read(&at_state->timer_index), NULL)) { + //FIXME what should we do? 
+ } + + return 1; +} + +static void timer_tick(unsigned long data) +{ + struct cardstate *cs = (struct cardstate *) data; + unsigned long flags; + unsigned channel; + struct at_state_t *at_state; + int timeout = 0; + + spin_lock_irqsave(&cs->lock, flags); + + for (channel = 0; channel < cs->channels; ++channel) + if (test_timeout(&cs->bcs[channel].at_state)) + timeout = 1; + + if (test_timeout(&cs->at_state)) + timeout = 1; + + list_for_each_entry(at_state, &cs->temp_at_states, list) + if (test_timeout(at_state)) + timeout = 1; + + if (atomic_read(&cs->running)) { + mod_timer(&cs->timer, jiffies + GIG_TICK); + if (timeout) { + dbg(DEBUG_CMD, "scheduling timeout"); + tasklet_schedule(&cs->event_tasklet); + } + } + + spin_unlock_irqrestore(&cs->lock, flags); +} + +int gigaset_get_channel(struct bc_state *bcs) +{ + unsigned long flags; + + spin_lock_irqsave(&bcs->cs->lock, flags); + if (bcs->use_count) { + dbg(DEBUG_ANY, "could not allocate channel %d", bcs->channel); + spin_unlock_irqrestore(&bcs->cs->lock, flags); + return 0; + } + ++bcs->use_count; + bcs->busy = 1; + dbg(DEBUG_ANY, "allocated channel %d", bcs->channel); + spin_unlock_irqrestore(&bcs->cs->lock, flags); + return 1; +} + +void gigaset_free_channel(struct bc_state *bcs) +{ + unsigned long flags; + + spin_lock_irqsave(&bcs->cs->lock, flags); + if (!bcs->busy) { + dbg(DEBUG_ANY, "could not free channel %d", bcs->channel); + spin_unlock_irqrestore(&bcs->cs->lock, flags); + return; + } + --bcs->use_count; + bcs->busy = 0; + dbg(DEBUG_ANY, "freed channel %d", bcs->channel); + spin_unlock_irqrestore(&bcs->cs->lock, flags); +} + +int gigaset_get_channels(struct cardstate *cs) +{ + unsigned long flags; + int i; + + spin_lock_irqsave(&cs->lock, flags); + for (i = 0; i < cs->channels; ++i) + if (cs->bcs[i].use_count) { + spin_unlock_irqrestore(&cs->lock, flags); + dbg(DEBUG_ANY, "could not allocated all channels"); + return 0; + } + for (i = 0; i < cs->channels; ++i) + ++cs->bcs[i].use_count; + spin_unlock_irqrestore(&cs->lock, flags); + + dbg(DEBUG_ANY, "allocated all channels"); + + return 1; +} + +void gigaset_free_channels(struct cardstate *cs) +{ + unsigned long flags; + int i; + + dbg(DEBUG_ANY, "unblocking all channels"); + spin_lock_irqsave(&cs->lock, flags); + for (i = 0; i < cs->channels; ++i) + --cs->bcs[i].use_count; + spin_unlock_irqrestore(&cs->lock, flags); +} + +void gigaset_block_channels(struct cardstate *cs) +{ + unsigned long flags; + int i; + + dbg(DEBUG_ANY, "blocking all channels"); + spin_lock_irqsave(&cs->lock, flags); + for (i = 0; i < cs->channels; ++i) + ++cs->bcs[i].use_count; + spin_unlock_irqrestore(&cs->lock, flags); +} + +static void clear_events(struct cardstate *cs) +{ + struct event_t *ev; + unsigned head, tail; + + /* no locking needed (no reader/writer allowed) */ + + head = atomic_read(&cs->ev_head); + tail = atomic_read(&cs->ev_tail); + + while (tail != head) { + ev = cs->events + head; + kfree(ev->ptr); + + head = (head + 1) % MAX_EVENTS; + } + + atomic_set(&cs->ev_head, tail); +} + +struct event_t *gigaset_add_event(struct cardstate *cs, + struct at_state_t *at_state, int type, + void *ptr, int parameter, void *arg) +{ + unsigned long flags; + unsigned next, tail; + struct event_t *event = NULL; + + spin_lock_irqsave(&cs->ev_lock, flags); + + tail = atomic_read(&cs->ev_tail); + next = (tail + 1) % MAX_EVENTS; + if (unlikely(next == atomic_read(&cs->ev_head))) + err("event queue full"); + else { + event = cs->events + tail; + event->type = type; + event->at_state = at_state; + event->cid = -1; + 
event->ptr = ptr; + event->arg = arg; + event->parameter = parameter; + atomic_set(&cs->ev_tail, next); + } + + spin_unlock_irqrestore(&cs->ev_lock, flags); + + return event; +} +EXPORT_SYMBOL_GPL(gigaset_add_event); + +static void free_strings(struct at_state_t *at_state) +{ + int i; + + for (i = 0; i < STR_NUM; ++i) { + kfree(at_state->str_var[i]); + at_state->str_var[i] = NULL; + } +} + +static void clear_at_state(struct at_state_t *at_state) +{ + free_strings(at_state); +} + +static void dealloc_at_states(struct cardstate *cs) +{ + struct at_state_t *cur, *next; + + list_for_each_entry_safe(cur, next, &cs->temp_at_states, list) { + list_del(&cur->list); + free_strings(cur); + kfree(cur); + } +} + +static void gigaset_freebcs(struct bc_state *bcs) +{ + int i; + + dbg(DEBUG_INIT, "freeing bcs[%d]->hw", bcs->channel); + if (!bcs->cs->ops->freebcshw(bcs)) { + dbg(DEBUG_INIT, "failed"); + } + + dbg(DEBUG_INIT, "clearing bcs[%d]->at_state", bcs->channel); + clear_at_state(&bcs->at_state); + dbg(DEBUG_INIT, "freeing bcs[%d]->skb", bcs->channel); + + if (bcs->skb) + dev_kfree_skb(bcs->skb); + for (i = 0; i < AT_NUM; ++i) { + kfree(bcs->commands[i]); + bcs->commands[i] = NULL; + } +} + +void gigaset_freecs(struct cardstate *cs) +{ + int i; + unsigned long flags; + + if (!cs) + return; + + down(&cs->sem); + + if (!cs->bcs) + goto f_cs; + if (!cs->inbuf) + goto f_bcs; + + spin_lock_irqsave(&cs->lock, flags); + atomic_set(&cs->running, 0); + spin_unlock_irqrestore(&cs->lock, flags); /* event handler and timer are not rescheduled below */ + + tasklet_kill(&cs->event_tasklet); + del_timer_sync(&cs->timer); + + switch (cs->cs_init) { + default: + gigaset_if_free(cs); + + dbg(DEBUG_INIT, "clearing hw"); + cs->ops->freecshw(cs); + + //FIXME cmdbuf + + /* fall through */ + case 2: /* error in initcshw */ + /* Deregister from LL */ + make_invalid(cs, VALID_ID); + dbg(DEBUG_INIT, "clearing iif"); + gigaset_i4l_cmd(cs, ISDN_STAT_UNLOAD); + + /* fall through */ + case 1: /* error when regestering to LL */ + dbg(DEBUG_INIT, "clearing at_state"); + clear_at_state(&cs->at_state); + dealloc_at_states(cs); + + /* fall through */ + case 0: /* error in one call to initbcs */ + for (i = 0; i < cs->channels; ++i) { + dbg(DEBUG_INIT, "clearing bcs[%d]", i); + gigaset_freebcs(cs->bcs + i); + } + + clear_events(cs); + dbg(DEBUG_INIT, "freeing inbuf"); + kfree(cs->inbuf); + } +f_bcs: dbg(DEBUG_INIT, "freeing bcs[]"); + kfree(cs->bcs); +f_cs: dbg(DEBUG_INIT, "freeing cs"); + up(&cs->sem); + free_cs(cs); +} +EXPORT_SYMBOL_GPL(gigaset_freecs); + +void gigaset_at_init(struct at_state_t *at_state, struct bc_state *bcs, + struct cardstate *cs, int cid) +{ + int i; + + INIT_LIST_HEAD(&at_state->list); + at_state->waiting = 0; + at_state->getstring = 0; + at_state->pending_commands = 0; + at_state->timer_expires = 0; + at_state->timer_active = 0; + atomic_set(&at_state->timer_index, 0); + atomic_set(&at_state->seq_index, 0); + at_state->ConState = 0; + for (i = 0; i < STR_NUM; ++i) + at_state->str_var[i] = NULL; + at_state->int_var[VAR_ZDLE] = 0; + at_state->int_var[VAR_ZCTP] = -1; + at_state->int_var[VAR_ZSAU] = ZSAU_NULL; + at_state->cs = cs; + at_state->bcs = bcs; + at_state->cid = cid; + if (!cid) + at_state->replystruct = cs->tabnocid; + else + at_state->replystruct = cs->tabcid; +} + + +static void gigaset_inbuf_init(struct inbuf_t *inbuf, struct bc_state *bcs, + struct cardstate *cs, int inputstate) +/* inbuf->read must be allocated before! 
*/ +{ + atomic_set(&inbuf->head, 0); + atomic_set(&inbuf->tail, 0); + inbuf->cs = cs; + inbuf->bcs = bcs; /*base driver: NULL*/ + inbuf->rcvbuf = NULL; //FIXME + inbuf->inputstate = inputstate; +} + +/* Initialize the b-channel structure */ +static struct bc_state *gigaset_initbcs(struct bc_state *bcs, + struct cardstate *cs, int channel) +{ + int i; + + bcs->tx_skb = NULL; //FIXME -> hw part + + skb_queue_head_init(&bcs->squeue); + + bcs->corrupted = 0; + bcs->trans_down = 0; + bcs->trans_up = 0; + + dbg(DEBUG_INIT, "setting up bcs[%d]->at_state", channel); + gigaset_at_init(&bcs->at_state, bcs, cs, -1); + + bcs->rcvbytes = 0; + +#ifdef CONFIG_GIGASET_DEBUG + bcs->emptycount = 0; +#endif + + dbg(DEBUG_INIT, "allocating bcs[%d]->skb", channel); + bcs->fcs = PPP_INITFCS; + bcs->inputstate = 0; + if (cs->ignoreframes) { + bcs->inputstate |= INS_skip_frame; + bcs->skb = NULL; + } else if ((bcs->skb = dev_alloc_skb(SBUFSIZE + HW_HDR_LEN)) != NULL) + skb_reserve(bcs->skb, HW_HDR_LEN); + else { + warn("could not allocate skb"); + bcs->inputstate |= INS_skip_frame; + } + + bcs->channel = channel; + bcs->cs = cs; + + bcs->chstate = 0; + bcs->use_count = 1; + bcs->busy = 0; + bcs->ignore = cs->ignoreframes; + + for (i = 0; i < AT_NUM; ++i) + bcs->commands[i] = NULL; + + dbg(DEBUG_INIT, " setting up bcs[%d]->hw", channel); + if (cs->ops->initbcshw(bcs)) + return bcs; + +//error: + dbg(DEBUG_INIT, " failed"); + + dbg(DEBUG_INIT, " freeing bcs[%d]->skb", channel); + if (bcs->skb) + dev_kfree_skb(bcs->skb); + + return NULL; +} + +/* gigaset_initcs + * Allocate and initialize cardstate structure for Gigaset driver + * Calls hardware dependent gigaset_initcshw() function + * Calls B channel initialization function gigaset_initbcs() for each B channel + * parameters: + * drv hardware driver the device belongs to + * channels number of B channels supported by device + * onechannel !=0: B channel data and AT commands share one communication channel + * ==0: B channels have separate communication channels + * ignoreframes number of frames to ignore after setting up B channel + * cidmode !=0: start in CallID mode + * modulename name of driver module (used for I4L registration) + * return value: + * pointer to cardstate structure + */ +struct cardstate *gigaset_initcs(struct gigaset_driver *drv, int channels, + int onechannel, int ignoreframes, + int cidmode, const char *modulename) +{ + struct cardstate *cs = NULL; + int i; + + dbg(DEBUG_INIT, "allocating cs"); + cs = alloc_cs(drv); + if (!cs) + goto error; + dbg(DEBUG_INIT, "allocating bcs[0..%d]", channels - 1); + cs->bcs = kmalloc(channels * sizeof(struct bc_state), GFP_KERNEL); + if (!cs->bcs) + goto error; + dbg(DEBUG_INIT, "allocating inbuf"); + cs->inbuf = kmalloc(sizeof(struct inbuf_t), GFP_KERNEL); + if (!cs->inbuf) + goto error; + + cs->cs_init = 0; + cs->channels = channels; + cs->onechannel = onechannel; + cs->ignoreframes = ignoreframes; + INIT_LIST_HEAD(&cs->temp_at_states); + atomic_set(&cs->running, 0); + init_timer(&cs->timer); /* clear next & prev */ + spin_lock_init(&cs->ev_lock); + atomic_set(&cs->ev_tail, 0); + atomic_set(&cs->ev_head, 0); + init_MUTEX_LOCKED(&cs->sem); + tasklet_init(&cs->event_tasklet, &gigaset_handle_event, (unsigned long) cs); + atomic_set(&cs->commands_pending, 0); + cs->cur_at_seq = 0; + cs->gotfwver = -1; + cs->open_count = 0; + cs->tty = NULL; + atomic_set(&cs->cidmode, cidmode != 0); + + //if(onechannel) { //FIXME + cs->tabnocid = gigaset_tab_nocid_m10x; + cs->tabcid = gigaset_tab_cid_m10x; + //} else { + // 
cs->tabnocid = gigaset_tab_nocid; + // cs->tabcid = gigaset_tab_cid; + //} + + init_waitqueue_head(&cs->waitqueue); + cs->waiting = 0; + + atomic_set(&cs->mode, M_UNKNOWN); + atomic_set(&cs->mstate, MS_UNINITIALIZED); + + for (i = 0; i < channels; ++i) { + dbg(DEBUG_INIT, "setting up bcs[%d].read", i); + if (!gigaset_initbcs(cs->bcs + i, cs, i)) + goto error; + } + + ++cs->cs_init; + + dbg(DEBUG_INIT, "setting up at_state"); + spin_lock_init(&cs->lock); + gigaset_at_init(&cs->at_state, NULL, cs, 0); + cs->dle = 0; + cs->cbytes = 0; + + dbg(DEBUG_INIT, "setting up inbuf"); + if (onechannel) { //FIXME distinction necessary? + gigaset_inbuf_init(cs->inbuf, cs->bcs, cs, INS_command); + } else + gigaset_inbuf_init(cs->inbuf, NULL, cs, INS_command); + + atomic_set(&cs->connected, 0); + + dbg(DEBUG_INIT, "setting up cmdbuf"); + cs->cmdbuf = cs->lastcmdbuf = NULL; + spin_lock_init(&cs->cmdlock); + cs->curlen = 0; + cs->cmdbytes = 0; + + /* + * Tell the ISDN4Linux subsystem (the LL) that + * a driver for a USB-Device is available ! + * If this is done, "isdnctrl" is able to bind a device for this driver even + * if no physical usb-device is currently connected. + * But this device will just be accessable if a physical USB device is connected + * (via "gigaset_probe") . + */ + dbg(DEBUG_INIT, "setting up iif"); + if (!gigaset_register_to_LL(cs, modulename)) { + err("register_isdn=>error"); + goto error; + } + + make_valid(cs, VALID_ID); + ++cs->cs_init; + dbg(DEBUG_INIT, "setting up hw"); + if (!cs->ops->initcshw(cs)) + goto error; + + ++cs->cs_init; + + gigaset_if_init(cs); + + atomic_set(&cs->running, 1); + cs->timer.data = (unsigned long) cs; + cs->timer.function = timer_tick; + cs->timer.expires = jiffies + GIG_TICK; + /* FIXME: can jiffies increase too much until the timer is added? + * Same problem(?) with mod_timer() in timer_tick(). */ + add_timer(&cs->timer); + + dbg(DEBUG_INIT, "cs initialized!"); + up(&cs->sem); + return cs; + +error: if (cs) + up(&cs->sem); + dbg(DEBUG_INIT, "failed"); + gigaset_freecs(cs); + return NULL; +} +EXPORT_SYMBOL_GPL(gigaset_initcs); + +/* ReInitialize the b-channel structure */ /* e.g. 
called on hangup, disconnect */ +void gigaset_bcs_reinit(struct bc_state *bcs) +{ + struct sk_buff *skb; + struct cardstate *cs = bcs->cs; + unsigned long flags; + + while ((skb = skb_dequeue(&bcs->squeue)) != NULL) + dev_kfree_skb(skb); + + spin_lock_irqsave(&cs->lock, flags); //FIXME + clear_at_state(&bcs->at_state); + bcs->at_state.ConState = 0; + bcs->at_state.timer_active = 0; + bcs->at_state.timer_expires = 0; + bcs->at_state.cid = -1; /* No CID defined */ + spin_unlock_irqrestore(&cs->lock, flags); + + bcs->inputstate = 0; + +#ifdef CONFIG_GIGASET_DEBUG + bcs->emptycount = 0; +#endif + + bcs->fcs = PPP_INITFCS; + bcs->chstate = 0; + + bcs->ignore = cs->ignoreframes; + if (bcs->ignore) + bcs->inputstate |= INS_skip_frame; + + + cs->ops->reinitbcshw(bcs); +} + +static void cleanup_cs(struct cardstate *cs) +{ + struct cmdbuf_t *cb, *tcb; + int i; + unsigned long flags; + + spin_lock_irqsave(&cs->lock, flags); + + atomic_set(&cs->mode, M_UNKNOWN); + atomic_set(&cs->mstate, MS_UNINITIALIZED); + + clear_at_state(&cs->at_state); + dealloc_at_states(cs); + free_strings(&cs->at_state); + gigaset_at_init(&cs->at_state, NULL, cs, 0); + + kfree(cs->inbuf->rcvbuf); + cs->inbuf->rcvbuf = NULL; + cs->inbuf->inputstate = INS_command; + atomic_set(&cs->inbuf->head, 0); + atomic_set(&cs->inbuf->tail, 0); + + cb = cs->cmdbuf; + while (cb) { + tcb = cb; + cb = cb->next; + kfree(tcb); + } + cs->cmdbuf = cs->lastcmdbuf = NULL; + cs->curlen = 0; + cs->cmdbytes = 0; + cs->gotfwver = -1; + cs->dle = 0; + cs->cur_at_seq = 0; + atomic_set(&cs->commands_pending, 0); + cs->cbytes = 0; + + spin_unlock_irqrestore(&cs->lock, flags); + + for (i = 0; i < cs->channels; ++i) { + gigaset_freebcs(cs->bcs + i); + if (!gigaset_initbcs(cs->bcs + i, cs, i)) + break; //FIXME error handling + } + + if (cs->waiting) { + cs->cmd_result = -ENODEV; + cs->waiting = 0; + wake_up_interruptible(&cs->waitqueue); + } +} + + +int gigaset_start(struct cardstate *cs) +{ + if (down_interruptible(&cs->sem)) + return 0; + //info("USB device for Gigaset 307x now attached to Dev %d", ucs->minor); + + atomic_set(&cs->connected, 1); + + if (atomic_read(&cs->mstate) != MS_LOCKED) { + cs->ops->set_modem_ctrl(cs, 0, TIOCM_DTR|TIOCM_RTS); + cs->ops->baud_rate(cs, B115200); + cs->ops->set_line_ctrl(cs, CS8); + cs->control_state = TIOCM_DTR|TIOCM_RTS; + } else { + //FIXME use some saved values? + } + + cs->waiting = 1; + + if (!gigaset_add_event(cs, &cs->at_state, EV_START, NULL, 0, NULL)) { + cs->waiting = 0; + //FIXME what should we do? + goto error; + } + + dbg(DEBUG_CMD, "scheduling START"); + gigaset_schedule_event(cs); + + wait_event(cs->waitqueue, !cs->waiting); + + up(&cs->sem); + return 1; + +error: + up(&cs->sem); + return 0; +} +EXPORT_SYMBOL_GPL(gigaset_start); + +void gigaset_shutdown(struct cardstate *cs) +{ + down(&cs->sem); + + cs->waiting = 1; + + if (!gigaset_add_event(cs, &cs->at_state, EV_SHUTDOWN, NULL, 0, NULL)) { + //FIXME what should we do? + goto exit; + } + + dbg(DEBUG_CMD, "scheduling SHUTDOWN"); + gigaset_schedule_event(cs); + + if (wait_event_interruptible(cs->waitqueue, !cs->waiting)) { + warn("aborted"); + //FIXME + } + + if (atomic_read(&cs->mstate) != MS_LOCKED) { + //FIXME? + //gigaset_baud_rate(cs, B115200); + //gigaset_set_line_ctrl(cs, CS8); + //gigaset_set_modem_ctrl(cs, TIOCM_DTR|TIOCM_RTS, 0); + //cs->control_state = 0; + } else { + //FIXME use some saved values? 
+ } + + cleanup_cs(cs); + +exit: + up(&cs->sem); +} +EXPORT_SYMBOL_GPL(gigaset_shutdown); + +void gigaset_stop(struct cardstate *cs) +{ + down(&cs->sem); + + atomic_set(&cs->connected, 0); + + cs->waiting = 1; + + if (!gigaset_add_event(cs, &cs->at_state, EV_STOP, NULL, 0, NULL)) { + //FIXME what should we do? + goto exit; + } + + dbg(DEBUG_CMD, "scheduling STOP"); + gigaset_schedule_event(cs); + + if (wait_event_interruptible(cs->waitqueue, !cs->waiting)) { + warn("aborted"); + //FIXME + } + + /* Tell the LL that the device is not available .. */ + gigaset_i4l_cmd(cs, ISDN_STAT_STOP); // FIXME move to event layer? + + cleanup_cs(cs); + +exit: + up(&cs->sem); +} +EXPORT_SYMBOL_GPL(gigaset_stop); + +static LIST_HEAD(drivers); +static spinlock_t driver_lock = SPIN_LOCK_UNLOCKED; + +struct cardstate *gigaset_get_cs_by_id(int id) +{ + unsigned long flags; + static struct cardstate *ret = NULL; + static struct cardstate *cs; + struct gigaset_driver *drv; + unsigned i; + + spin_lock_irqsave(&driver_lock, flags); + list_for_each_entry(drv, &drivers, list) { + spin_lock(&drv->lock); + for (i = 0; i < drv->minors; ++i) { + if (drv->flags[i] & VALID_ID) { + cs = drv->cs + i; + if (cs->myid == id) + ret = cs; + } + if (ret) + break; + } + spin_unlock(&drv->lock); + if (ret) + break; + } + spin_unlock_irqrestore(&driver_lock, flags); + return ret; +} + +void gigaset_debugdrivers(void) +{ + unsigned long flags; + static struct cardstate *cs; + struct gigaset_driver *drv; + unsigned i; + + spin_lock_irqsave(&driver_lock, flags); + list_for_each_entry(drv, &drivers, list) { + dbg(DEBUG_DRIVER, "driver %p", drv); + spin_lock(&drv->lock); + for (i = 0; i < drv->minors; ++i) { + dbg(DEBUG_DRIVER, " index %u", i); + dbg(DEBUG_DRIVER, " flags 0x%02x", drv->flags[i]); + cs = drv->cs + i; + dbg(DEBUG_DRIVER, " cardstate %p", cs); + dbg(DEBUG_DRIVER, " minor_index %u", cs->minor_index); + dbg(DEBUG_DRIVER, " driver %p", cs->driver); + dbg(DEBUG_DRIVER, " i4l id %d", cs->myid); + } + spin_unlock(&drv->lock); + } + spin_unlock_irqrestore(&driver_lock, flags); +} +EXPORT_SYMBOL_GPL(gigaset_debugdrivers); + +struct cardstate *gigaset_get_cs_by_tty(struct tty_struct *tty) +{ + if (tty->index < 0 || tty->index >= tty->driver->num) + return NULL; + return gigaset_get_cs_by_minor(tty->index + tty->driver->minor_start); +} + +struct cardstate *gigaset_get_cs_by_minor(unsigned minor) +{ + unsigned long flags; + static struct cardstate *ret = NULL; + struct gigaset_driver *drv; + unsigned index; + + spin_lock_irqsave(&driver_lock, flags); + list_for_each_entry(drv, &drivers, list) { + if (minor < drv->minor || minor >= drv->minor + drv->minors) + continue; + index = minor - drv->minor; + spin_lock(&drv->lock); + if (drv->flags[index] & VALID_MINOR) + ret = drv->cs + index; + spin_unlock(&drv->lock); + if (ret) + break; + } + spin_unlock_irqrestore(&driver_lock, flags); + return ret; +} + +void gigaset_freedriver(struct gigaset_driver *drv) +{ + unsigned long flags; + + spin_lock_irqsave(&driver_lock, flags); + list_del(&drv->list); + spin_unlock_irqrestore(&driver_lock, flags); + + gigaset_if_freedriver(drv); + module_put(drv->owner); + + kfree(drv->cs); + kfree(drv->flags); + kfree(drv); +} +EXPORT_SYMBOL_GPL(gigaset_freedriver); + +/* gigaset_initdriver + * Allocate and initialize gigaset_driver structure. Initialize interface. + * parameters: + * minor First minor number + * minors Number of minors this driver can handle + * procname Name of the driver (e.g. 
for /proc/tty/drivers, path in /proc/driver) + * devname Name of the device files (prefix without minor number) + * devfsname Devfs name of the device files without %d + * return value: + * Pointer to the gigaset_driver structure on success, NULL on failure. + */ +struct gigaset_driver *gigaset_initdriver(unsigned minor, unsigned minors, + const char *procname, + const char *devname, + const char *devfsname, + const struct gigaset_ops *ops, + struct module *owner) +{ + struct gigaset_driver *drv; + unsigned long flags; + unsigned i; + + drv = kmalloc(sizeof *drv, GFP_KERNEL); + if (!drv) + return NULL; + if (!try_module_get(owner)) + return NULL; + + drv->cs = NULL; + drv->have_tty = 0; + drv->minor = minor; + drv->minors = minors; + spin_lock_init(&drv->lock); + drv->blocked = 0; + drv->ops = ops; + drv->owner = owner; + INIT_LIST_HEAD(&drv->list); + + drv->cs = kmalloc(minors * sizeof *drv->cs, GFP_KERNEL); + if (!drv->cs) + goto out1; + drv->flags = kmalloc(minors * sizeof *drv->flags, GFP_KERNEL); + if (!drv->flags) + goto out2; + + for (i = 0; i < minors; ++i) { + drv->flags[i] = 0; + drv->cs[i].driver = drv; + drv->cs[i].ops = drv->ops; + drv->cs[i].minor_index = i; + } + + gigaset_if_initdriver(drv, procname, devname, devfsname); + + spin_lock_irqsave(&driver_lock, flags); + list_add(&drv->list, &drivers); + spin_unlock_irqrestore(&driver_lock, flags); + + return drv; + +out2: + kfree(drv->cs); +out1: + kfree(drv); + module_put(owner); + return NULL; +} +EXPORT_SYMBOL_GPL(gigaset_initdriver); + +static struct cardstate *alloc_cs(struct gigaset_driver *drv) +{ + unsigned long flags; + unsigned i; + static struct cardstate *ret = NULL; + + spin_lock_irqsave(&drv->lock, flags); + for (i = 0; i < drv->minors; ++i) { + if (!(drv->flags[i] & VALID_MINOR)) { + drv->flags[i] = VALID_MINOR; + ret = drv->cs + i; + } + if (ret) + break; + } + spin_unlock_irqrestore(&drv->lock, flags); + return ret; +} + +static void free_cs(struct cardstate *cs) +{ + unsigned long flags; + struct gigaset_driver *drv = cs->driver; + spin_lock_irqsave(&drv->lock, flags); + drv->flags[cs->minor_index] = 0; + spin_unlock_irqrestore(&drv->lock, flags); +} + +static void make_valid(struct cardstate *cs, unsigned mask) +{ + unsigned long flags; + struct gigaset_driver *drv = cs->driver; + spin_lock_irqsave(&drv->lock, flags); + drv->flags[cs->minor_index] |= mask; + spin_unlock_irqrestore(&drv->lock, flags); +} + +static void make_invalid(struct cardstate *cs, unsigned mask) +{ + unsigned long flags; + struct gigaset_driver *drv = cs->driver; + spin_lock_irqsave(&drv->lock, flags); + drv->flags[cs->minor_index] &= ~mask; + spin_unlock_irqrestore(&drv->lock, flags); +} + +/* For drivers without fixed assignment device<->cardstate (usb) */ +struct cardstate *gigaset_getunassignedcs(struct gigaset_driver *drv) +{ + unsigned long flags; + struct cardstate *cs = NULL; + unsigned i; + + spin_lock_irqsave(&drv->lock, flags); + if (drv->blocked) + goto exit; + for (i = 0; i < drv->minors; ++i) { + if ((drv->flags[i] & VALID_MINOR) && + !(drv->flags[i] & ASSIGNED)) { + drv->flags[i] |= ASSIGNED; + cs = drv->cs + i; + break; + } + } +exit: + spin_unlock_irqrestore(&drv->lock, flags); + return cs; +} +EXPORT_SYMBOL_GPL(gigaset_getunassignedcs); + +void gigaset_unassign(struct cardstate *cs) +{ + unsigned long flags; + unsigned *minor_flags; + struct gigaset_driver *drv; + + if (!cs) + return; + drv = cs->driver; + spin_lock_irqsave(&drv->lock, flags); + minor_flags = drv->flags + cs->minor_index; + if (*minor_flags & 
VALID_MINOR) + *minor_flags &= ~ASSIGNED; + spin_unlock_irqrestore(&drv->lock, flags); +} +EXPORT_SYMBOL_GPL(gigaset_unassign); + +void gigaset_blockdriver(struct gigaset_driver *drv) +{ + unsigned long flags; + spin_lock_irqsave(&drv->lock, flags); + drv->blocked = 1; + spin_unlock_irqrestore(&drv->lock, flags); +} +EXPORT_SYMBOL_GPL(gigaset_blockdriver); + +static int __init gigaset_init_module(void) +{ + /* in accordance with the principle of least astonishment, + * setting the 'debug' parameter to 1 activates a sensible + * set of default debug levels + */ + if (gigaset_debuglevel == 1) + gigaset_debuglevel = DEBUG_DEFAULT; + + info(DRIVER_AUTHOR); + info(DRIVER_DESC); + return 0; +} + +static void __exit gigaset_exit_module(void) +{ +} + +module_init(gigaset_init_module); +module_exit(gigaset_exit_module); + +MODULE_AUTHOR(DRIVER_AUTHOR); +MODULE_DESCRIPTION(DRIVER_DESC); + +MODULE_LICENSE("GPL"); diff --git a/drivers/isdn/gigaset/ev-layer.c b/drivers/isdn/gigaset/ev-layer.c new file mode 100644 index 000000000000..fdcb80bb21c7 --- /dev/null +++ b/drivers/isdn/gigaset/ev-layer.c @@ -0,0 +1,1983 @@ +/* + * Stuff used by all variants of the driver + * + * Copyright (c) 2001 by Stefan Eilers <Eilers.Stefan@epost.de>, + * Hansjoerg Lipp <hjlipp@web.de>, + * Tilman Schmidt <tilman@imap.cc>. + * + * ===================================================================== + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of + * the License, or (at your option) any later version. + * ===================================================================== + * ToDo: ... + * ===================================================================== + * Version: $Id: ev-layer.c,v 1.4.2.18 2006/02/04 18:28:16 hjlipp Exp $ + * ===================================================================== + */ + +#include "gigaset.h" + +/* ========================================================== */ +/* bit masks for pending commands */ +#define PC_INIT 0x004 +#define PC_DLE0 0x008 +#define PC_DLE1 0x010 +#define PC_CID 0x080 +#define PC_NOCID 0x100 +#define PC_HUP 0x002 +#define PC_DIAL 0x001 +#define PC_ACCEPT 0x040 +#define PC_SHUTDOWN 0x020 +#define PC_CIDMODE 0x200 +#define PC_UMMODE 0x400 + +/* types of modem responses */ +#define RT_NOTHING 0 +#define RT_ZSAU 1 +#define RT_RING 2 +#define RT_NUMBER 3 +#define RT_STRING 4 +#define RT_HEX 5 +#define RT_ZCAU 6 + +/* Possible ASCII responses */ +#define RSP_OK 0 +//#define RSP_BUSY 1 +//#define RSP_CONNECT 2 +#define RSP_ZGCI 3 +#define RSP_RING 4 +#define RSP_ZAOC 5 +#define RSP_ZCSTR 6 +#define RSP_ZCFGT 7 +#define RSP_ZCFG 8 +#define RSP_ZCCR 9 +#define RSP_EMPTY 10 +#define RSP_ZLOG 11 +#define RSP_ZCAU 12 +#define RSP_ZMWI 13 +#define RSP_ZABINFO 14 +#define RSP_ZSMLSTCHG 15 +#define RSP_VAR 100 +#define RSP_ZSAU (RSP_VAR + VAR_ZSAU) +#define RSP_ZDLE (RSP_VAR + VAR_ZDLE) +#define RSP_ZVLS (RSP_VAR + VAR_ZVLS) +#define RSP_ZCTP (RSP_VAR + VAR_ZCTP) +#define RSP_STR (RSP_VAR + VAR_NUM) +#define RSP_NMBR (RSP_STR + STR_NMBR) +#define RSP_ZCPN (RSP_STR + STR_ZCPN) +#define RSP_ZCON (RSP_STR + STR_ZCON) +#define RSP_ZBC (RSP_STR + STR_ZBC) +#define RSP_ZHLC (RSP_STR + STR_ZHLC) +#define RSP_ERROR -1 /* ERROR */ +#define RSP_WRONG_CID -2 /* unknown cid in cmd */ +//#define RSP_EMPTY -3 +#define RSP_UNKNOWN -4 /* unknown response */ +#define RSP_FAIL -5 /* internal error */ +#define RSP_INVAL -6 /* invalid response */ + 
+#define RSP_NONE -19 +#define RSP_STRING -20 +#define RSP_NULL -21 +//#define RSP_RETRYFAIL -22 +//#define RSP_RETRY -23 +//#define RSP_SKIP -24 +#define RSP_INIT -27 +#define RSP_ANY -26 +#define RSP_LAST -28 +#define RSP_NODEV -9 + +/* actions for process_response */ +#define ACT_NOTHING 0 +#define ACT_SETDLE1 1 +#define ACT_SETDLE0 2 +#define ACT_FAILINIT 3 +#define ACT_HUPMODEM 4 +#define ACT_CONFIGMODE 5 +#define ACT_INIT 6 +#define ACT_DLE0 7 +#define ACT_DLE1 8 +#define ACT_FAILDLE0 9 +#define ACT_FAILDLE1 10 +#define ACT_RING 11 +#define ACT_CID 12 +#define ACT_FAILCID 13 +#define ACT_SDOWN 14 +#define ACT_FAILSDOWN 15 +#define ACT_DEBUG 16 +#define ACT_WARN 17 +#define ACT_DIALING 18 +#define ACT_ABORTDIAL 19 +#define ACT_DISCONNECT 20 +#define ACT_CONNECT 21 +#define ACT_REMOTEREJECT 22 +#define ACT_CONNTIMEOUT 23 +#define ACT_REMOTEHUP 24 +#define ACT_ABORTHUP 25 +#define ACT_ICALL 26 +#define ACT_ACCEPTED 27 +#define ACT_ABORTACCEPT 28 +#define ACT_TIMEOUT 29 +#define ACT_GETSTRING 30 +#define ACT_SETVER 31 +#define ACT_FAILVER 32 +#define ACT_GOTVER 33 +#define ACT_TEST 34 +#define ACT_ERROR 35 +#define ACT_ABORTCID 36 +#define ACT_ZCAU 37 +#define ACT_NOTIFY_BC_DOWN 38 +#define ACT_NOTIFY_BC_UP 39 +#define ACT_DIAL 40 +#define ACT_ACCEPT 41 +#define ACT_PROTO_L2 42 +#define ACT_HUP 43 +#define ACT_IF_LOCK 44 +#define ACT_START 45 +#define ACT_STOP 46 +#define ACT_FAKEDLE0 47 +#define ACT_FAKEHUP 48 +#define ACT_FAKESDOWN 49 +#define ACT_SHUTDOWN 50 +#define ACT_PROC_CIDMODE 51 +#define ACT_UMODESET 52 +#define ACT_FAILUMODE 53 +#define ACT_CMODESET 54 +#define ACT_FAILCMODE 55 +#define ACT_IF_VER 56 +#define ACT_CMD 100 + +/* at command sequences */ +#define SEQ_NONE 0 +#define SEQ_INIT 100 +#define SEQ_DLE0 200 +#define SEQ_DLE1 250 +#define SEQ_CID 300 +#define SEQ_NOCID 350 +#define SEQ_HUP 400 +#define SEQ_DIAL 600 +#define SEQ_ACCEPT 720 +#define SEQ_SHUTDOWN 500 +#define SEQ_CIDMODE 10 +#define SEQ_UMMODE 11 + + +// 100: init, 200: dle0, 250:dle1, 300: get cid (dial), 350: "hup" (no cid), 400: hup, 500: reset, 600: dial, 700: ring +struct reply_t gigaset_tab_nocid_m10x[]= /* with dle mode */ +{ + /* resp_code, min_ConState, max_ConState, parameter, new_ConState, timeout, action, command */ + + /* initialize device, set cid mode if possible */ + //{RSP_INIT, -1, -1,100, 900, 0, {ACT_TEST}}, + //{RSP_ERROR, 900,900, -1, 0, 0, {ACT_FAILINIT}}, + //{RSP_OK, 900,900, -1, 100, INIT_TIMEOUT, + // {ACT_TIMEOUT}}, + + {RSP_INIT, -1, -1,SEQ_INIT, 100, INIT_TIMEOUT, + {ACT_TIMEOUT}}, /* wait until device is ready */ + + {EV_TIMEOUT, 100,100, -1, 101, 3, {0}, "Z\r"}, /* device in transparent mode? try to initialize it. */ + {RSP_OK, 101,103, -1, 120, 5, {ACT_GETSTRING}, "+GMR\r"}, /* get version */ + + {EV_TIMEOUT, 101,101, -1, 102, 5, {0}, "Z\r"}, /* timeout => try once again. */ + {RSP_ERROR, 101,101, -1, 102, 5, {0}, "Z\r"}, /* error => try once again. */ + + {EV_TIMEOUT, 102,102, -1, 108, 5, {ACT_SETDLE1}, "^SDLE=0\r"}, /* timeout => try again in DLE mode. */ + {RSP_OK, 108,108, -1, 104,-1}, + {RSP_ZDLE, 104,104, 0, 103, 5, {0}, "Z\r"}, + {EV_TIMEOUT, 104,104, -1, 0, 0, {ACT_FAILINIT}}, + {RSP_ERROR, 108,108, -1, 0, 0, {ACT_FAILINIT}}, + + {EV_TIMEOUT, 108,108, -1, 105, 2, {ACT_SETDLE0, + ACT_HUPMODEM, + ACT_TIMEOUT}}, /* still timeout => connection in unimodem mode? */ + {EV_TIMEOUT, 105,105, -1, 103, 5, {0}, "Z\r"}, + + {RSP_ERROR, 102,102, -1, 107, 5, {0}, "^GETPRE\r"}, /* ERROR on ATZ => maybe in config mode? 
*/ + {RSP_OK, 107,107, -1, 0, 0, {ACT_CONFIGMODE}}, + {RSP_ERROR, 107,107, -1, 0, 0, {ACT_FAILINIT}}, + {EV_TIMEOUT, 107,107, -1, 0, 0, {ACT_FAILINIT}}, + + {RSP_ERROR, 103,103, -1, 0, 0, {ACT_FAILINIT}}, + {EV_TIMEOUT, 103,103, -1, 0, 0, {ACT_FAILINIT}}, + + {RSP_STRING, 120,120, -1, 121,-1, {ACT_SETVER}}, + + {EV_TIMEOUT, 120,121, -1, 0, 0, {ACT_FAILVER, ACT_INIT}}, + {RSP_ERROR, 120,121, -1, 0, 0, {ACT_FAILVER, ACT_INIT}}, + {RSP_OK, 121,121, -1, 0, 0, {ACT_GOTVER, ACT_INIT}}, +#if 0 + {EV_TIMEOUT, 120,121, -1, 130, 5, {ACT_FAILVER}, "^SGCI=1\r"}, + {RSP_ERROR, 120,121, -1, 130, 5, {ACT_FAILVER}, "^SGCI=1\r"}, + {RSP_OK, 121,121, -1, 130, 5, {ACT_GOTVER}, "^SGCI=1\r"}, + + {RSP_OK, 130,130, -1, 0, 0, {ACT_INIT}}, + {RSP_ERROR, 130,130, -1, 0, 0, {ACT_FAILINIT}}, + {EV_TIMEOUT, 130,130, -1, 0, 0, {ACT_FAILINIT}}, +#endif + + /* leave dle mode */ + {RSP_INIT, 0, 0,SEQ_DLE0, 201, 5, {0}, "^SDLE=0\r"}, + {RSP_OK, 201,201, -1, 202,-1}, + //{RSP_ZDLE, 202,202, 0, 202, 0, {ACT_ERROR}},//DELETE + {RSP_ZDLE, 202,202, 0, 0, 0, {ACT_DLE0}}, + {RSP_NODEV, 200,249, -1, 0, 0, {ACT_FAKEDLE0}}, + {RSP_ERROR, 200,249, -1, 0, 0, {ACT_FAILDLE0}}, + {EV_TIMEOUT, 200,249, -1, 0, 0, {ACT_FAILDLE0}}, + + /* enter dle mode */ + {RSP_INIT, 0, 0,SEQ_DLE1, 251, 5, {0}, "^SDLE=1\r"}, + {RSP_OK, 251,251, -1, 252,-1}, + {RSP_ZDLE, 252,252, 1, 0, 0, {ACT_DLE1}}, + {RSP_ERROR, 250,299, -1, 0, 0, {ACT_FAILDLE1}}, + {EV_TIMEOUT, 250,299, -1, 0, 0, {ACT_FAILDLE1}}, + + /* incoming call */ + {RSP_RING, -1, -1, -1, -1,-1, {ACT_RING}}, + + /* get cid */ + //{RSP_INIT, 0, 0,300, 901, 0, {ACT_TEST}}, + //{RSP_ERROR, 901,901, -1, 0, 0, {ACT_FAILCID}}, + //{RSP_OK, 901,901, -1, 301, 5, {0}, "^SGCI?\r"}, + + {RSP_INIT, 0, 0,SEQ_CID, 301, 5, {0}, "^SGCI?\r"}, + {RSP_OK, 301,301, -1, 302,-1}, + {RSP_ZGCI, 302,302, -1, 0, 0, {ACT_CID}}, + {RSP_ERROR, 301,349, -1, 0, 0, {ACT_FAILCID}}, + {EV_TIMEOUT, 301,349, -1, 0, 0, {ACT_FAILCID}}, + + /* enter cid mode */ + {RSP_INIT, 0, 0,SEQ_CIDMODE, 150, 5, {0}, "^SGCI=1\r"}, + {RSP_OK, 150,150, -1, 0, 0, {ACT_CMODESET}}, + {RSP_ERROR, 150,150, -1, 0, 0, {ACT_FAILCMODE}}, + {EV_TIMEOUT, 150,150, -1, 0, 0, {ACT_FAILCMODE}}, + + /* leave cid mode */ + //{RSP_INIT, 0, 0,SEQ_UMMODE, 160, 5, {0}, "^SGCI=0\r"}, + {RSP_INIT, 0, 0,SEQ_UMMODE, 160, 5, {0}, "Z\r"}, + {RSP_OK, 160,160, -1, 0, 0, {ACT_UMODESET}}, + {RSP_ERROR, 160,160, -1, 0, 0, {ACT_FAILUMODE}}, + {EV_TIMEOUT, 160,160, -1, 0, 0, {ACT_FAILUMODE}}, + + /* abort getting cid */ + {RSP_INIT, 0, 0,SEQ_NOCID, 0, 0, {ACT_ABORTCID}}, + + /* reset */ +#if 0 + {RSP_INIT, 0, 0,SEQ_SHUTDOWN, 503, 5, {0}, "^SGCI=0\r"}, + {RSP_OK, 503,503, -1, 504, 5, {0}, "Z\r"}, +#endif + {RSP_INIT, 0, 0,SEQ_SHUTDOWN, 504, 5, {0}, "Z\r"}, + {RSP_OK, 504,504, -1, 0, 0, {ACT_SDOWN}}, + {RSP_ERROR, 501,599, -1, 0, 0, {ACT_FAILSDOWN}}, + {EV_TIMEOUT, 501,599, -1, 0, 0, {ACT_FAILSDOWN}}, + {RSP_NODEV, 501,599, -1, 0, 0, {ACT_FAKESDOWN}}, + + {EV_PROC_CIDMODE,-1, -1, -1, -1,-1, {ACT_PROC_CIDMODE}}, //FIXME + {EV_IF_LOCK, -1, -1, -1, -1,-1, {ACT_IF_LOCK}}, //FIXME + {EV_IF_VER, -1, -1, -1, -1,-1, {ACT_IF_VER}}, //FIXME + {EV_START, -1, -1, -1, -1,-1, {ACT_START}}, //FIXME + {EV_STOP, -1, -1, -1, -1,-1, {ACT_STOP}}, //FIXME + {EV_SHUTDOWN, -1, -1, -1, -1,-1, {ACT_SHUTDOWN}}, //FIXME + + /* misc. 
*/ + {RSP_EMPTY, -1, -1, -1, -1,-1, {ACT_DEBUG}}, //FIXME + {RSP_ZCFGT, -1, -1, -1, -1,-1, {ACT_DEBUG}}, //FIXME + {RSP_ZCFG, -1, -1, -1, -1,-1, {ACT_DEBUG}}, //FIXME + {RSP_ZLOG, -1, -1, -1, -1,-1, {ACT_DEBUG}}, //FIXME + {RSP_ZMWI, -1, -1, -1, -1,-1, {ACT_DEBUG}}, //FIXME + {RSP_ZABINFO, -1, -1, -1, -1,-1, {ACT_DEBUG}}, //FIXME + {RSP_ZSMLSTCHG,-1, -1, -1, -1,-1, {ACT_DEBUG}}, //FIXME + + {RSP_ZCAU, -1, -1, -1, -1,-1, {ACT_ZCAU}}, + {RSP_NONE, -1, -1, -1, -1,-1, {ACT_DEBUG}}, + {RSP_ANY, -1, -1, -1, -1,-1, {ACT_WARN}}, + {RSP_LAST} +}; + +// 600: start dialing, 650: dial in progress, 800: connection is up, 700: ring, 400: hup, 750: accepted icall +struct reply_t gigaset_tab_cid_m10x[] = /* for M10x */ +{ + /* resp_code, min_ConState, max_ConState, parameter, new_ConState, timeout, action, command */ + + /* dial */ + {EV_DIAL, -1, -1, -1, -1,-1, {ACT_DIAL}}, //FIXME + {RSP_INIT, 0, 0,SEQ_DIAL, 601, 5, {ACT_CMD+AT_BC}}, + {RSP_OK, 601,601, -1, 602, 5, {ACT_CMD+AT_HLC}}, + {RSP_NULL, 602,602, -1, 603, 5, {ACT_CMD+AT_PROTO}}, + {RSP_OK, 602,602, -1, 603, 5, {ACT_CMD+AT_PROTO}}, + {RSP_OK, 603,603, -1, 604, 5, {ACT_CMD+AT_TYPE}}, + {RSP_OK, 604,604, -1, 605, 5, {ACT_CMD+AT_MSN}}, + {RSP_OK, 605,605, -1, 606, 5, {ACT_CMD+AT_ISO}}, + {RSP_NULL, 605,605, -1, 606, 5, {ACT_CMD+AT_ISO}}, + {RSP_OK, 606,606, -1, 607, 5, {0}, "+VLS=17\r"}, /* set "Endgeraetemodus" */ + {RSP_OK, 607,607, -1, 608,-1}, + //{RSP_ZSAU, 608,608,ZSAU_PROCEEDING, 608, 0, {ACT_ERROR}},//DELETE + {RSP_ZSAU, 608,608,ZSAU_PROCEEDING, 609, 5, {ACT_CMD+AT_DIAL}}, + {RSP_OK, 609,609, -1, 650, 0, {ACT_DIALING}}, + + {RSP_ZVLS, 608,608, 17, -1,-1, {ACT_DEBUG}}, + {RSP_ZCTP, 609,609, -1, -1,-1, {ACT_DEBUG}}, + {RSP_ZCPN, 609,609, -1, -1,-1, {ACT_DEBUG}}, + {RSP_ERROR, 601,609, -1, 0, 0, {ACT_ABORTDIAL}}, + {EV_TIMEOUT, 601,609, -1, 0, 0, {ACT_ABORTDIAL}}, + + /* dialing */ + {RSP_ZCTP, 650,650, -1, -1,-1, {ACT_DEBUG}}, + {RSP_ZCPN, 650,650, -1, -1,-1, {ACT_DEBUG}}, + {RSP_ZSAU, 650,650,ZSAU_CALL_DELIVERED, -1,-1, {ACT_DEBUG}}, /* some devices don't send this */ + + /* connection established */ + {RSP_ZSAU, 650,650,ZSAU_ACTIVE, 800,-1, {ACT_CONNECT}}, //FIXME -> DLE1 + {RSP_ZSAU, 750,750,ZSAU_ACTIVE, 800,-1, {ACT_CONNECT}}, //FIXME -> DLE1 + + {EV_BC_OPEN, 800,800, -1, 800,-1, {ACT_NOTIFY_BC_UP}}, //FIXME new constate + timeout + + /* remote hangup */ + {RSP_ZSAU, 650,650,ZSAU_DISCONNECT_IND, 0, 0, {ACT_REMOTEREJECT}}, + {RSP_ZSAU, 750,750,ZSAU_DISCONNECT_IND, 0, 0, {ACT_REMOTEHUP}}, + {RSP_ZSAU, 800,800,ZSAU_DISCONNECT_IND, 0, 0, {ACT_REMOTEHUP}}, + + /* hangup */ + {EV_HUP, -1, -1, -1, -1,-1, {ACT_HUP}}, //FIXME + {RSP_INIT, -1, -1,SEQ_HUP, 401, 5, {0}, "+VLS=0\r"}, /* hang up */ //-1,-1? + {RSP_OK, 401,401, -1, 402, 5}, + {RSP_ZVLS, 402,402, 0, 403, 5}, + {RSP_ZSAU, 403,403,ZSAU_DISCONNECT_REQ, -1,-1, {ACT_DEBUG}}, /* if not remote hup */ + //{RSP_ZSAU, 403,403,ZSAU_NULL, 401, 0, {ACT_ERROR}}, //DELETE//FIXME -> DLE0 // should we do this _before_ hanging up for base driver? + {RSP_ZSAU, 403,403,ZSAU_NULL, 0, 0, {ACT_DISCONNECT}}, //FIXME -> DLE0 // should we do this _before_ hanging up for base driver? + {RSP_NODEV, 401,403, -1, 0, 0, {ACT_FAKEHUP}}, //FIXME -> DLE0 // should we do this _before_ hanging up for base driver? 
+ {RSP_ERROR, 401,401, -1, 0, 0, {ACT_ABORTHUP}}, + {EV_TIMEOUT, 401,403, -1, 0, 0, {ACT_ABORTHUP}}, + + {EV_BC_CLOSED, 0, 0, -1, 0,-1, {ACT_NOTIFY_BC_DOWN}}, //FIXME new constate + timeout + + /* ring */ + {RSP_ZBC, 700,700, -1, -1,-1, {0}}, + {RSP_ZHLC, 700,700, -1, -1,-1, {0}}, + {RSP_NMBR, 700,700, -1, -1,-1, {0}}, + {RSP_ZCPN, 700,700, -1, -1,-1, {0}}, + {RSP_ZCTP, 700,700, -1, -1,-1, {0}}, + {EV_TIMEOUT, 700,700, -1, 720,720, {ACT_ICALL}}, + {EV_BC_CLOSED,720,720, -1, 0,-1, {ACT_NOTIFY_BC_DOWN}}, + + /*accept icall*/ + {EV_ACCEPT, -1, -1, -1, -1,-1, {ACT_ACCEPT}}, //FIXME + {RSP_INIT, 720,720,SEQ_ACCEPT, 721, 5, {ACT_CMD+AT_PROTO}}, + {RSP_OK, 721,721, -1, 722, 5, {ACT_CMD+AT_ISO}}, + {RSP_OK, 722,722, -1, 723, 5, {0}, "+VLS=17\r"}, /* set "Endgeraetemodus" */ + {RSP_OK, 723,723, -1, 724, 5, {0}}, + {RSP_ZVLS, 724,724, 17, 750,50, {ACT_ACCEPTED}}, + {RSP_ERROR, 721,729, -1, 0, 0, {ACT_ABORTACCEPT}}, + {EV_TIMEOUT, 721,729, -1, 0, 0, {ACT_ABORTACCEPT}}, + {RSP_ZSAU, 700,729,ZSAU_NULL, 0, 0, {ACT_ABORTACCEPT}}, + {RSP_ZSAU, 700,729,ZSAU_ACTIVE, 0, 0, {ACT_ABORTACCEPT}}, + {RSP_ZSAU, 700,729,ZSAU_DISCONNECT_IND, 0, 0, {ACT_ABORTACCEPT}}, + + {EV_TIMEOUT, 750,750, -1, 0, 0, {ACT_CONNTIMEOUT}}, + + /* misc. */ + {EV_PROTO_L2, -1, -1, -1, -1,-1, {ACT_PROTO_L2}}, //FIXME + + {RSP_ZCON, -1, -1, -1, -1,-1, {ACT_DEBUG}}, //FIXME + {RSP_ZCCR, -1, -1, -1, -1,-1, {ACT_DEBUG}}, //FIXME + {RSP_ZAOC, -1, -1, -1, -1,-1, {ACT_DEBUG}}, //FIXME + {RSP_ZCSTR, -1, -1, -1, -1,-1, {ACT_DEBUG}}, //FIXME + + {RSP_ZCAU, -1, -1, -1, -1,-1, {ACT_ZCAU}}, + {RSP_NONE, -1, -1, -1, -1,-1, {ACT_DEBUG}}, + {RSP_ANY, -1, -1, -1, -1,-1, {ACT_WARN}}, + {RSP_LAST} +}; + + +#if 0 +static struct reply_t tab_nocid[]= /* no dle mode */ //FIXME aenderungen uebernehmen +{ + /* resp_code, min_ConState, max_ConState, parameter, new_ConState, timeout, action, command */ + + {RSP_ANY, -1, -1, -1, -1,-1, ACT_WARN, NULL}, + {RSP_LAST,0,0,0,0,0,0} +}; + +static struct reply_t tab_cid[] = /* no dle mode */ //FIXME aenderungen uebernehmen +{ + /* resp_code, min_ConState, max_ConState, parameter, new_ConState, timeout, action, command */ + + {RSP_ANY, -1, -1, -1, -1,-1, ACT_WARN, NULL}, + {RSP_LAST,0,0,0,0,0,0} +}; +#endif + +static struct resp_type_t resp_type[]= +{ + /*{"", RSP_EMPTY, RT_NOTHING},*/ + {"OK", RSP_OK, RT_NOTHING}, + {"ERROR", RSP_ERROR, RT_NOTHING}, + {"ZSAU", RSP_ZSAU, RT_ZSAU}, + {"ZCAU", RSP_ZCAU, RT_ZCAU}, + {"RING", RSP_RING, RT_RING}, + {"ZGCI", RSP_ZGCI, RT_NUMBER}, + {"ZVLS", RSP_ZVLS, RT_NUMBER}, + {"ZCTP", RSP_ZCTP, RT_NUMBER}, + {"ZDLE", RSP_ZDLE, RT_NUMBER}, + {"ZCFGT", RSP_ZCFGT, RT_NUMBER}, + {"ZCCR", RSP_ZCCR, RT_NUMBER}, + {"ZMWI", RSP_ZMWI, RT_NUMBER}, + {"ZHLC", RSP_ZHLC, RT_STRING}, + {"ZBC", RSP_ZBC, RT_STRING}, + {"NMBR", RSP_NMBR, RT_STRING}, + {"ZCPN", RSP_ZCPN, RT_STRING}, + {"ZCON", RSP_ZCON, RT_STRING}, + {"ZAOC", RSP_ZAOC, RT_STRING}, + {"ZCSTR", RSP_ZCSTR, RT_STRING}, + {"ZCFG", RSP_ZCFG, RT_HEX}, + {"ZLOG", RSP_ZLOG, RT_NOTHING}, + {"ZABINFO", RSP_ZABINFO, RT_NOTHING}, + {"ZSMLSTCHG", RSP_ZSMLSTCHG, RT_NOTHING}, + {NULL,0,0} +}; + +/* + * Get integer from char-pointer + */ +static int isdn_getnum(char *p) +{ + int v = -1; + + IFNULLRETVAL(p, -1); + + dbg(DEBUG_TRANSCMD, "string: %s", p); + + while (*p >= '0' && *p <= '9') + v = ((v < 0) ? 
0 : (v * 10)) + (int) ((*p++) - '0'); + if (*p) + v = -1; /* invalid Character */ + return v; +} + +/* + * Get integer from char-pointer + */ +static int isdn_gethex(char *p) +{ + int v = 0; + int c; + + IFNULLRETVAL(p, -1); + + dbg(DEBUG_TRANSCMD, "string: %s", p); + + if (!*p) + return -1; + + do { + if (v > (INT_MAX - 15) / 16) + return -1; + c = *p; + if (c >= '0' && c <= '9') + c -= '0'; + else if (c >= 'a' && c <= 'f') + c -= 'a' - 10; + else if (c >= 'A' && c <= 'F') + c -= 'A' - 10; + else + return -1; + v = v * 16 + c; + } while (*++p); + + return v; +} + +static inline void new_index(atomic_t *index, int max) +{ + if (atomic_read(index) == max) //FIXME race? + atomic_set(index, 0); + else + atomic_inc(index); +} + +/* retrieve CID from parsed response + * returns 0 if no CID, -1 if invalid CID, or CID value 1..65535 + */ +static int cid_of_response(char *s) +{ + int cid; + + if (s[-1] != ';') + return 0; /* no CID separator */ + cid = isdn_getnum(s); + if (cid < 0) + return 0; /* CID not numeric */ + if (cid < 1 || cid > 65535) + return -1; /* CID out of range */ + return cid; + //FIXME is ;<digit>+ at end of non-CID response really impossible? +} + +/* This function will be called via task queue from the callback handler. + * We received a modem response and have to handle it.. + */ +void gigaset_handle_modem_response(struct cardstate *cs) +{ + unsigned char *argv[MAX_REC_PARAMS + 1]; + int params; + int i, j; + struct resp_type_t *rt; + int curarg; + unsigned long flags; + unsigned next, tail, head; + struct event_t *event; + int resp_code; + int param_type; + int abort; + size_t len; + int cid; + int rawstring; + + IFNULLRET(cs); + + len = cs->cbytes; + if (!len) { + /* ignore additional LFs/CRs (M10x config mode or cx100) */ + dbg(DEBUG_MCMD, "skipped EOL [%02X]", cs->respdata[len]); + return; + } + cs->respdata[len] = 0; + dbg(DEBUG_TRANSCMD, "raw string: '%s'", cs->respdata); + argv[0] = cs->respdata; + params = 1; + if (cs->at_state.getstring) { + /* getstring only allowed without cid at the moment */ + cs->at_state.getstring = 0; + rawstring = 1; + cid = 0; + } else { + /* parse line */ + for (i = 0; i < len; i++) + switch (cs->respdata[i]) { + case ';': + case ',': + case '=': + if (params > MAX_REC_PARAMS) { + warn("too many parameters in response"); + /* need last parameter (might be CID) */ + params--; + } + argv[params++] = cs->respdata + i + 1; + } + + rawstring = 0; + cid = params > 1 ? 
cid_of_response(argv[params-1]) : 0; + if (cid < 0) { + gigaset_add_event(cs, &cs->at_state, RSP_INVAL, + NULL, 0, NULL); + return; + } + + for (j = 1; j < params; ++j) + argv[j][-1] = 0; + + dbg(DEBUG_TRANSCMD, "CMD received: %s", argv[0]); + if (cid) { + --params; + dbg(DEBUG_TRANSCMD, "CID: %s", argv[params]); + } + dbg(DEBUG_TRANSCMD, "available params: %d", params - 1); + for (j = 1; j < params; j++) + dbg(DEBUG_TRANSCMD, "param %d: %s", j, argv[j]); + } + + spin_lock_irqsave(&cs->ev_lock, flags); + head = atomic_read(&cs->ev_head); + tail = atomic_read(&cs->ev_tail); + + abort = 1; + curarg = 0; + while (curarg < params) { + next = (tail + 1) % MAX_EVENTS; + if (unlikely(next == head)) { + err("event queue full"); + break; + } + + event = cs->events + tail; + event->at_state = NULL; + event->cid = cid; + event->ptr = NULL; + event->arg = NULL; + tail = next; + + if (rawstring) { + resp_code = RSP_STRING; + param_type = RT_STRING; + } else { + for (rt = resp_type; rt->response; ++rt) + if (!strcmp(argv[curarg], rt->response)) + break; + + if (!rt->response) { + event->type = RSP_UNKNOWN; + warn("unknown modem response: %s", + argv[curarg]); + break; + } + + resp_code = rt->resp_code; + param_type = rt->type; + ++curarg; + } + + event->type = resp_code; + + switch (param_type) { + case RT_NOTHING: + break; + case RT_RING: + if (!cid) { + err("received RING without CID!"); + event->type = RSP_INVAL; + abort = 1; + } else { + event->cid = 0; + event->parameter = cid; + abort = 0; + } + break; + case RT_ZSAU: + if (curarg >= params) { + event->parameter = ZSAU_NONE; + break; + } + if (!strcmp(argv[curarg], "OUTGOING_CALL_PROCEEDING")) + event->parameter = ZSAU_OUTGOING_CALL_PROCEEDING; + else if (!strcmp(argv[curarg], "CALL_DELIVERED")) + event->parameter = ZSAU_CALL_DELIVERED; + else if (!strcmp(argv[curarg], "ACTIVE")) + event->parameter = ZSAU_ACTIVE; + else if (!strcmp(argv[curarg], "DISCONNECT_IND")) + event->parameter = ZSAU_DISCONNECT_IND; + else if (!strcmp(argv[curarg], "NULL")) + event->parameter = ZSAU_NULL; + else if (!strcmp(argv[curarg], "DISCONNECT_REQ")) + event->parameter = ZSAU_DISCONNECT_REQ; + else { + event->parameter = ZSAU_UNKNOWN; + warn("%s: unknown parameter %s after ZSAU", + __func__, argv[curarg]); + } + ++curarg; + break; + case RT_STRING: + if (curarg < params) { + len = strlen(argv[curarg]) + 1; + event->ptr = kmalloc(len, GFP_ATOMIC); + if (event->ptr) + memcpy(event->ptr, argv[curarg], len); + else + err("no memory for string!"); + ++curarg; + } +#ifdef CONFIG_GIGASET_DEBUG + if (!event->ptr) + dbg(DEBUG_CMD, "string==NULL"); + else + dbg(DEBUG_CMD, + "string==%s", (char *) event->ptr); +#endif + break; + case RT_ZCAU: + event->parameter = -1; + if (curarg + 1 < params) { + i = isdn_gethex(argv[curarg]); + j = isdn_gethex(argv[curarg + 1]); + if (i >= 0 && i < 256 && j >= 0 && j < 256) + event->parameter = (unsigned) i << 8 + | j; + curarg += 2; + } else + curarg = params - 1; + break; + case RT_NUMBER: + case RT_HEX: + if (curarg < params) { + if (param_type == RT_HEX) + event->parameter = + isdn_gethex(argv[curarg]); + else + event->parameter = + isdn_getnum(argv[curarg]); + ++curarg; + } else + event->parameter = -1; +#ifdef CONFIG_GIGASET_DEBUG + dbg(DEBUG_CMD, "parameter==%d", event->parameter); +#endif + break; + } + + if (resp_code == RSP_ZDLE) + cs->dle = event->parameter; + + if (abort) + break; + } + + atomic_set(&cs->ev_tail, tail); + spin_unlock_irqrestore(&cs->ev_lock, flags); + + if (curarg != params) + dbg(DEBUG_ANY, "invalid number of 
processed parameters: %d/%d", + curarg, params); +} +EXPORT_SYMBOL_GPL(gigaset_handle_modem_response); + +/* disconnect + * process closing of connection associated with given AT state structure + */ +static void disconnect(struct at_state_t **at_state_p) +{ + unsigned long flags; + struct bc_state *bcs; + struct cardstate *cs; + + IFNULLRET(at_state_p); + IFNULLRET(*at_state_p); + bcs = (*at_state_p)->bcs; + cs = (*at_state_p)->cs; + IFNULLRET(cs); + + new_index(&(*at_state_p)->seq_index, MAX_SEQ_INDEX); + + /* revert to selected idle mode */ + if (!atomic_read(&cs->cidmode)) { + cs->at_state.pending_commands |= PC_UMMODE; + atomic_set(&cs->commands_pending, 1); //FIXME + dbg(DEBUG_CMD, "Scheduling PC_UMMODE"); + } + + if (bcs) { + /* B channel assigned: invoke hardware specific handler */ + cs->ops->close_bchannel(bcs); + } else { + /* no B channel assigned: just deallocate */ + spin_lock_irqsave(&cs->lock, flags); + list_del(&(*at_state_p)->list); + kfree(*at_state_p); + *at_state_p = NULL; + spin_unlock_irqrestore(&cs->lock, flags); + } +} + +/* get_free_channel + * get a free AT state structure: either one of those associated with the + * B channels of the Gigaset device, or if none of those is available, + * a newly allocated one with bcs=NULL + * The structure should be freed by calling disconnect() after use. + */ +static inline struct at_state_t *get_free_channel(struct cardstate *cs, + int cid) +/* cids: >0: siemens-cid + 0: without cid + -1: no cid assigned yet +*/ +{ + unsigned long flags; + int i; + struct at_state_t *ret; + + for (i = 0; i < cs->channels; ++i) + if (gigaset_get_channel(cs->bcs + i)) { + ret = &cs->bcs[i].at_state; + ret->cid = cid; + return ret; + } + + spin_lock_irqsave(&cs->lock, flags); + ret = kmalloc(sizeof(struct at_state_t), GFP_ATOMIC); + if (ret) { + gigaset_at_init(ret, NULL, cs, cid); + list_add(&ret->list, &cs->temp_at_states); + } + spin_unlock_irqrestore(&cs->lock, flags); + return ret; +} + +static void init_failed(struct cardstate *cs, int mode) +{ + int i; + struct at_state_t *at_state; + + cs->at_state.pending_commands &= ~PC_INIT; + atomic_set(&cs->mode, mode); + atomic_set(&cs->mstate, MS_UNINITIALIZED); + gigaset_free_channels(cs); + for (i = 0; i < cs->channels; ++i) { + at_state = &cs->bcs[i].at_state; + if (at_state->pending_commands & PC_CID) { + at_state->pending_commands &= ~PC_CID; + at_state->pending_commands |= PC_NOCID; + atomic_set(&cs->commands_pending, 1); + } + } +} + +static void schedule_init(struct cardstate *cs, int state) +{ + if (cs->at_state.pending_commands & PC_INIT) { + dbg(DEBUG_CMD, "not scheduling PC_INIT again"); + return; + } + atomic_set(&cs->mstate, state); + atomic_set(&cs->mode, M_UNKNOWN); + gigaset_block_channels(cs); + cs->at_state.pending_commands |= PC_INIT; + atomic_set(&cs->commands_pending, 1); + dbg(DEBUG_CMD, "Scheduling PC_INIT"); +} + +/* Add "AT" to a command, add the cid, dle encode it, send the result to the hardware. 
*/ +static void send_command(struct cardstate *cs, const char *cmd, int cid, + int dle, gfp_t kmallocflags) +{ + size_t cmdlen, buflen; + char *cmdpos, *cmdbuf, *cmdtail; + + cmdlen = strlen(cmd); + buflen = 11 + cmdlen; + + if (likely(buflen > cmdlen)) { + cmdbuf = kmalloc(buflen, kmallocflags); + if (likely(cmdbuf != NULL)) { + cmdpos = cmdbuf + 9; + cmdtail = cmdpos + cmdlen; + memcpy(cmdpos, cmd, cmdlen); + + if (cid > 0 && cid <= 65535) { + do { + *--cmdpos = '0' + cid % 10; + cid /= 10; + ++cmdlen; + } while (cid); + } + + cmdlen += 2; + *--cmdpos = 'T'; + *--cmdpos = 'A'; + + if (dle) { + cmdlen += 4; + *--cmdpos = '('; + *--cmdpos = 0x10; + *cmdtail++ = 0x10; + *cmdtail++ = ')'; + } + + cs->ops->write_cmd(cs, cmdpos, cmdlen, NULL); + kfree(cmdbuf); + } else + err("no memory for command buffer"); + } else + err("overflow in buflen"); +} + +static struct at_state_t *at_state_from_cid(struct cardstate *cs, int cid) +{ + struct at_state_t *at_state; + int i; + unsigned long flags; + + if (cid == 0) + return &cs->at_state; + + for (i = 0; i < cs->channels; ++i) + if (cid == cs->bcs[i].at_state.cid) + return &cs->bcs[i].at_state; + + spin_lock_irqsave(&cs->lock, flags); + + list_for_each_entry(at_state, &cs->temp_at_states, list) + if (cid == at_state->cid) { + spin_unlock_irqrestore(&cs->lock, flags); + return at_state; + } + + spin_unlock_irqrestore(&cs->lock, flags); + + return NULL; +} + +static void bchannel_down(struct bc_state *bcs) +{ + IFNULLRET(bcs); + IFNULLRET(bcs->cs); + + if (bcs->chstate & CHS_B_UP) { + bcs->chstate &= ~CHS_B_UP; + gigaset_i4l_channel_cmd(bcs, ISDN_STAT_BHUP); + } + + if (bcs->chstate & (CHS_D_UP | CHS_NOTIFY_LL)) { + bcs->chstate &= ~(CHS_D_UP | CHS_NOTIFY_LL); + gigaset_i4l_channel_cmd(bcs, ISDN_STAT_DHUP); + } + + gigaset_free_channel(bcs); + + gigaset_bcs_reinit(bcs); +} + +static void bchannel_up(struct bc_state *bcs) +{ + IFNULLRET(bcs); + + if (!(bcs->chstate & CHS_D_UP)) { + notice("%s: D channel not up", __func__); + bcs->chstate |= CHS_D_UP; + gigaset_i4l_channel_cmd(bcs, ISDN_STAT_DCONN); + } + + if (bcs->chstate & CHS_B_UP) { + notice("%s: B channel already up", __func__); + return; + } + + bcs->chstate |= CHS_B_UP; + gigaset_i4l_channel_cmd(bcs, ISDN_STAT_BCONN); +} + +static void start_dial(struct at_state_t *at_state, void *data, int seq_index) +{ + struct bc_state *bcs = at_state->bcs; + struct cardstate *cs = at_state->cs; + int retval; + + bcs->chstate |= CHS_NOTIFY_LL; + //atomic_set(&bcs->status, BCS_INIT); + + if (atomic_read(&at_state->seq_index) != seq_index) + goto error; + + retval = gigaset_isdn_setup_dial(at_state, data); + if (retval != 0) + goto error; + + + at_state->pending_commands |= PC_CID; + dbg(DEBUG_CMD, "Scheduling PC_CID"); +//#ifdef GIG_MAYINITONDIAL +// if (atomic_read(&cs->MState) == MS_UNKNOWN) { +// cs->at_state.pending_commands |= PC_INIT; +// dbg(DEBUG_CMD, "Scheduling PC_INIT"); +// } +//#endif + atomic_set(&cs->commands_pending, 1); //FIXME + return; + +error: + at_state->pending_commands |= PC_NOCID; + dbg(DEBUG_CMD, "Scheduling PC_NOCID"); + atomic_set(&cs->commands_pending, 1); //FIXME + return; +} + +static void start_accept(struct at_state_t *at_state) +{ + struct cardstate *cs = at_state->cs; + int retval; + + retval = gigaset_isdn_setup_accept(at_state); + + if (retval == 0) { + at_state->pending_commands |= PC_ACCEPT; + dbg(DEBUG_CMD, "Scheduling PC_ACCEPT"); + atomic_set(&cs->commands_pending, 1); //FIXME + } else { + //FIXME + at_state->pending_commands |= PC_HUP; + dbg(DEBUG_CMD, "Scheduling 
PC_HUP"); + atomic_set(&cs->commands_pending, 1); //FIXME + } +} + +static void do_start(struct cardstate *cs) +{ + gigaset_free_channels(cs); + + if (atomic_read(&cs->mstate) != MS_LOCKED) + schedule_init(cs, MS_INIT); + + gigaset_i4l_cmd(cs, ISDN_STAT_RUN); + // FIXME: not in locked mode + // FIXME 2: only after init sequence + + cs->waiting = 0; + wake_up(&cs->waitqueue); +} + +static void finish_shutdown(struct cardstate *cs) +{ + if (atomic_read(&cs->mstate) != MS_LOCKED) { + atomic_set(&cs->mstate, MS_UNINITIALIZED); + atomic_set(&cs->mode, M_UNKNOWN); + } + + /* The rest is done by cleanup_cs () in user mode. */ + + cs->cmd_result = -ENODEV; + cs->waiting = 0; + wake_up_interruptible(&cs->waitqueue); +} + +static void do_shutdown(struct cardstate *cs) +{ + gigaset_block_channels(cs); + + if (atomic_read(&cs->mstate) == MS_READY) { + atomic_set(&cs->mstate, MS_SHUTDOWN); + cs->at_state.pending_commands |= PC_SHUTDOWN; + atomic_set(&cs->commands_pending, 1); //FIXME + dbg(DEBUG_CMD, "Scheduling PC_SHUTDOWN"); //FIXME + //gigaset_schedule_event(cs); //FIXME + } else + finish_shutdown(cs); +} + +static void do_stop(struct cardstate *cs) +{ + do_shutdown(cs); +} + +/* Entering cid mode or getting a cid failed: + * try to initialize the device and try again. + * + * channel >= 0: getting cid for the channel failed + * channel < 0: entering cid mode failed + * + * returns 0 on failure + */ +static int reinit_and_retry(struct cardstate *cs, int channel) +{ + int i; + + if (--cs->retry_count <= 0) + return 0; + + for (i = 0; i < cs->channels; ++i) + if (cs->bcs[i].at_state.cid > 0) + return 0; + + if (channel < 0) + warn("Could not enter cid mode. Reinit device and try again."); + else { + warn("Could not get a call id. Reinit device and try again."); + cs->bcs[channel].at_state.pending_commands |= PC_CID; + } + schedule_init(cs, MS_INIT); + return 1; +} + +static int at_state_invalid(struct cardstate *cs, + struct at_state_t *test_ptr) +{ + unsigned long flags; + unsigned channel; + struct at_state_t *at_state; + int retval = 0; + + spin_lock_irqsave(&cs->lock, flags); + + if (test_ptr == &cs->at_state) + goto exit; + + list_for_each_entry(at_state, &cs->temp_at_states, list) + if (at_state == test_ptr) + goto exit; + + for (channel = 0; channel < cs->channels; ++channel) + if (&cs->bcs[channel].at_state == test_ptr) + goto exit; + + retval = 1; +exit: + spin_unlock_irqrestore(&cs->lock, flags); + return retval; +} + +static void handle_icall(struct cardstate *cs, struct bc_state *bcs, + struct at_state_t **p_at_state) +{ + int retval; + struct at_state_t *at_state = *p_at_state; + + retval = gigaset_isdn_icall(at_state); + switch (retval) { + case ICALL_ACCEPT: + break; + default: + err("internal error: disposition=%d", retval); + /* --v-- fall through --v-- */ + case ICALL_IGNORE: + case ICALL_REJECT: + /* hang up actively + * Device doc says that would reject the call. + * In fact it doesn't. 
+ */ + at_state->pending_commands |= PC_HUP; + atomic_set(&cs->commands_pending, 1); + break; + } +} + +static int do_lock(struct cardstate *cs) +{ + int mode; + int i; + + switch (atomic_read(&cs->mstate)) { + case MS_UNINITIALIZED: + case MS_READY: + if (cs->cur_at_seq || !list_empty(&cs->temp_at_states) || + cs->at_state.pending_commands) + return -EBUSY; + + for (i = 0; i < cs->channels; ++i) + if (cs->bcs[i].at_state.pending_commands) + return -EBUSY; + + if (!gigaset_get_channels(cs)) + return -EBUSY; + + break; + case MS_LOCKED: + //retval = -EACCES; + break; + default: + return -EBUSY; + } + + mode = atomic_read(&cs->mode); + atomic_set(&cs->mstate, MS_LOCKED); + atomic_set(&cs->mode, M_UNKNOWN); + //FIXME reset card state / at states / bcs states + + return mode; +} + +static int do_unlock(struct cardstate *cs) +{ + if (atomic_read(&cs->mstate) != MS_LOCKED) + return -EINVAL; + + atomic_set(&cs->mstate, MS_UNINITIALIZED); + atomic_set(&cs->mode, M_UNKNOWN); + gigaset_free_channels(cs); + //FIXME reset card state / at states / bcs states + if (atomic_read(&cs->connected)) + schedule_init(cs, MS_INIT); + + return 0; +} + +static void do_action(int action, struct cardstate *cs, + struct bc_state *bcs, + struct at_state_t **p_at_state, char **pp_command, + int *p_genresp, int *p_resp_code, + struct event_t *ev) +{ + struct at_state_t *at_state = *p_at_state; + struct at_state_t *at_state2; + unsigned long flags; + + int channel; + + unsigned char *s, *e; + int i; + unsigned long val; + + switch (action) { + case ACT_NOTHING: + break; + case ACT_TIMEOUT: + at_state->waiting = 1; + break; + case ACT_INIT: + //FIXME setup everything + cs->at_state.pending_commands &= ~PC_INIT; + cs->cur_at_seq = SEQ_NONE; + atomic_set(&cs->mode, M_UNIMODEM); + if (!atomic_read(&cs->cidmode)) { + gigaset_free_channels(cs); + atomic_set(&cs->mstate, MS_READY); + break; + } + cs->at_state.pending_commands |= PC_CIDMODE; + atomic_set(&cs->commands_pending, 1); //FIXME + dbg(DEBUG_CMD, "Scheduling PC_CIDMODE"); + break; + case ACT_FAILINIT: + warn("Could not initialize the device."); + cs->dle = 0; + init_failed(cs, M_UNKNOWN); + cs->cur_at_seq = SEQ_NONE; + break; + case ACT_CONFIGMODE: + init_failed(cs, M_CONFIG); + cs->cur_at_seq = SEQ_NONE; + break; + case ACT_SETDLE1: + cs->dle = 1; + /* cs->inbuf[0].inputstate |= INS_command | INS_DLE_command; */ + cs->inbuf[0].inputstate &= + ~(INS_command | INS_DLE_command); + break; + case ACT_SETDLE0: + cs->dle = 0; + cs->inbuf[0].inputstate = + (cs->inbuf[0].inputstate & ~INS_DLE_command) + | INS_command; + break; + case ACT_CMODESET: + if (atomic_read(&cs->mstate) == MS_INIT || + atomic_read(&cs->mstate) == MS_RECOVER) { + gigaset_free_channels(cs); + atomic_set(&cs->mstate, MS_READY); + } + atomic_set(&cs->mode, M_CID); + cs->cur_at_seq = SEQ_NONE; + break; + case ACT_UMODESET: + atomic_set(&cs->mode, M_UNIMODEM); + cs->cur_at_seq = SEQ_NONE; + break; + case ACT_FAILCMODE: + cs->cur_at_seq = SEQ_NONE; + if (atomic_read(&cs->mstate) == MS_INIT || + atomic_read(&cs->mstate) == MS_RECOVER) { + init_failed(cs, M_UNKNOWN); + break; + } + if (!reinit_and_retry(cs, -1)) + schedule_init(cs, MS_RECOVER); + break; + case ACT_FAILUMODE: + cs->cur_at_seq = SEQ_NONE; + schedule_init(cs, MS_RECOVER); + break; + case ACT_HUPMODEM: + /* send "+++" (hangup in unimodem mode) */ + cs->ops->write_cmd(cs, "+++", 3, NULL); + break; + case ACT_RING: + /* get fresh AT state structure for new CID */ + at_state2 = get_free_channel(cs, ev->parameter); + if (!at_state2) { + warn("RING 
ignored: " + "could not allocate channel structure"); + break; + } + + /* initialize AT state structure + * note that bcs may be NULL if no B channel is free + */ + at_state2->ConState = 700; + kfree(at_state2->str_var[STR_NMBR]); + at_state2->str_var[STR_NMBR] = NULL; + kfree(at_state2->str_var[STR_ZCPN]); + at_state2->str_var[STR_ZCPN] = NULL; + kfree(at_state2->str_var[STR_ZBC]); + at_state2->str_var[STR_ZBC] = NULL; + kfree(at_state2->str_var[STR_ZHLC]); + at_state2->str_var[STR_ZHLC] = NULL; + at_state2->int_var[VAR_ZCTP] = -1; + + spin_lock_irqsave(&cs->lock, flags); + at_state2->timer_expires = RING_TIMEOUT; + at_state2->timer_active = 1; + spin_unlock_irqrestore(&cs->lock, flags); + break; + case ACT_ICALL: + handle_icall(cs, bcs, p_at_state); + at_state = *p_at_state; + break; + case ACT_FAILSDOWN: + warn("Could not shut down the device."); + /* fall through */ + case ACT_FAKESDOWN: + case ACT_SDOWN: + cs->cur_at_seq = SEQ_NONE; + finish_shutdown(cs); + break; + case ACT_CONNECT: + if (cs->onechannel) { + at_state->pending_commands |= PC_DLE1; + atomic_set(&cs->commands_pending, 1); + break; + } + bcs->chstate |= CHS_D_UP; + gigaset_i4l_channel_cmd(bcs, ISDN_STAT_DCONN); + cs->ops->init_bchannel(bcs); + break; + case ACT_DLE1: + cs->cur_at_seq = SEQ_NONE; + bcs = cs->bcs + cs->curchannel; + + bcs->chstate |= CHS_D_UP; + gigaset_i4l_channel_cmd(bcs, ISDN_STAT_DCONN); + cs->ops->init_bchannel(bcs); + break; + case ACT_FAKEHUP: + at_state->int_var[VAR_ZSAU] = ZSAU_NULL; + /* fall through */ + case ACT_DISCONNECT: + cs->cur_at_seq = SEQ_NONE; + at_state->cid = -1; + if (bcs && cs->onechannel && cs->dle) { + /* Check for other open channels not needed: + * DLE only used for M10x with one B channel. + */ + at_state->pending_commands |= PC_DLE0; + atomic_set(&cs->commands_pending, 1); + } else { + disconnect(p_at_state); + at_state = *p_at_state; + } + break; + case ACT_FAKEDLE0: + at_state->int_var[VAR_ZDLE] = 0; + cs->dle = 0; + /* fall through */ + case ACT_DLE0: + cs->cur_at_seq = SEQ_NONE; + at_state2 = &cs->bcs[cs->curchannel].at_state; + disconnect(&at_state2); + break; + case ACT_ABORTHUP: + cs->cur_at_seq = SEQ_NONE; + warn("Could not hang up."); + at_state->cid = -1; + if (bcs && cs->onechannel) + at_state->pending_commands |= PC_DLE0; + else { + disconnect(p_at_state); + at_state = *p_at_state; + } + schedule_init(cs, MS_RECOVER); + break; + case ACT_FAILDLE0: + cs->cur_at_seq = SEQ_NONE; + warn("Could not leave DLE mode."); + at_state2 = &cs->bcs[cs->curchannel].at_state; + disconnect(&at_state2); + schedule_init(cs, MS_RECOVER); + break; + case ACT_FAILDLE1: + cs->cur_at_seq = SEQ_NONE; + warn("Could not enter DLE mode. Try to hang up."); + channel = cs->curchannel; + cs->bcs[channel].at_state.pending_commands |= PC_HUP; + atomic_set(&cs->commands_pending, 1); + break; + + case ACT_CID: /* got cid; start dialing */ + cs->cur_at_seq = SEQ_NONE; + channel = cs->curchannel; + if (ev->parameter > 0 && ev->parameter <= 65535) { + cs->bcs[channel].at_state.cid = ev->parameter; + cs->bcs[channel].at_state.pending_commands |= + PC_DIAL; + atomic_set(&cs->commands_pending, 1); + break; + } + /* fall through */ + case ACT_FAILCID: + cs->cur_at_seq = SEQ_NONE; + channel = cs->curchannel; + if (!reinit_and_retry(cs, channel)) { + warn("Could not get a call id. 
Dialing not possible"); + at_state2 = &cs->bcs[channel].at_state; + disconnect(&at_state2); + } + break; + case ACT_ABORTCID: + cs->cur_at_seq = SEQ_NONE; + at_state2 = &cs->bcs[cs->curchannel].at_state; + disconnect(&at_state2); + break; + + case ACT_DIALING: + case ACT_ACCEPTED: + cs->cur_at_seq = SEQ_NONE; + break; + + case ACT_ABORTACCEPT: /* hangup/error/timeout during ICALL processing */ + disconnect(p_at_state); + at_state = *p_at_state; + break; + + case ACT_ABORTDIAL: /* error/timeout during dial preparation */ + cs->cur_at_seq = SEQ_NONE; + at_state->pending_commands |= PC_HUP; + atomic_set(&cs->commands_pending, 1); + break; + + case ACT_REMOTEREJECT: /* DISCONNECT_IND after dialling */ + case ACT_CONNTIMEOUT: /* timeout waiting for ZSAU=ACTIVE */ + case ACT_REMOTEHUP: /* DISCONNECT_IND with established connection */ + at_state->pending_commands |= PC_HUP; + atomic_set(&cs->commands_pending, 1); + break; + case ACT_GETSTRING: /* warning: RING, ZDLE, ... are not handled properly any more */ + at_state->getstring = 1; + break; + case ACT_SETVER: + if (!ev->ptr) { + *p_genresp = 1; + *p_resp_code = RSP_ERROR; + break; + } + s = ev->ptr; + + if (!strcmp(s, "OK")) { + *p_genresp = 1; + *p_resp_code = RSP_ERROR; + break; + } + + for (i = 0; i < 4; ++i) { + val = simple_strtoul(s, (char **) &e, 10); + if (val > INT_MAX || e == s) + break; + if (i == 3) { + if (*e) + break; + } else if (*e != '.') + break; + else + s = e + 1; + cs->fwver[i] = val; + } + if (i != 4) { + *p_genresp = 1; + *p_resp_code = RSP_ERROR; + break; + } + /*at_state->getstring = 1;*/ + cs->gotfwver = 0; + break; + case ACT_GOTVER: + if (cs->gotfwver == 0) { + cs->gotfwver = 1; + dbg(DEBUG_ANY, + "firmware version %02d.%03d.%02d.%02d", + cs->fwver[0], cs->fwver[1], + cs->fwver[2], cs->fwver[3]); + break; + } + /* fall through */ + case ACT_FAILVER: + cs->gotfwver = -1; + err("could not read firmware version."); + break; +#ifdef CONFIG_GIGASET_DEBUG + case ACT_ERROR: + *p_genresp = 1; + *p_resp_code = RSP_ERROR; + break; + case ACT_TEST: + { + static int count = 3; //2; //1; + *p_genresp = 1; + *p_resp_code = count ? RSP_ERROR : RSP_OK; + if (count > 0) + --count; + } + break; +#endif + case ACT_DEBUG: + dbg(DEBUG_ANY, "%s: resp_code %d in ConState %d", + __func__, ev->type, at_state->ConState); + break; + case ACT_WARN: + warn("%s: resp_code %d in ConState %d!", + __func__, ev->type, at_state->ConState); + break; + case ACT_ZCAU: + warn("cause code %04x in connection state %d.", + ev->parameter, at_state->ConState); + break; + + /* events from the LL */ + case ACT_DIAL: + start_dial(at_state, ev->ptr, ev->parameter); + break; + case ACT_ACCEPT: + start_accept(at_state); + break; + case ACT_PROTO_L2: + dbg(DEBUG_CMD, + "set protocol to %u", (unsigned) ev->parameter); + at_state->bcs->proto2 = ev->parameter; + break; + case ACT_HUP: + at_state->pending_commands |= PC_HUP; + atomic_set(&cs->commands_pending, 1); //FIXME + dbg(DEBUG_CMD, "Scheduling PC_HUP"); + break; + + /* hotplug events */ + case ACT_STOP: + do_stop(cs); + break; + case ACT_START: + do_start(cs); + break; + + /* events from the interface */ // FIXME without ACT_xxxx? + case ACT_IF_LOCK: + cs->cmd_result = ev->parameter ? 
do_lock(cs) : do_unlock(cs); + cs->waiting = 0; + wake_up(&cs->waitqueue); + break; + case ACT_IF_VER: + if (ev->parameter != 0) + cs->cmd_result = -EINVAL; + else if (cs->gotfwver != 1) { + cs->cmd_result = -ENOENT; + } else { + memcpy(ev->arg, cs->fwver, sizeof cs->fwver); + cs->cmd_result = 0; + } + cs->waiting = 0; + wake_up(&cs->waitqueue); + break; + + /* events from the proc file system */ // FIXME without ACT_xxxx? + case ACT_PROC_CIDMODE: + if (ev->parameter != atomic_read(&cs->cidmode)) { + atomic_set(&cs->cidmode, ev->parameter); + if (ev->parameter) { + cs->at_state.pending_commands |= PC_CIDMODE; + dbg(DEBUG_CMD, "Scheduling PC_CIDMODE"); + } else { + cs->at_state.pending_commands |= PC_UMMODE; + dbg(DEBUG_CMD, "Scheduling PC_UMMODE"); + } + atomic_set(&cs->commands_pending, 1); + } + cs->waiting = 0; + wake_up(&cs->waitqueue); + break; + + /* events from the hardware drivers */ + case ACT_NOTIFY_BC_DOWN: + bchannel_down(bcs); + break; + case ACT_NOTIFY_BC_UP: + bchannel_up(bcs); + break; + case ACT_SHUTDOWN: + do_shutdown(cs); + break; + + + default: + if (action >= ACT_CMD && action < ACT_CMD + AT_NUM) { + *pp_command = at_state->bcs->commands[action - ACT_CMD]; + if (!*pp_command) { + *p_genresp = 1; + *p_resp_code = RSP_NULL; + } + } else + err("%s: action==%d!", __func__, action); + } +} + +/* State machine to do the calling and hangup procedure */ +static void process_event(struct cardstate *cs, struct event_t *ev) +{ + struct bc_state *bcs; + char *p_command = NULL; + struct reply_t *rep; + int rcode; + int genresp = 0; + int resp_code = RSP_ERROR; + int sendcid; + struct at_state_t *at_state; + int index; + int curact; + unsigned long flags; + + IFNULLRET(cs); + IFNULLRET(ev); + + if (ev->cid >= 0) { + at_state = at_state_from_cid(cs, ev->cid); + if (!at_state) { + gigaset_add_event(cs, &cs->at_state, RSP_WRONG_CID, + NULL, 0, NULL); + return; + } + } else { + at_state = ev->at_state; + if (at_state_invalid(cs, at_state)) { + dbg(DEBUG_ANY, + "event for invalid at_state %p", at_state); + return; + } + } + + dbg(DEBUG_CMD, + "connection state %d, event %d", at_state->ConState, ev->type); + + bcs = at_state->bcs; + sendcid = at_state->cid; + + /* Setting the pointer to the dial array */ + rep = at_state->replystruct; + IFNULLRET(rep); + + if (ev->type == EV_TIMEOUT) { + if (ev->parameter != atomic_read(&at_state->timer_index) + || !at_state->timer_active) { + ev->type = RSP_NONE; /* old timeout */ + dbg(DEBUG_ANY, "old timeout"); + } else if (!at_state->waiting) + dbg(DEBUG_ANY, "timeout occured"); + else + dbg(DEBUG_ANY, "stopped waiting"); + } + + /* if the response belongs to a variable in at_state->int_var[VAR_XXXX] or at_state->str_var[STR_XXXX], set it */ + if (ev->type >= RSP_VAR && ev->type < RSP_VAR + VAR_NUM) { + index = ev->type - RSP_VAR; + at_state->int_var[index] = ev->parameter; + } else if (ev->type >= RSP_STR && ev->type < RSP_STR + STR_NUM) { + index = ev->type - RSP_STR; + kfree(at_state->str_var[index]); + at_state->str_var[index] = ev->ptr; + ev->ptr = NULL; /* prevent process_events() from deallocating ptr */ + } + + if (ev->type == EV_TIMEOUT || ev->type == RSP_STRING) + at_state->getstring = 0; + + /* Search row in dial array which matches modem response and current constate */ + for (;; rep++) { + rcode = rep->resp_code; + /* dbg (DEBUG_ANY, "rcode %d", rcode); */ + if (rcode == RSP_LAST) { + /* found nothing...*/ + warn("%s: rcode=RSP_LAST: resp_code %d in ConState %d!", + __func__, ev->type, at_state->ConState); + return; + } + if ((rcode == 
RSP_ANY || rcode == ev->type) + && ((int) at_state->ConState >= rep->min_ConState) + && (rep->max_ConState < 0 + || (int) at_state->ConState <= rep->max_ConState) + && (rep->parameter < 0 || rep->parameter == ev->parameter)) + break; + } + + p_command = rep->command; + + at_state->waiting = 0; + for (curact = 0; curact < MAXACT; ++curact) { + /* The row tells us what we should do .. + */ + do_action(rep->action[curact], cs, bcs, &at_state, &p_command, &genresp, &resp_code, ev); + if (!at_state) + break; /* may be freed after disconnect */ + } + + if (at_state) { + /* Jump to the next con-state regarding the array */ + if (rep->new_ConState >= 0) + at_state->ConState = rep->new_ConState; + + if (genresp) { + spin_lock_irqsave(&cs->lock, flags); + at_state->timer_expires = 0; //FIXME + at_state->timer_active = 0; //FIXME + spin_unlock_irqrestore(&cs->lock, flags); + gigaset_add_event(cs, at_state, resp_code, NULL, 0, NULL); + } else { + /* Send command to modem if not NULL... */ + if (p_command/*rep->command*/) { + if (atomic_read(&cs->connected)) + send_command(cs, p_command, + sendcid, cs->dle, + GFP_ATOMIC); + else + gigaset_add_event(cs, at_state, + RSP_NODEV, + NULL, 0, NULL); + } + + spin_lock_irqsave(&cs->lock, flags); + if (!rep->timeout) { + at_state->timer_expires = 0; + at_state->timer_active = 0; + } else if (rep->timeout > 0) { /* new timeout */ + at_state->timer_expires = rep->timeout * 10; + at_state->timer_active = 1; + new_index(&at_state->timer_index, + MAX_TIMER_INDEX); + } + spin_unlock_irqrestore(&cs->lock, flags); + } + } +} + +static void schedule_sequence(struct cardstate *cs, + struct at_state_t *at_state, int sequence) +{ + cs->cur_at_seq = sequence; + gigaset_add_event(cs, at_state, RSP_INIT, NULL, sequence, NULL); +} + +static void process_command_flags(struct cardstate *cs) +{ + struct at_state_t *at_state = NULL; + struct bc_state *bcs; + int i; + int sequence; + + IFNULLRET(cs); + + atomic_set(&cs->commands_pending, 0); + + if (cs->cur_at_seq) { + dbg(DEBUG_CMD, "not searching scheduled commands: busy"); + return; + } + + dbg(DEBUG_CMD, "searching scheduled commands"); + + sequence = SEQ_NONE; + + /* clear pending_commands and hangup channels on shutdown */ + if (cs->at_state.pending_commands & PC_SHUTDOWN) { + cs->at_state.pending_commands &= ~PC_CIDMODE; + for (i = 0; i < cs->channels; ++i) { + bcs = cs->bcs + i; + at_state = &bcs->at_state; + at_state->pending_commands &= + ~(PC_DLE1 | PC_ACCEPT | PC_DIAL); + if (at_state->cid > 0) + at_state->pending_commands |= PC_HUP; + if (at_state->pending_commands & PC_CID) { + at_state->pending_commands |= PC_NOCID; + at_state->pending_commands &= ~PC_CID; + } + } + } + + /* clear pending_commands and hangup channels on reset */ + if (cs->at_state.pending_commands & PC_INIT) { + cs->at_state.pending_commands &= ~PC_CIDMODE; + for (i = 0; i < cs->channels; ++i) { + bcs = cs->bcs + i; + at_state = &bcs->at_state; + at_state->pending_commands &= + ~(PC_DLE1 | PC_ACCEPT | PC_DIAL); + if (at_state->cid > 0) + at_state->pending_commands |= PC_HUP; + if (atomic_read(&cs->mstate) == MS_RECOVER) { + if (at_state->pending_commands & PC_CID) { + at_state->pending_commands |= PC_NOCID; + at_state->pending_commands &= ~PC_CID; + } + } + } + } + + /* only switch back to unimodem mode, if no commands are pending and no channels are up */ + if (cs->at_state.pending_commands == PC_UMMODE + && !atomic_read(&cs->cidmode) + && list_empty(&cs->temp_at_states) + && atomic_read(&cs->mode) == M_CID) { + sequence = SEQ_UMMODE; + at_state = 
&cs->at_state; + for (i = 0; i < cs->channels; ++i) { + bcs = cs->bcs + i; + if (bcs->at_state.pending_commands || + bcs->at_state.cid > 0) { + sequence = SEQ_NONE; + break; + } + } + } + cs->at_state.pending_commands &= ~PC_UMMODE; + if (sequence != SEQ_NONE) { + schedule_sequence(cs, at_state, sequence); + return; + } + + for (i = 0; i < cs->channels; ++i) { + bcs = cs->bcs + i; + if (bcs->at_state.pending_commands & PC_HUP) { + bcs->at_state.pending_commands &= ~PC_HUP; + if (bcs->at_state.pending_commands & PC_CID) { + /* not yet dialing: PC_NOCID is sufficient */ + bcs->at_state.pending_commands |= PC_NOCID; + bcs->at_state.pending_commands &= ~PC_CID; + } else { + schedule_sequence(cs, &bcs->at_state, SEQ_HUP); + return; + } + } + if (bcs->at_state.pending_commands & PC_NOCID) { + bcs->at_state.pending_commands &= ~PC_NOCID; + cs->curchannel = bcs->channel; + schedule_sequence(cs, &cs->at_state, SEQ_NOCID); + return; + } else if (bcs->at_state.pending_commands & PC_DLE0) { + bcs->at_state.pending_commands &= ~PC_DLE0; + cs->curchannel = bcs->channel; + schedule_sequence(cs, &cs->at_state, SEQ_DLE0); + return; + } + } + + list_for_each_entry(at_state, &cs->temp_at_states, list) + if (at_state->pending_commands & PC_HUP) { + at_state->pending_commands &= ~PC_HUP; + schedule_sequence(cs, at_state, SEQ_HUP); + return; + } + + if (cs->at_state.pending_commands & PC_INIT) { + cs->at_state.pending_commands &= ~PC_INIT; + cs->dle = 0; //FIXME + cs->inbuf->inputstate = INS_command; + //FIXME reset card state (or -> LOCK0)? + schedule_sequence(cs, &cs->at_state, SEQ_INIT); + return; + } + if (cs->at_state.pending_commands & PC_SHUTDOWN) { + cs->at_state.pending_commands &= ~PC_SHUTDOWN; + schedule_sequence(cs, &cs->at_state, SEQ_SHUTDOWN); + return; + } + if (cs->at_state.pending_commands & PC_CIDMODE) { + cs->at_state.pending_commands &= ~PC_CIDMODE; + if (atomic_read(&cs->mode) == M_UNIMODEM) { +#if 0 + cs->retry_count = 2; +#else + cs->retry_count = 1; +#endif + schedule_sequence(cs, &cs->at_state, SEQ_CIDMODE); + return; + } + } + + for (i = 0; i < cs->channels; ++i) { + bcs = cs->bcs + i; + if (bcs->at_state.pending_commands & PC_DLE1) { + bcs->at_state.pending_commands &= ~PC_DLE1; + cs->curchannel = bcs->channel; + schedule_sequence(cs, &cs->at_state, SEQ_DLE1); + return; + } + if (bcs->at_state.pending_commands & PC_ACCEPT) { + bcs->at_state.pending_commands &= ~PC_ACCEPT; + schedule_sequence(cs, &bcs->at_state, SEQ_ACCEPT); + return; + } + if (bcs->at_state.pending_commands & PC_DIAL) { + bcs->at_state.pending_commands &= ~PC_DIAL; + schedule_sequence(cs, &bcs->at_state, SEQ_DIAL); + return; + } + if (bcs->at_state.pending_commands & PC_CID) { + switch (atomic_read(&cs->mode)) { + case M_UNIMODEM: + cs->at_state.pending_commands |= PC_CIDMODE; + dbg(DEBUG_CMD, "Scheduling PC_CIDMODE"); + atomic_set(&cs->commands_pending, 1); + return; +#ifdef GIG_MAYINITONDIAL + case M_UNKNOWN: + schedule_init(cs, MS_INIT); + return; +#endif + } + bcs->at_state.pending_commands &= ~PC_CID; + cs->curchannel = bcs->channel; +#ifdef GIG_RETRYCID + cs->retry_count = 2; +#else + cs->retry_count = 1; +#endif + schedule_sequence(cs, &cs->at_state, SEQ_CID); + return; + } + } +} + +static void process_events(struct cardstate *cs) +{ + struct event_t *ev; + unsigned head, tail; + int i; + int check_flags = 0; + int was_busy; + + /* no locking needed (only one reader) */ + head = atomic_read(&cs->ev_head); + + for (i = 0; i < 2 * MAX_EVENTS; ++i) { + tail = atomic_read(&cs->ev_tail); + if (tail == head) { + if 
(!check_flags && !atomic_read(&cs->commands_pending)) + break; + check_flags = 0; + process_command_flags(cs); + tail = atomic_read(&cs->ev_tail); + if (tail == head) { + if (!atomic_read(&cs->commands_pending)) + break; + continue; + } + } + + ev = cs->events + head; + was_busy = cs->cur_at_seq != SEQ_NONE; + process_event(cs, ev); + kfree(ev->ptr); + ev->ptr = NULL; + if (was_busy && cs->cur_at_seq == SEQ_NONE) + check_flags = 1; + + head = (head + 1) % MAX_EVENTS; + atomic_set(&cs->ev_head, head); + } + + if (i == 2 * MAX_EVENTS) { + err("infinite loop in process_events; aborting."); + } +} + +/* tasklet scheduled on any event received from the Gigaset device + * parameter: + * data ISDN controller state structure + */ +void gigaset_handle_event(unsigned long data) +{ + struct cardstate *cs = (struct cardstate *) data; + + IFNULLRET(cs); + IFNULLRET(cs->inbuf); + + /* handle incoming data on control/common channel */ + if (atomic_read(&cs->inbuf->head) != atomic_read(&cs->inbuf->tail)) { + dbg(DEBUG_INTR, "processing new data"); + cs->ops->handle_input(cs->inbuf); + } + + process_events(cs); +} diff --git a/drivers/isdn/gigaset/gigaset.h b/drivers/isdn/gigaset/gigaset.h new file mode 100644 index 000000000000..729edcdb6dac --- /dev/null +++ b/drivers/isdn/gigaset/gigaset.h @@ -0,0 +1,938 @@ +/* Siemens Gigaset 307x driver + * Common header file for all connection variants + * + * Written by Stefan Eilers <Eilers.Stefan@epost.de> + * and Hansjoerg Lipp <hjlipp@web.de> + * + * Version: $Id: gigaset.h,v 1.97.4.26 2006/02/04 18:28:16 hjlipp Exp $ + * =========================================================================== + */ + +#ifndef GIGASET_H +#define GIGASET_H + +#include <linux/config.h> +#include <linux/kernel.h> +#include <linux/compiler.h> +#include <linux/types.h> +#include <asm/atomic.h> +#include <linux/spinlock.h> +#include <linux/isdnif.h> +#include <linux/usb.h> +#include <linux/skbuff.h> +#include <linux/netdevice.h> +#include <linux/ppp_defs.h> +#include <linux/timer.h> +#include <linux/interrupt.h> +#include <linux/tty.h> +#include <linux/tty_driver.h> +#include <linux/list.h> + +#define GIG_VERSION {0,5,0,0} +#define GIG_COMPAT {0,4,0,0} + +#define MAX_REC_PARAMS 10 /* Max. number of params in response string */ +#define MAX_RESP_SIZE 512 /* Max. size of a response string */ +#define HW_HDR_LEN 2 /* Header size used to store ack info */ + +#define MAX_EVENTS 64 /* size of event queue */ + +#define RBUFSIZE 8192 +#define SBUFSIZE 4096 /* sk_buff payload size */ + +#define MAX_BUF_SIZE (SBUFSIZE - 2) /* Max. size of a data packet from LL */ +#define TRANSBUFSIZE 768 /* bytes per skb for transparent receive */ + +/* compile time options */ +#define GIG_MAJOR 0 + +#define GIG_MAYINITONDIAL +#define GIG_RETRYCID +#define GIG_X75 + +#define MAX_TIMER_INDEX 1000 +#define MAX_SEQ_INDEX 1000 + +#define GIG_TICK (HZ / 10) + +/* timeout values (unit: 1 sec) */ +#define INIT_TIMEOUT 1 + +/* timeout values (unit: 0.1 sec) */ +#define RING_TIMEOUT 3 /* for additional parameters to RING */ +#define BAS_TIMEOUT 20 /* for response to Base USB ops */ +#define ATRDY_TIMEOUT 3 /* for HD_READY_SEND_ATDATA */ + +#define BAS_RETRY 3 /* max. 
retries for base USB ops */ + +#define MAXACT 3 + +#define IFNULL(a) if (unlikely(!(a))) +#define IFNULLRET(a) if (unlikely(!(a))) {err("%s==NULL at %s:%d!", #a, __FILE__, __LINE__); return; } +#define IFNULLRETVAL(a,b) if (unlikely(!(a))) {err("%s==NULL at %s:%d!", #a, __FILE__, __LINE__); return (b); } +#define IFNULLCONT(a) if (unlikely(!(a))) {err("%s==NULL at %s:%d!", #a, __FILE__, __LINE__); continue; } +#define IFNULLGOTO(a,b) if (unlikely(!(a))) {err("%s==NULL at %s:%d!", #a, __FILE__, __LINE__); goto b; } + +extern int gigaset_debuglevel; /* "needs" cast to (enum debuglevel) */ + +/* any combination of these can be given with the 'debug=' parameter to insmod, e.g. + * 'insmod usb_gigaset.o debug=0x2c' will set DEBUG_OPEN, DEBUG_CMD and DEBUG_INTR. */ +enum debuglevel { /* up to 24 bits (atomic_t) */ + DEBUG_REG = 0x0002, /* serial port I/O register operations */ + DEBUG_OPEN = 0x0004, /* open/close serial port */ + DEBUG_INTR = 0x0008, /* interrupt processing */ + DEBUG_INTR_DUMP = 0x0010, /* Activating hexdump debug output on interrupt + requests, not available as run-time option */ + DEBUG_CMD = 0x00020, /* sent/received LL commands */ + DEBUG_STREAM = 0x00040, /* application data stream I/O events */ + DEBUG_STREAM_DUMP = 0x00080, /* application data stream content */ + DEBUG_LLDATA = 0x00100, /* sent/received LL data */ + DEBUG_INTR_0 = 0x00200, /* serial port output interrupt processing */ + DEBUG_DRIVER = 0x00400, /* driver structure */ + DEBUG_HDLC = 0x00800, /* M10x HDLC processing */ + DEBUG_WRITE = 0x01000, /* M105 data write */ + DEBUG_TRANSCMD = 0x02000, /*AT-COMMANDS+RESPONSES*/ + DEBUG_MCMD = 0x04000, /*COMMANDS THAT ARE SENT VERY OFTEN*/ + DEBUG_INIT = 0x08000, /* (de)allocation+initialization of data structures */ + DEBUG_LOCK = 0x10000, /* semaphore operations */ + DEBUG_OUTPUT = 0x20000, /* output to device */ + DEBUG_ISO = 0x40000, /* isochronous transfers */ + DEBUG_IF = 0x80000, /* character device operations */ + DEBUG_USBREQ = 0x100000, /* USB communication (except payload data) */ + DEBUG_LOCKCMD = 0x200000, /* AT commands and responses when MS_LOCKED */ + + DEBUG_ANY = 0x3fffff, /* print message if any of the others is activated */ +}; + +#ifdef CONFIG_GIGASET_DEBUG +#define DEBUG_DEFAULT (DEBUG_INIT | DEBUG_TRANSCMD | DEBUG_CMD | DEBUG_USBREQ) +//#define DEBUG_DEFAULT (DEBUG_LOCK | DEBUG_INIT | DEBUG_TRANSCMD | DEBUG_CMD | DEBUF_IF | DEBUG_DRIVER | DEBUG_OUTPUT | DEBUG_INTR) +#else +#define DEBUG_DEFAULT 0 +#endif + +/* redefine syslog macros to prepend module name instead of entire source path */ +/* The space before the comma in ", ##" is needed by gcc 2.95 */ +#undef info +#define info(format, arg...) printk(KERN_INFO "%s: " format "\n", THIS_MODULE ? THIS_MODULE->name : "gigaset_hw" , ## arg) + +#undef notice +#define notice(format, arg...) printk(KERN_NOTICE "%s: " format "\n", THIS_MODULE ? THIS_MODULE->name : "gigaset_hw" , ## arg) + +#undef warn +#define warn(format, arg...) printk(KERN_WARNING "%s: " format "\n", THIS_MODULE ? THIS_MODULE->name : "gigaset_hw" , ## arg) + +#undef err +#define err(format, arg...) printk(KERN_ERR "%s: " format "\n", THIS_MODULE ? THIS_MODULE->name : "gigaset_hw" , ## arg) + +#undef dbg +#ifdef CONFIG_GIGASET_DEBUG +#define dbg(level, format, arg...) do { if (unlikely(((enum debuglevel)gigaset_debuglevel) & (level))) \ + printk(KERN_DEBUG "%s: " format "\n", THIS_MODULE ? THIS_MODULE->name : "gigaset_hw" , ## arg); } while (0) +#else +#define dbg(level, format, arg...) 
do {} while (0) +#endif + +void gigaset_dbg_buffer(enum debuglevel level, const unsigned char *msg, + size_t len, const unsigned char *buf, int from_user); + +/* connection state */ +#define ZSAU_NONE 0 +#define ZSAU_DISCONNECT_IND 4 +#define ZSAU_OUTGOING_CALL_PROCEEDING 1 +#define ZSAU_PROCEEDING 1 +#define ZSAU_CALL_DELIVERED 2 +#define ZSAU_ACTIVE 3 +#define ZSAU_NULL 5 +#define ZSAU_DISCONNECT_REQ 6 +#define ZSAU_UNKNOWN -1 + +/* USB control transfer requests */ +#define OUT_VENDOR_REQ (USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_ENDPOINT) +#define IN_VENDOR_REQ (USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_ENDPOINT) + +/* int-in-events 3070 */ +#define HD_B1_FLOW_CONTROL 0x80 +#define HD_B2_FLOW_CONTROL 0x81 +#define HD_RECEIVEATDATA_ACK (0x35) // 3070 // att: HD_RECEIVE>>AT<<DATA_ACK +#define HD_READY_SEND_ATDATA (0x36) // 3070 +#define HD_OPEN_ATCHANNEL_ACK (0x37) // 3070 +#define HD_CLOSE_ATCHANNEL_ACK (0x38) // 3070 +#define HD_DEVICE_INIT_OK (0x11) // ISurf USB + 3070 +#define HD_OPEN_B1CHANNEL_ACK (0x51) // ISurf USB + 3070 +#define HD_OPEN_B2CHANNEL_ACK (0x52) // ISurf USB + 3070 +#define HD_CLOSE_B1CHANNEL_ACK (0x53) // ISurf USB + 3070 +#define HD_CLOSE_B2CHANNEL_ACK (0x54) // ISurf USB + 3070 +// Powermangment +#define HD_SUSPEND_END (0x61) // ISurf USB +// Configuration +#define HD_RESET_INTERRUPT_PIPE_ACK (0xFF) // ISurf USB + 3070 + +/* control requests 3070 */ +#define HD_OPEN_B1CHANNEL (0x23) // ISurf USB + 3070 +#define HD_CLOSE_B1CHANNEL (0x24) // ISurf USB + 3070 +#define HD_OPEN_B2CHANNEL (0x25) // ISurf USB + 3070 +#define HD_CLOSE_B2CHANNEL (0x26) // ISurf USB + 3070 +#define HD_RESET_INTERRUPT_PIPE (0x27) // ISurf USB + 3070 +#define HD_DEVICE_INIT_ACK (0x34) // ISurf USB + 3070 +#define HD_WRITE_ATMESSAGE (0x12) // 3070 +#define HD_READ_ATMESSAGE (0x13) // 3070 +#define HD_OPEN_ATCHANNEL (0x28) // 3070 +#define HD_CLOSE_ATCHANNEL (0x29) // 3070 + +/* USB frames for isochronous transfer */ +#define BAS_FRAMETIME 1 /* number of milliseconds between frames */ +#define BAS_NUMFRAMES 8 /* number of frames per URB */ +#define BAS_MAXFRAME 16 /* allocated bytes per frame */ +#define BAS_NORMFRAME 8 /* send size without flow control */ +#define BAS_HIGHFRAME 10 /* " " with positive flow control */ +#define BAS_LOWFRAME 5 /* " " with negative flow control */ +#define BAS_CORRFRAMES 4 /* flow control multiplicator */ + +#define BAS_INBUFSIZE (BAS_MAXFRAME * BAS_NUMFRAMES) /* size of isochronous input buffer per URB */ +#define BAS_OUTBUFSIZE 4096 /* size of common isochronous output buffer */ +#define BAS_OUTBUFPAD BAS_MAXFRAME /* size of pad area for isochronous output buffer */ + +#define BAS_INURBS 3 +#define BAS_OUTURBS 3 + +/* variable commands in struct bc_state */ +#define AT_ISO 0 +#define AT_DIAL 1 +#define AT_MSN 2 +#define AT_BC 3 +#define AT_PROTO 4 +#define AT_TYPE 5 +#define AT_HLC 6 +#define AT_NUM 7 + +/* variables in struct at_state_t */ +#define VAR_ZSAU 0 +#define VAR_ZDLE 1 +#define VAR_ZVLS 2 +#define VAR_ZCTP 3 +#define VAR_NUM 4 + +#define STR_NMBR 0 +#define STR_ZCPN 1 +#define STR_ZCON 2 +#define STR_ZBC 3 +#define STR_ZHLC 4 +#define STR_NUM 5 + +#define EV_TIMEOUT -105 +#define EV_IF_VER -106 +#define EV_PROC_CIDMODE -107 +#define EV_SHUTDOWN -108 +#define EV_START -110 +#define EV_STOP -111 +#define EV_IF_LOCK -112 +#define EV_PROTO_L2 -113 +#define EV_ACCEPT -114 +#define EV_DIAL -115 +#define EV_HUP -116 +#define EV_BC_OPEN -117 +#define EV_BC_CLOSED -118 + +/* input state */ +#define INS_command 0x0001 +#define INS_DLE_char 0x0002 +#define 
INS_byte_stuff 0x0004 +#define INS_have_data 0x0008 +#define INS_skip_frame 0x0010 +#define INS_DLE_command 0x0020 +#define INS_flag_hunt 0x0040 + +/* channel state */ +#define CHS_D_UP 0x01 +#define CHS_B_UP 0x02 +#define CHS_NOTIFY_LL 0x04 + +#define ICALL_REJECT 0 +#define ICALL_ACCEPT 1 +#define ICALL_IGNORE 2 + +/* device state */ +#define MS_UNINITIALIZED 0 +#define MS_INIT 1 +#define MS_LOCKED 2 +#define MS_SHUTDOWN 3 +#define MS_RECOVER 4 +#define MS_READY 5 + +/* mode */ +#define M_UNKNOWN 0 +#define M_CONFIG 1 +#define M_UNIMODEM 2 +#define M_CID 3 + +/* start mode */ +#define SM_LOCKED 0 +#define SM_ISDN 1 /* default */ + +struct gigaset_ops; +struct gigaset_driver; + +struct usb_cardstate; +struct ser_cardstate; +struct bas_cardstate; + +struct bc_state; +struct usb_bc_state; +struct ser_bc_state; +struct bas_bc_state; + +struct reply_t { + int resp_code; /* RSP_XXXX */ + int min_ConState; /* <0 => ignore */ + int max_ConState; /* <0 => ignore */ + int parameter; /* e.g. ZSAU_XXXX <0: ignore*/ + int new_ConState; /* <0 => ignore */ + int timeout; /* >0 => *HZ; <=0 => TOUT_XXXX*/ + int action[MAXACT]; /* ACT_XXXX */ + char *command; /* NULL==none */ +}; + +extern struct reply_t gigaset_tab_cid_m10x[]; +extern struct reply_t gigaset_tab_nocid_m10x[]; + +struct inbuf_t { + unsigned char *rcvbuf; /* usb-gigaset receive buffer */ + struct bc_state *bcs; + struct cardstate *cs; + int inputstate; + + atomic_t head, tail; + unsigned char data[RBUFSIZE]; +}; + +/* isochronous write buffer structure + * circular buffer with pad area for extraction of complete USB frames + * - data[read..nextread-1] is valid data already submitted to the USB subsystem + * - data[nextread..write-1] is valid data yet to be sent + * - data[write] is the next byte to write to + * - in byte-oriented L2 procotols, it is completely free + * - in bit-oriented L2 procotols, it may contain a partial byte of valid data + * - data[write+1..read-1] is free + * - wbits is the number of valid data bits in data[write], starting at the LSB + * - writesem is the semaphore for writing to the buffer: + * if writesem <= 0, data[write..read-1] is currently being written to + * - idle contains the byte value to repeat when the end of valid data is + * reached; if nextread==write (buffer contains no data to send), either the + * BAS_OUTBUFPAD bytes immediately before data[write] (if write>=BAS_OUTBUFPAD) + * or those of the pad area (if write<BAS_OUTBUFPAD) are also filled with that + * value + * - optionally, the following statistics on the buffer's usage can be collected: + * maxfill: maximum number of bytes occupied + * idlefills: number of times a frame of idle bytes is prepared + * emptygets: number of times the buffer was empty when a data frame was requested + * backtoback: number of times two data packets were entered into the buffer + * without intervening idle flags + * nakedback: set if no idle flags have been inserted since the last data packet + */ +struct isowbuf_t { + atomic_t read; + atomic_t nextread; + atomic_t write; + atomic_t writesem; + int wbits; + unsigned char data[BAS_OUTBUFSIZE + BAS_OUTBUFPAD]; + unsigned char idle; +}; + +/* isochronous write URB context structure + * data to be stored along with the URB and retrieved when it is returned + * as completed by the USB subsystem + * - urb: pointer to the URB itself + * - bcs: pointer to the B Channel control structure + * - limit: end of write buffer area covered by this URB + */ +struct isow_urbctx_t { + struct urb *urb; + struct bc_state *bcs; + int 
limit; +}; + +/* AT state structure + * data associated with the state of an ISDN connection, whether or not + * it is currently assigned a B channel + */ +struct at_state_t { + struct list_head list; + int waiting; + int getstring; + atomic_t timer_index; + unsigned long timer_expires; + int timer_active; + unsigned int ConState; /* State of connection */ + struct reply_t *replystruct; + int cid; + int int_var[VAR_NUM]; /* see VAR_XXXX */ + char *str_var[STR_NUM]; /* see STR_XXXX */ + unsigned pending_commands; /* see PC_XXXX */ + atomic_t seq_index; + + struct cardstate *cs; + struct bc_state *bcs; +}; + +struct resp_type_t { + unsigned char *response; + int resp_code; /* RSP_XXXX */ + int type; /* RT_XXXX */ +}; + +struct prot_skb { + atomic_t empty; + struct semaphore *sem; + struct sk_buff *skb; +}; + +struct event_t { + int type; + void *ptr, *arg; + int parameter; + int cid; + struct at_state_t *at_state; +}; + +/* This buffer holds all information about the used B-Channel */ +struct bc_state { + struct sk_buff *tx_skb; /* Current transfer buffer to modem */ + struct sk_buff_head squeue; /* B-Channel send Queue */ + + /* Variables for debugging .. */ + int corrupted; /* Counter for corrupted packages */ + int trans_down; /* Counter of packages (downstream) */ + int trans_up; /* Counter of packages (upstream) */ + + struct at_state_t at_state; + unsigned long rcvbytes; + + __u16 fcs; + struct sk_buff *skb; + int inputstate; /* see INS_XXXX */ + + int channel; + + struct cardstate *cs; + + unsigned chstate; /* bitmap (CHS_*) */ + int ignore; + unsigned proto2; /* Layer 2 protocol (ISDN_PROTO_L2_*) */ + char *commands[AT_NUM]; /* see AT_XXXX */ + +#ifdef CONFIG_GIGASET_DEBUG + int emptycount; +#endif + int busy; + int use_count; + + /* hardware drivers */ + union { + struct ser_bc_state *ser; /* private data of serial hardware driver */ + struct usb_bc_state *usb; /* private data of usb hardware driver */ + struct bas_bc_state *bas; + } hw; +}; + +struct cardstate { + struct gigaset_driver *driver; + unsigned minor_index; + + const struct gigaset_ops *ops; + + /* Stuff to handle communication */ + //wait_queue_head_t initwait; + wait_queue_head_t waitqueue; + int waiting; + atomic_t mode; /* see M_XXXX */ + atomic_t mstate; /* Modem state: see MS_XXXX */ + /* only changed by the event layer */ + int cmd_result; + + int channels; + struct bc_state *bcs; /* Array of struct bc_state */ + + int onechannel; /* data and commands transmitted in one stream (M10x) */ + + spinlock_t lock; + struct at_state_t at_state; /* at_state_t for cid == 0 */ + struct list_head temp_at_states; /* list of temporary "struct at_state_t"s without B channel */ + + struct inbuf_t *inbuf; + + struct cmdbuf_t *cmdbuf, *lastcmdbuf; + spinlock_t cmdlock; + unsigned curlen, cmdbytes; + + unsigned open_count; + struct tty_struct *tty; + struct tasklet_struct if_wake_tasklet; + unsigned control_state; + + unsigned fwver[4]; + int gotfwver; + + atomic_t running; /* !=0 if events are handled */ + atomic_t connected; /* !=0 if hardware is connected */ + + atomic_t cidmode; + + int myid; /* id for communication with LL */ + isdn_if iif; + + struct reply_t *tabnocid; + struct reply_t *tabcid; + int cs_init; + int ignoreframes; /* frames to ignore after setting up the B channel */ + struct semaphore sem; /* locks this structure: */ + /* connected is not changed, */ + /* hardware_up is not changed, */ + /* MState is not changed to or from MS_LOCKED */ + + struct timer_list timer; + int retry_count; + int dle; /* !=0 if modem 
commands/responses are dle encoded */ + int cur_at_seq; /* sequence of AT commands being processed */ + int curchannel; /* channel, those commands are meant for */ + atomic_t commands_pending; /* flag(s) in xxx.commands_pending have been set */ + struct tasklet_struct event_tasklet; /* tasklet for serializing AT commands. Scheduled + * -> for modem reponses (and incomming data for M10x) + * -> on timeout + * -> after setting bits in xxx.at_state.pending_command + * (e.g. command from LL) */ + struct tasklet_struct write_tasklet; /* tasklet for serial output + * (not used in base driver) */ + + /* event queue */ + struct event_t events[MAX_EVENTS]; + atomic_t ev_tail, ev_head; + spinlock_t ev_lock; + + /* current modem response */ + unsigned char respdata[MAX_RESP_SIZE]; + unsigned cbytes; + + /* hardware drivers */ + union { + struct usb_cardstate *usb; /* private data of USB hardware driver */ + struct ser_cardstate *ser; /* private data of serial hardware driver */ + struct bas_cardstate *bas; /* private data of base hardware driver */ + } hw; +}; + +struct gigaset_driver { + struct list_head list; + spinlock_t lock; /* locks minor tables and blocked */ + //struct semaphore sem; /* locks this structure */ + struct tty_driver *tty; + unsigned have_tty; + unsigned minor; + unsigned minors; + struct cardstate *cs; + unsigned *flags; + int blocked; + + const struct gigaset_ops *ops; + struct module *owner; +}; + +struct cmdbuf_t { + struct cmdbuf_t *next, *prev; + int len, offset; + struct tasklet_struct *wake_tasklet; + unsigned char buf[0]; +}; + +struct bas_bc_state { + /* isochronous output state */ + atomic_t running; + atomic_t corrbytes; + spinlock_t isooutlock; + struct isow_urbctx_t isoouturbs[BAS_OUTURBS]; + struct isow_urbctx_t *isooutdone, *isooutfree, *isooutovfl; + struct isowbuf_t *isooutbuf; + unsigned numsub; /* submitted URB counter (for diagnostic messages only) */ + struct tasklet_struct sent_tasklet; + + /* isochronous input state */ + spinlock_t isoinlock; + struct urb *isoinurbs[BAS_INURBS]; + unsigned char isoinbuf[BAS_INBUFSIZE * BAS_INURBS]; + struct urb *isoindone; /* completed isoc read URB */ + int loststatus; /* status of dropped URB */ + unsigned isoinlost; /* number of bytes lost */ + /* state of bit unstuffing algorithm (in addition to BC_state.inputstate) */ + unsigned seqlen; /* number of '1' bits not yet unstuffed */ + unsigned inbyte, inbits; /* collected bits for next byte */ + /* statistics */ + unsigned goodbytes; /* bytes correctly received */ + unsigned alignerrs; /* frames with incomplete byte at end */ + unsigned fcserrs; /* FCS errors */ + unsigned frameerrs; /* framing errors */ + unsigned giants; /* long frames */ + unsigned runts; /* short frames */ + unsigned aborts; /* HDLC aborts */ + unsigned shared0s; /* '0' bits shared between flags */ + unsigned stolen0s; /* '0' stuff bits also serving as leading flag bits */ + struct tasklet_struct rcvd_tasklet; +}; + +struct gigaset_ops { + /* Called from ev-layer.c/interface.c for sending AT commands to the device */ + int (*write_cmd)(struct cardstate *cs, + const unsigned char *buf, int len, + struct tasklet_struct *wake_tasklet); + + /* Called from interface.c for additional device control */ + int (*write_room)(struct cardstate *cs); + int (*chars_in_buffer)(struct cardstate *cs); + int (*brkchars)(struct cardstate *cs, const unsigned char buf[6]); + + /* Called from ev-layer.c after setting up connection + * Should call gigaset_bchannel_up(), when finished. 
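
/* Illustrative sketch only, not part of the driver sources: the trailing
 * zero-length array 'buf[0]' in struct cmdbuf_t earlier in this header is
 * the usual C flexible-array idiom, i.e. the payload is allocated together
 * with the header in a single kmalloc(). A minimal allocation helper might
 * look like this (function name invented, usual kernel slab headers assumed):
 */
static struct cmdbuf_t *example_alloc_cmdbuf(int len, gfp_t flags)
{
	struct cmdbuf_t *cb = kmalloc(sizeof(struct cmdbuf_t) + len, flags);

	if (!cb)
		return NULL;
	cb->len = len;
	cb->offset = 0;
	cb->next = cb->prev = NULL;
	cb->wake_tasklet = NULL;
	return cb;	/* command bytes then go into cb->buf[0..len-1] */
}
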
*/ + int (*init_bchannel)(struct bc_state *bcs); + + /* Called from ev-layer.c after hanging up + * Should call gigaset_bchannel_down(), when finished. */ + int (*close_bchannel)(struct bc_state *bcs); + + /* Called by gigaset_initcs() for setting up bcs->hw.xxx */ + int (*initbcshw)(struct bc_state *bcs); + + /* Called by gigaset_freecs() for freeing bcs->hw.xxx */ + int (*freebcshw)(struct bc_state *bcs); + + /* Called by gigaset_stop() or gigaset_bchannel_down() for resetting bcs->hw.xxx */ + void (*reinitbcshw)(struct bc_state *bcs); + + /* Called by gigaset_initcs() for setting up cs->hw.xxx */ + int (*initcshw)(struct cardstate *cs); + + /* Called by gigaset_freecs() for freeing cs->hw.xxx */ + void (*freecshw)(struct cardstate *cs); + + ///* Called by gigaset_stop() for killing URBs, shutting down the device, ... + // hardwareup: ==0: don't try to shut down the device, hardware is really not accessible + // !=0: hardware still up */ + //void (*stophw)(struct cardstate *cs, int hardwareup); + + /* Called from common.c/interface.c for additional serial port control */ + int (*set_modem_ctrl)(struct cardstate *cs, unsigned old_state, unsigned new_state); + int (*baud_rate)(struct cardstate *cs, unsigned cflag); + int (*set_line_ctrl)(struct cardstate *cs, unsigned cflag); + + /* Called from i4l.c to put an skb into the send-queue. */ + int (*send_skb)(struct bc_state *bcs, struct sk_buff *skb); + + /* Called from ev-layer.c to process a block of data + * received through the common/control channel. */ + void (*handle_input)(struct inbuf_t *inbuf); + +}; + +/* = Common structures and definitions ======================================= */ + +/* Parser states for DLE-Event: + * <DLE-EVENT>: <DLE_FLAG> "X" <EVENT> <DLE_FLAG> "." + * <DLE_FLAG>: 0x10 + * <EVENT>: ((a-z)* | (A-Z)* | (0-10)*)+ + */ +#define DLE_FLAG 0x10 + +/* =========================================================================== + * Functions implemented in asyncdata.c + */ + +/* Called from i4l.c to put an skb into the send-queue. + * After sending gigaset_skb_sent() should be called. */ +int gigaset_m10x_send_skb(struct bc_state *bcs, struct sk_buff *skb); + +/* Called from ev-layer.c to process a block of data + * received through the common/control channel. */ +void gigaset_m10x_input(struct inbuf_t *inbuf); + +/* =========================================================================== + * Functions implemented in isocdata.c + */ + +/* Called from i4l.c to put an skb into the send-queue. + * After sending gigaset_skb_sent() should be called. */ +int gigaset_isoc_send_skb(struct bc_state *bcs, struct sk_buff *skb); + +/* Called from ev-layer.c to process a block of data + * received through the common/control channel. 
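
/* Illustrative sketch only, not the parser the driver actually uses: a
 * direct recognizer for the <DLE-EVENT> grammar documented above
 * (DLE_FLAG "X" <EVENT> DLE_FLAG "."). Assumes linux/ctype.h for
 * isalnum(); the function name is invented. */
static int example_is_complete_dle_event(const unsigned char *buf, int len)
{
	int i;

	if (len < 5 || buf[0] != DLE_FLAG || buf[1] != 'X')
		return 0;
	for (i = 2; i < len - 2; i++)
		if (!isalnum(buf[i]))	/* <EVENT>: alphanumeric run */
			return 0;
	return i > 2 && buf[len - 2] == DLE_FLAG && buf[len - 1] == '.';
}
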
*/ +void gigaset_isoc_input(struct inbuf_t *inbuf); + +/* Called from bas-gigaset.c to process a block of data + * received through the isochronous channel */ +void gigaset_isoc_receive(unsigned char *src, unsigned count, struct bc_state *bcs); + +/* Called from bas-gigaset.c to put a block of data + * into the isochronous output buffer */ +int gigaset_isoc_buildframe(struct bc_state *bcs, unsigned char *in, int len); + +/* Called from bas-gigaset.c to initialize the isochronous output buffer */ +void gigaset_isowbuf_init(struct isowbuf_t *iwb, unsigned char idle); + +/* Called from bas-gigaset.c to retrieve a block of bytes for sending */ +int gigaset_isowbuf_getbytes(struct isowbuf_t *iwb, int size); + +/* =========================================================================== + * Functions implemented in i4l.c/gigaset.h + */ + +/* Called by gigaset_initcs() for setting up with the isdn4linux subsystem */ +int gigaset_register_to_LL(struct cardstate *cs, const char *isdnid); + +/* Called from xxx-gigaset.c to indicate completion of sending an skb */ +void gigaset_skb_sent(struct bc_state *bcs, struct sk_buff *skb); + +/* Called from common.c/ev-layer.c to indicate events relevant to the LL */ +int gigaset_isdn_icall(struct at_state_t *at_state); +int gigaset_isdn_setup_accept(struct at_state_t *at_state); +int gigaset_isdn_setup_dial(struct at_state_t *at_state, void *data); + +void gigaset_i4l_cmd(struct cardstate *cs, int cmd); +void gigaset_i4l_channel_cmd(struct bc_state *bcs, int cmd); + + +static inline void gigaset_isdn_rcv_err(struct bc_state *bcs) +{ + isdn_ctrl response; + + /* error -> LL */ + dbg(DEBUG_CMD, "sending L1ERR"); + response.driver = bcs->cs->myid; + response.command = ISDN_STAT_L1ERR; + response.arg = bcs->channel; + response.parm.errcode = ISDN_STAT_L1ERR_RECV; + bcs->cs->iif.statcallb(&response); +} + +/* =========================================================================== + * Functions implemented in ev-layer.c + */ + +/* tasklet called from common.c to process queued events */ +void gigaset_handle_event(unsigned long data); + +/* called from isocdata.c / asyncdata.c + * when a complete modem response line has been received */ +void gigaset_handle_modem_response(struct cardstate *cs); + +/* =========================================================================== + * Functions implemented in proc.c + */ + +/* initialize sysfs for device */ +void gigaset_init_dev_sysfs(struct usb_interface *interface); +void gigaset_free_dev_sysfs(struct usb_interface *interface); + +/* =========================================================================== + * Functions implemented in common.c/gigaset.h + */ + +void gigaset_bcs_reinit(struct bc_state *bcs); +void gigaset_at_init(struct at_state_t *at_state, struct bc_state *bcs, + struct cardstate *cs, int cid); +int gigaset_get_channel(struct bc_state *bcs); +void gigaset_free_channel(struct bc_state *bcs); +int gigaset_get_channels(struct cardstate *cs); +void gigaset_free_channels(struct cardstate *cs); +void gigaset_block_channels(struct cardstate *cs); + +/* Allocate and initialize driver structure. */ +struct gigaset_driver *gigaset_initdriver(unsigned minor, unsigned minors, + const char *procname, + const char *devname, + const char *devfsname, + const struct gigaset_ops *ops, + struct module *owner); + +/* Deallocate driver structure. 
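
/* Rough usage sketch only (names and numbers are invented, and the real
 * hardware modules do considerably more work): how a hardware variant
 * might obtain and release the common driver structure through the entry
 * points declared here. Assumes the usual module headers; a real module
 * would hook these up via module_init()/module_exit(). */
static const struct gigaset_ops example_ops = {
	.write_cmd = NULL,	/* the variant's callbacks would go here */
};
static struct gigaset_driver *example_drv;

static int __init example_init(void)
{
	example_drv = gigaset_initdriver(0 /* first minor */, 1 /* minors */,
					 "example_gigaset", "ttyGEX",
					 "gig/ex/", &example_ops, THIS_MODULE);
	return example_drv ? 0 : -ENOMEM;
}

static void __exit example_exit(void)
{
	gigaset_freedriver(example_drv);
}
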
*/ +void gigaset_freedriver(struct gigaset_driver *drv); +void gigaset_debugdrivers(void); +struct cardstate *gigaset_get_cs_by_minor(unsigned minor); +struct cardstate *gigaset_get_cs_by_tty(struct tty_struct *tty); +struct cardstate *gigaset_get_cs_by_id(int id); + +/* For drivers without fixed assignment device<->cardstate (usb) */ +struct cardstate *gigaset_getunassignedcs(struct gigaset_driver *drv); +void gigaset_unassign(struct cardstate *cs); +void gigaset_blockdriver(struct gigaset_driver *drv); + +/* Allocate and initialize card state. Calls hardware dependent gigaset_init[b]cs(). */ +struct cardstate *gigaset_initcs(struct gigaset_driver *drv, int channels, + int onechannel, int ignoreframes, + int cidmode, const char *modulename); + +/* Free card state. Calls hardware dependent gigaset_free[b]cs(). */ +void gigaset_freecs(struct cardstate *cs); + +/* Tell common.c that hardware and driver are ready. */ +int gigaset_start(struct cardstate *cs); + +/* Tell common.c that the device is not present any more. */ +void gigaset_stop(struct cardstate *cs); + +/* Tell common.c that the driver is being unloaded. */ +void gigaset_shutdown(struct cardstate *cs); + +/* Tell common.c that an skb has been sent. */ +void gigaset_skb_sent(struct bc_state *bcs, struct sk_buff *skb); + +/* Append event to the queue. + * Returns NULL on failure or a pointer to the event on success. + * ptr must be kmalloc()ed (and not be freed by the caller). + */ +struct event_t *gigaset_add_event(struct cardstate *cs, + struct at_state_t *at_state, int type, + void *ptr, int parameter, void *arg); + +/* Called on CONFIG1 command from frontend. */ +int gigaset_enterconfigmode(struct cardstate *cs); //0: success <0: errorcode + +/* cs->lock must not be locked */ +static inline void gigaset_schedule_event(struct cardstate *cs) +{ + unsigned long flags; + spin_lock_irqsave(&cs->lock, flags); + if (atomic_read(&cs->running)) + tasklet_schedule(&cs->event_tasklet); + spin_unlock_irqrestore(&cs->lock, flags); +} + +/* Tell common.c that B channel has been closed. */ +/* cs->lock must not be locked */ +static inline void gigaset_bchannel_down(struct bc_state *bcs) +{ + gigaset_add_event(bcs->cs, &bcs->at_state, EV_BC_CLOSED, NULL, 0, NULL); + + dbg(DEBUG_CMD, "scheduling BC_CLOSED"); + gigaset_schedule_event(bcs->cs); +} + +/* Tell common.c that B channel has been opened. */ +/* cs->lock must not be locked */ +static inline void gigaset_bchannel_up(struct bc_state *bcs) +{ + gigaset_add_event(bcs->cs, &bcs->at_state, EV_BC_OPEN, NULL, 0, NULL); + + dbg(DEBUG_CMD, "scheduling BC_OPEN"); + gigaset_schedule_event(bcs->cs); +} + +/* handling routines for sk_buff */ +/* ============================= */ + +/* private version of __skb_put() + * append 'len' bytes to the content of 'skb', already knowing that the + * existing buffer can accomodate them + * returns a pointer to the location where the new bytes should be copied to + * This function does not take any locks so it must be called with the + * appropriate locks held only. + */ +static inline unsigned char *gigaset_skb_put_quick(struct sk_buff *skb, + unsigned int len) +{ + unsigned char *tmp = skb->tail; + /*SKB_LINEAR_ASSERT(skb);*/ /* not needed here */ + skb->tail += len; + skb->len += len; + return tmp; +} + +/* pass received skb to LL + * Warning: skb must not be accessed anymore! 
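
/* Illustrative sketch only, not from the driver sources: the intended use
 * of the quick put helper above. The caller must already have allocated
 * the skb with at least 'count' bytes of tailroom; memcpy() is assumed to
 * come from the usual kernel string headers. */
static inline void example_append_to_skb(struct sk_buff *skb,
					 const unsigned char *src,
					 unsigned int count)
{
	unsigned char *dst = gigaset_skb_put_quick(skb, count);

	memcpy(dst, src, count);
}
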
+ */ +static inline void gigaset_rcv_skb(struct sk_buff *skb, + struct cardstate *cs, + struct bc_state *bcs) +{ + cs->iif.rcvcallb_skb(cs->myid, bcs->channel, skb); + bcs->trans_down++; +} + +/* handle reception of corrupted skb + * Warning: skb must not be accessed anymore! + */ +static inline void gigaset_rcv_error(struct sk_buff *procskb, + struct cardstate *cs, + struct bc_state *bcs) +{ + if (procskb) + dev_kfree_skb(procskb); + + if (bcs->ignore) + --bcs->ignore; + else { + ++bcs->corrupted; + gigaset_isdn_rcv_err(bcs); + } +} + + +/* bitwise byte inversion table */ +extern __u8 gigaset_invtab[]; /* in common.c */ + + +/* append received bytes to inbuf */ +static inline int gigaset_fill_inbuf(struct inbuf_t *inbuf, + const unsigned char *src, + unsigned numbytes) +{ + unsigned n, head, tail, bytesleft; + + dbg(DEBUG_INTR, "received %u bytes", numbytes); + + if (!numbytes) + return 0; + + bytesleft = numbytes; + tail = atomic_read(&inbuf->tail); + head = atomic_read(&inbuf->head); + dbg(DEBUG_INTR, "buffer state: %u -> %u", head, tail); + + while (bytesleft) { + if (head > tail) + n = head - 1 - tail; + else if (head == 0) + n = (RBUFSIZE-1) - tail; + else + n = RBUFSIZE - tail; + if (!n) { + err("buffer overflow (%u bytes lost)", bytesleft); + break; + } + if (n > bytesleft) + n = bytesleft; + memcpy(inbuf->data + tail, src, n); + bytesleft -= n; + tail = (tail + n) % RBUFSIZE; + src += n; + } + dbg(DEBUG_INTR, "setting tail to %u", tail); + atomic_set(&inbuf->tail, tail); + return numbytes != bytesleft; +} + +/* =========================================================================== + * Functions implemented in interface.c + */ + +/* initialize interface */ +void gigaset_if_initdriver(struct gigaset_driver *drv, const char *procname, + const char *devname, const char *devfsname); +/* release interface */ +void gigaset_if_freedriver(struct gigaset_driver *drv); +/* add minor */ +void gigaset_if_init(struct cardstate *cs); +/* remove minor */ +void gigaset_if_free(struct cardstate *cs); +/* device received data */ +void gigaset_if_receive(struct cardstate *cs, + unsigned char *buffer, size_t len); + +#endif diff --git a/drivers/isdn/gigaset/i4l.c b/drivers/isdn/gigaset/i4l.c new file mode 100644 index 000000000000..731a675f21b0 --- /dev/null +++ b/drivers/isdn/gigaset/i4l.c @@ -0,0 +1,567 @@ +/* + * Stuff used by all variants of the driver + * + * Copyright (c) 2001 by Stefan Eilers (Eilers.Stefan@epost.de), + * Hansjoerg Lipp (hjlipp@web.de), + * Tilman Schmidt (tilman@imap.cc). + * + * ===================================================================== + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of + * the License, or (at your option) any later version. + * ===================================================================== + * ToDo: ... + * ===================================================================== + * Version: $Id: i4l.c,v 1.3.2.9 2006/02/04 18:28:16 hjlipp Exp $ + * ===================================================================== + */ + +#include "gigaset.h" + +/* == Handling of I4L IO ============================================================================*/ + +/* writebuf_from_LL + * called by LL to transmit data on an open channel + * inserts the buffer data into the send queue and starts the transmission + * Note that this operation must not sleep! 
+ * When the buffer is processed completely, gigaset_skb_sent() should be called. + * parameters: + * driverID driver ID as assigned by LL + * channel channel number + * ack if != 0 LL wants to be notified on completion via statcallb(ISDN_STAT_BSENT) + * skb skb containing data to send + * return value: + * number of accepted bytes + * 0 if temporarily unable to accept data (out of buffer space) + * <0 on error (e.g. -EINVAL) + */ +static int writebuf_from_LL(int driverID, int channel, int ack, struct sk_buff *skb) +{ + struct cardstate *cs; + struct bc_state *bcs; + unsigned len; + unsigned skblen; + + if (!(cs = gigaset_get_cs_by_id(driverID))) { + err("%s: invalid driver ID (%d)", __func__, driverID); + return -ENODEV; + } + if (channel < 0 || channel >= cs->channels) { + err("%s: invalid channel ID (%d)", __func__, channel); + return -ENODEV; + } + bcs = &cs->bcs[channel]; + len = skb->len; + + dbg(DEBUG_LLDATA, + "Receiving data from LL (id: %d, channel: %d, ack: %d, size: %d)", + driverID, channel, ack, len); + + if (!len) { + if (ack) + warn("not ACKing empty packet from LL"); + return 0; + } + if (len > MAX_BUF_SIZE) { + err("%s: packet too large (%d bytes)", __func__, len); + return -EINVAL; + } + + if (!atomic_read(&cs->connected)) + return -ENODEV; + + skblen = ack ? len : 0; + skb->head[0] = skblen & 0xff; + skb->head[1] = skblen >> 8; + dbg(DEBUG_MCMD, "skb: len=%u, skblen=%u: %02x %02x", len, skblen, + (unsigned) skb->head[0], (unsigned) skb->head[1]); + + /* pass to device-specific module */ + return cs->ops->send_skb(bcs, skb); +} + +void gigaset_skb_sent(struct bc_state *bcs, struct sk_buff *skb) +{ + unsigned len; + isdn_ctrl response; + + ++bcs->trans_up; + + if (skb->len) + warn("%s: skb->len==%d", __func__, skb->len); + + len = (unsigned char) skb->head[0] | + (unsigned) (unsigned char) skb->head[1] << 8; + if (len) { + dbg(DEBUG_MCMD, + "Acknowledge sending to LL (id: %d, channel: %d size: %u)", + bcs->cs->myid, bcs->channel, len); + + response.driver = bcs->cs->myid; + response.command = ISDN_STAT_BSENT; + response.arg = bcs->channel; + response.parm.length = len; + bcs->cs->iif.statcallb(&response); + } +} +EXPORT_SYMBOL_GPL(gigaset_skb_sent); + +/* This function will be called by LL to send commands + * NOTE: LL ignores the returned value for commands other than ISDN_CMD_IOCTL, + * so don't put too much effort into it. + */ +static int command_from_LL(isdn_ctrl *cntrl) +{ + struct cardstate *cs = gigaset_get_cs_by_id(cntrl->driver); + //isdn_ctrl response; + //unsigned long flags; + struct bc_state *bcs; + int retval = 0; + struct setup_parm *sp; + + //dbg(DEBUG_ANY, "Gigaset_HW: Receiving command"); + gigaset_debugdrivers(); + + /* Terminate this call if no device is present. But if the command is "ISDN_CMD_LOCK" or + * "ISDN_CMD_UNLOCK" then execute it because they are device independent! + */ + //FIXME "remove test for &connected" + if ((!cs || !atomic_read(&cs->connected))) { + warn("LL tried to access unknown device with nr. 
%d", + cntrl->driver); + return -ENODEV; + } + + switch (cntrl->command) { + case ISDN_CMD_IOCTL: + + dbg(DEBUG_ANY, "ISDN_CMD_IOCTL (driver:%d,arg: %ld)", + cntrl->driver, cntrl->arg); + + warn("ISDN_CMD_IOCTL is not supported."); + return -EINVAL; + + case ISDN_CMD_DIAL: + dbg(DEBUG_ANY, "ISDN_CMD_DIAL (driver: %d, channel: %ld, " + "phone: %s,ownmsn: %s, si1: %d, si2: %d)", + cntrl->driver, cntrl->arg, + cntrl->parm.setup.phone, cntrl->parm.setup.eazmsn, + cntrl->parm.setup.si1, cntrl->parm.setup.si2); + + if (cntrl->arg >= cs->channels) { + err("invalid channel (%d)", (int) cntrl->arg); + return -EINVAL; + } + + bcs = cs->bcs + cntrl->arg; + + if (!gigaset_get_channel(bcs)) { + err("channel not free"); + return -EBUSY; + } + + sp = kmalloc(sizeof *sp, GFP_ATOMIC); + if (!sp) { + gigaset_free_channel(bcs); + err("ISDN_CMD_DIAL: out of memory"); + return -ENOMEM; + } + *sp = cntrl->parm.setup; + + if (!gigaset_add_event(cs, &bcs->at_state, EV_DIAL, sp, + atomic_read(&bcs->at_state.seq_index), + NULL)) { + //FIXME what should we do? + kfree(sp); + gigaset_free_channel(bcs); + return -ENOMEM; + } + + dbg(DEBUG_CMD, "scheduling DIAL"); + gigaset_schedule_event(cs); + break; + case ISDN_CMD_ACCEPTD: //FIXME + dbg(DEBUG_ANY, "ISDN_CMD_ACCEPTD"); + + if (cntrl->arg >= cs->channels) { + err("invalid channel (%d)", (int) cntrl->arg); + return -EINVAL; + } + + if (!gigaset_add_event(cs, &cs->bcs[cntrl->arg].at_state, + EV_ACCEPT, NULL, 0, NULL)) { + //FIXME what should we do? + return -ENOMEM; + } + + dbg(DEBUG_CMD, "scheduling ACCEPT"); + gigaset_schedule_event(cs); + + break; + case ISDN_CMD_ACCEPTB: + dbg(DEBUG_ANY, "ISDN_CMD_ACCEPTB"); + break; + case ISDN_CMD_HANGUP: + dbg(DEBUG_ANY, + "ISDN_CMD_HANGUP (channel: %d)", (int) cntrl->arg); + + if (cntrl->arg >= cs->channels) { + err("ISDN_CMD_HANGUP: invalid channel (%u)", + (unsigned) cntrl->arg); + return -EINVAL; + } + + if (!gigaset_add_event(cs, &cs->bcs[cntrl->arg].at_state, + EV_HUP, NULL, 0, NULL)) { + //FIXME what should we do? + return -ENOMEM; + } + + dbg(DEBUG_CMD, "scheduling HUP"); + gigaset_schedule_event(cs); + + break; + case ISDN_CMD_CLREAZ: /* Do not signal incoming signals */ //FIXME + dbg(DEBUG_ANY, "ISDN_CMD_CLREAZ"); + break; + case ISDN_CMD_SETEAZ: /* Signal incoming calls for given MSN */ //FIXME + dbg(DEBUG_ANY, + "ISDN_CMD_SETEAZ (id:%d, channel: %ld, number: %s)", + cntrl->driver, cntrl->arg, cntrl->parm.num); + break; + case ISDN_CMD_SETL2: /* Set L2 to given protocol */ + dbg(DEBUG_ANY, "ISDN_CMD_SETL2 (Channel: %ld, Proto: %lx)", + cntrl->arg & 0xff, (cntrl->arg >> 8)); + + if ((cntrl->arg & 0xff) >= cs->channels) { + err("invalid channel (%u)", + (unsigned) cntrl->arg & 0xff); + return -EINVAL; + } + + if (!gigaset_add_event(cs, &cs->bcs[cntrl->arg & 0xff].at_state, + EV_PROTO_L2, NULL, cntrl->arg >> 8, + NULL)) { + //FIXME what should we do? 
+ return -ENOMEM; + } + + dbg(DEBUG_CMD, "scheduling PROTO_L2"); + gigaset_schedule_event(cs); + break; + case ISDN_CMD_SETL3: /* Set L3 to given protocol */ + dbg(DEBUG_ANY, "ISDN_CMD_SETL3 (Channel: %ld, Proto: %lx)", + cntrl->arg & 0xff, (cntrl->arg >> 8)); + + if ((cntrl->arg & 0xff) >= cs->channels) { + err("invalid channel (%u)", + (unsigned) cntrl->arg & 0xff); + return -EINVAL; + } + + if (cntrl->arg >> 8 != ISDN_PROTO_L3_TRANS) { + err("invalid protocol %lu", cntrl->arg >> 8); + return -EINVAL; + } + + break; + case ISDN_CMD_PROCEED: + dbg(DEBUG_ANY, "ISDN_CMD_PROCEED"); //FIXME + break; + case ISDN_CMD_ALERT: + dbg(DEBUG_ANY, "ISDN_CMD_ALERT"); //FIXME + if (cntrl->arg >= cs->channels) { + err("invalid channel (%d)", (int) cntrl->arg); + return -EINVAL; + } + //bcs = cs->bcs + cntrl->arg; + //bcs->proto2 = -1; + // FIXME + break; + case ISDN_CMD_REDIR: + dbg(DEBUG_ANY, "ISDN_CMD_REDIR"); //FIXME + break; + case ISDN_CMD_PROT_IO: + dbg(DEBUG_ANY, "ISDN_CMD_PROT_IO"); + break; + case ISDN_CMD_FAXCMD: + dbg(DEBUG_ANY, "ISDN_CMD_FAXCMD"); + break; + case ISDN_CMD_GETL2: + dbg(DEBUG_ANY, "ISDN_CMD_GETL2"); + break; + case ISDN_CMD_GETL3: + dbg(DEBUG_ANY, "ISDN_CMD_GETL3"); + break; + case ISDN_CMD_GETEAZ: + dbg(DEBUG_ANY, "ISDN_CMD_GETEAZ"); + break; + case ISDN_CMD_SETSIL: + dbg(DEBUG_ANY, "ISDN_CMD_SETSIL"); + break; + case ISDN_CMD_GETSIL: + dbg(DEBUG_ANY, "ISDN_CMD_GETSIL"); + break; + default: + err("unknown command %d from LL", + cntrl->command); + return -EINVAL; + } + + return retval; +} + +void gigaset_i4l_cmd(struct cardstate *cs, int cmd) +{ + isdn_ctrl command; + + command.driver = cs->myid; + command.command = cmd; + command.arg = 0; + cs->iif.statcallb(&command); +} + +void gigaset_i4l_channel_cmd(struct bc_state *bcs, int cmd) +{ + isdn_ctrl command; + + command.driver = bcs->cs->myid; + command.command = cmd; + command.arg = bcs->channel; + bcs->cs->iif.statcallb(&command); +} + +int gigaset_isdn_setup_dial(struct at_state_t *at_state, void *data) +{ + struct bc_state *bcs = at_state->bcs; + unsigned proto; + const char *bc; + size_t length[AT_NUM]; + size_t l; + int i; + struct setup_parm *sp = data; + + switch (bcs->proto2) { + case ISDN_PROTO_L2_HDLC: + proto = 1; /* 0: Bitsynchron, 1: HDLC, 2: voice */ + break; + case ISDN_PROTO_L2_TRANS: + proto = 2; /* 0: Bitsynchron, 1: HDLC, 2: voice */ + break; + default: + err("invalid protocol: %u", bcs->proto2); + return -EINVAL; + } + + switch (sp->si1) { + case 1: /* audio */ + bc = "9090A3"; /* 3.1 kHz audio, A-law */ + break; + case 7: /* data */ + default: /* hope the app knows what it is doing */ + bc = "8890"; /* unrestricted digital information */ + } + //FIXME add missing si1 values from 1TR6, inspect si2, set HLC/LLC + + length[AT_DIAL ] = 1 + strlen(sp->phone) + 1 + 1; + l = strlen(sp->eazmsn); + length[AT_MSN ] = l ? 
6 + l + 1 + 1 : 0; + length[AT_BC ] = 5 + strlen(bc) + 1 + 1; + length[AT_PROTO] = 6 + 1 + 1 + 1; /* proto: 1 character */ + length[AT_ISO ] = 6 + 1 + 1 + 1; /* channel: 1 character */ + length[AT_TYPE ] = 6 + 1 + 1 + 1; /* call type: 1 character */ + length[AT_HLC ] = 0; + + for (i = 0; i < AT_NUM; ++i) { + kfree(bcs->commands[i]); + bcs->commands[i] = NULL; + if (length[i] && + !(bcs->commands[i] = kmalloc(length[i], GFP_ATOMIC))) { + err("out of memory"); + return -ENOMEM; + } + } + + /* type = 1: extern, 0: intern, 2: recall, 3: door, 4: centrex */ + if (sp->phone[0] == '*' && sp->phone[1] == '*') { + /* internal call: translate ** prefix to CTP value */ + snprintf(bcs->commands[AT_DIAL], length[AT_DIAL], + "D%s\r", sp->phone+2); + strncpy(bcs->commands[AT_TYPE], "^SCTP=0\r", length[AT_TYPE]); + } else { + snprintf(bcs->commands[AT_DIAL], length[AT_DIAL], + "D%s\r", sp->phone); + strncpy(bcs->commands[AT_TYPE], "^SCTP=1\r", length[AT_TYPE]); + } + + if (bcs->commands[AT_MSN]) + snprintf(bcs->commands[AT_MSN], length[AT_MSN], "^SMSN=%s\r", sp->eazmsn); + snprintf(bcs->commands[AT_BC ], length[AT_BC ], "^SBC=%s\r", bc); + snprintf(bcs->commands[AT_PROTO], length[AT_PROTO], "^SBPR=%u\r", proto); + snprintf(bcs->commands[AT_ISO ], length[AT_ISO ], "^SISO=%u\r", (unsigned)bcs->channel + 1); + + return 0; +} + +int gigaset_isdn_setup_accept(struct at_state_t *at_state) +{ + unsigned proto; + size_t length[AT_NUM]; + int i; + struct bc_state *bcs = at_state->bcs; + + switch (bcs->proto2) { + case ISDN_PROTO_L2_HDLC: + proto = 1; /* 0: Bitsynchron, 1: HDLC, 2: voice */ + break; + case ISDN_PROTO_L2_TRANS: + proto = 2; /* 0: Bitsynchron, 1: HDLC, 2: voice */ + break; + default: + err("invalid protocol: %u", bcs->proto2); + return -EINVAL; + } + + length[AT_DIAL ] = 0; + length[AT_MSN ] = 0; + length[AT_BC ] = 0; + length[AT_PROTO] = 6 + 1 + 1 + 1; /* proto: 1 character */ + length[AT_ISO ] = 6 + 1 + 1 + 1; /* channel: 1 character */ + length[AT_TYPE ] = 0; + length[AT_HLC ] = 0; + + for (i = 0; i < AT_NUM; ++i) { + kfree(bcs->commands[i]); + bcs->commands[i] = NULL; + if (length[i] && + !(bcs->commands[i] = kmalloc(length[i], GFP_ATOMIC))) { + err("out of memory"); + return -ENOMEM; + } + } + + snprintf(bcs->commands[AT_PROTO], length[AT_PROTO], "^SBPR=%u\r", proto); + snprintf(bcs->commands[AT_ISO ], length[AT_ISO ], "^SISO=%u\r", (unsigned) bcs->channel + 1); + + return 0; +} + +int gigaset_isdn_icall(struct at_state_t *at_state) +{ + struct cardstate *cs = at_state->cs; + struct bc_state *bcs = at_state->bcs; + isdn_ctrl response; + int retval; + + /* fill ICALL structure */ + response.parm.setup.si1 = 0; /* default: unknown */ + response.parm.setup.si2 = 0; + response.parm.setup.screen = 0; //FIXME how to set these? 
+ response.parm.setup.plan = 0; + if (!at_state->str_var[STR_ZBC]) { + /* no BC (internal call): assume speech, A-law */ + response.parm.setup.si1 = 1; + } else if (!strcmp(at_state->str_var[STR_ZBC], "8890")) { + /* unrestricted digital information */ + response.parm.setup.si1 = 7; + } else if (!strcmp(at_state->str_var[STR_ZBC], "8090A3")) { + /* speech, A-law */ + response.parm.setup.si1 = 1; + } else if (!strcmp(at_state->str_var[STR_ZBC], "9090A3")) { + /* 3,1 kHz audio, A-law */ + response.parm.setup.si1 = 1; + response.parm.setup.si2 = 2; + } else { + warn("RING ignored - unsupported BC %s", + at_state->str_var[STR_ZBC]); + return ICALL_IGNORE; + } + if (at_state->str_var[STR_NMBR]) { + strncpy(response.parm.setup.phone, at_state->str_var[STR_NMBR], + sizeof response.parm.setup.phone - 1); + response.parm.setup.phone[sizeof response.parm.setup.phone - 1] = 0; + } else + response.parm.setup.phone[0] = 0; + if (at_state->str_var[STR_ZCPN]) { + strncpy(response.parm.setup.eazmsn, at_state->str_var[STR_ZCPN], + sizeof response.parm.setup.eazmsn - 1); + response.parm.setup.eazmsn[sizeof response.parm.setup.eazmsn - 1] = 0; + } else + response.parm.setup.eazmsn[0] = 0; + + if (!bcs) { + notice("no channel for incoming call"); + dbg(DEBUG_CMD, "Sending ICALLW"); + response.command = ISDN_STAT_ICALLW; + response.arg = 0; //FIXME + } else { + dbg(DEBUG_CMD, "Sending ICALL"); + response.command = ISDN_STAT_ICALL; + response.arg = bcs->channel; //FIXME + } + response.driver = cs->myid; + retval = cs->iif.statcallb(&response); + dbg(DEBUG_CMD, "Response: %d", retval); + switch (retval) { + case 0: /* no takers */ + return ICALL_IGNORE; + case 1: /* alerting */ + bcs->chstate |= CHS_NOTIFY_LL; + return ICALL_ACCEPT; + case 2: /* reject */ + return ICALL_REJECT; + case 3: /* incomplete */ + warn("LL requested unsupported feature: Incomplete Number"); + return ICALL_IGNORE; + case 4: /* proceeding */ + /* Gigaset will send ALERTING anyway. + * There doesn't seem to be a way to avoid this. + */ + return ICALL_ACCEPT; + case 5: /* deflect */ + warn("LL requested unsupported feature: Call Deflection"); + return ICALL_IGNORE; + default: + err("LL error %d on ICALL", retval); + return ICALL_IGNORE; + } +} + +/* Set Callback function pointer */ +int gigaset_register_to_LL(struct cardstate *cs, const char *isdnid) +{ + isdn_if *iif = &cs->iif; + + dbg(DEBUG_ANY, "Register driver capabilities to LL"); + + //iif->id[sizeof(iif->id) - 1]=0; + //strncpy(iif->id, isdnid, sizeof(iif->id) - 1); + if (snprintf(iif->id, sizeof iif->id, "%s_%u", isdnid, cs->minor_index) + >= sizeof iif->id) + return -ENOMEM; //FIXME EINVAL/...?? 
+ + iif->owner = THIS_MODULE; + iif->channels = cs->channels; /* I am supporting just one channel *//* I was supporting...*/ + iif->maxbufsize = MAX_BUF_SIZE; + iif->features = ISDN_FEATURE_L2_TRANS | /* Our device is very advanced, therefore */ + ISDN_FEATURE_L2_HDLC | +#ifdef GIG_X75 + ISDN_FEATURE_L2_X75I | +#endif + ISDN_FEATURE_L3_TRANS | + ISDN_FEATURE_P_EURO; + iif->hl_hdrlen = HW_HDR_LEN; /* Area for storing ack */ + iif->command = command_from_LL; + iif->writebuf_skb = writebuf_from_LL; + iif->writecmd = NULL; /* Don't support isdnctrl */ + iif->readstat = NULL; /* Don't support isdnctrl */ + iif->rcvcallb_skb = NULL; /* Will be set by LL */ + iif->statcallb = NULL; /* Will be set by LL */ + + if (!register_isdn(iif)) + return 0; + + cs->myid = iif->channels; /* Set my device id */ + return 1; +} diff --git a/drivers/isdn/gigaset/interface.c b/drivers/isdn/gigaset/interface.c new file mode 100644 index 000000000000..3a81d9c65141 --- /dev/null +++ b/drivers/isdn/gigaset/interface.c @@ -0,0 +1,718 @@ +/* + * interface to user space for the gigaset driver + * + * Copyright (c) 2004 by Hansjoerg Lipp <hjlipp@web.de> + * + * ===================================================================== + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of + * the License, or (at your option) any later version. + * ===================================================================== + * Version: $Id: interface.c,v 1.14.4.15 2006/02/04 18:28:16 hjlipp Exp $ + * ===================================================================== + */ + +#include "gigaset.h" +#include <linux/gigaset_dev.h> +#include <linux/tty.h> +#include <linux/tty_flip.h> + +/*** our ioctls ***/ + +static int if_lock(struct cardstate *cs, int *arg) +{ + int cmd = *arg; + + dbg(DEBUG_IF, "%u: if_lock (%d)", cs->minor_index, cmd); + + if (cmd > 1) + return -EINVAL; + + if (cmd < 0) { + *arg = atomic_read(&cs->mstate) == MS_LOCKED; //FIXME remove? 
+ return 0; + } + + if (!cmd && atomic_read(&cs->mstate) == MS_LOCKED + && atomic_read(&cs->connected)) { + cs->ops->set_modem_ctrl(cs, 0, TIOCM_DTR|TIOCM_RTS); + cs->ops->baud_rate(cs, B115200); + cs->ops->set_line_ctrl(cs, CS8); + cs->control_state = TIOCM_DTR|TIOCM_RTS; + } + + cs->waiting = 1; + if (!gigaset_add_event(cs, &cs->at_state, EV_IF_LOCK, + NULL, cmd, NULL)) { + cs->waiting = 0; + return -ENOMEM; + } + + dbg(DEBUG_CMD, "scheduling IF_LOCK"); + gigaset_schedule_event(cs); + + wait_event(cs->waitqueue, !cs->waiting); + + if (cs->cmd_result >= 0) { + *arg = cs->cmd_result; + return 0; + } + + return cs->cmd_result; +} + +static int if_version(struct cardstate *cs, unsigned arg[4]) +{ + static const unsigned version[4] = GIG_VERSION; + static const unsigned compat[4] = GIG_COMPAT; + unsigned cmd = arg[0]; + + dbg(DEBUG_IF, "%u: if_version (%d)", cs->minor_index, cmd); + + switch (cmd) { + case GIGVER_DRIVER: + memcpy(arg, version, sizeof version); + return 0; + case GIGVER_COMPAT: + memcpy(arg, compat, sizeof compat); + return 0; + case GIGVER_FWBASE: + cs->waiting = 1; + if (!gigaset_add_event(cs, &cs->at_state, EV_IF_VER, + NULL, 0, arg)) { + cs->waiting = 0; + return -ENOMEM; + } + + dbg(DEBUG_CMD, "scheduling IF_VER"); + gigaset_schedule_event(cs); + + wait_event(cs->waitqueue, !cs->waiting); + + if (cs->cmd_result >= 0) + return 0; + + return cs->cmd_result; + default: + return -EINVAL; + } +} + +static int if_config(struct cardstate *cs, int *arg) +{ + dbg(DEBUG_IF, "%u: if_config (%d)", cs->minor_index, *arg); + + if (*arg != 1) + return -EINVAL; + + if (atomic_read(&cs->mstate) != MS_LOCKED) + return -EBUSY; + + *arg = 0; + return gigaset_enterconfigmode(cs); +} + +/*** the terminal driver ***/ +/* stolen from usbserial and some other tty drivers */ + +static int if_open(struct tty_struct *tty, struct file *filp); +static void if_close(struct tty_struct *tty, struct file *filp); +static int if_ioctl(struct tty_struct *tty, struct file *file, + unsigned int cmd, unsigned long arg); +static int if_write_room(struct tty_struct *tty); +static int if_chars_in_buffer(struct tty_struct *tty); +static void if_throttle(struct tty_struct *tty); +static void if_unthrottle(struct tty_struct *tty); +static void if_set_termios(struct tty_struct *tty, struct termios *old); +static int if_tiocmget(struct tty_struct *tty, struct file *file); +static int if_tiocmset(struct tty_struct *tty, struct file *file, + unsigned int set, unsigned int clear); +static int if_write(struct tty_struct *tty, + const unsigned char *buf, int count); + +static struct tty_operations if_ops = { + .open = if_open, + .close = if_close, + .ioctl = if_ioctl, + .write = if_write, + .write_room = if_write_room, + .chars_in_buffer = if_chars_in_buffer, + .set_termios = if_set_termios, + .throttle = if_throttle, + .unthrottle = if_unthrottle, +#if 0 + .break_ctl = serial_break, +#endif + .tiocmget = if_tiocmget, + .tiocmset = if_tiocmset, +}; + +static int if_open(struct tty_struct *tty, struct file *filp) +{ + struct cardstate *cs; + unsigned long flags; + + dbg(DEBUG_IF, "%d+%d: %s()", tty->driver->minor_start, tty->index, + __FUNCTION__); + + tty->driver_data = NULL; + + cs = gigaset_get_cs_by_tty(tty); + if (!cs) + return -ENODEV; + + if (down_interruptible(&cs->sem)) + return -ERESTARTSYS; // FIXME -EINTR? 
+ tty->driver_data = cs; + + ++cs->open_count; + + if (cs->open_count == 1) { + spin_lock_irqsave(&cs->lock, flags); + cs->tty = tty; + spin_unlock_irqrestore(&cs->lock, flags); + tty->low_latency = 1; //FIXME test + //FIXME + } + + up(&cs->sem); + return 0; +} + +static void if_close(struct tty_struct *tty, struct file *filp) +{ + struct cardstate *cs; + unsigned long flags; + + cs = (struct cardstate *) tty->driver_data; + if (!cs) { + err("cs==NULL in %s", __FUNCTION__); + return; + } + + dbg(DEBUG_IF, "%u: %s()", cs->minor_index, __FUNCTION__); + + down(&cs->sem); + + if (!cs->open_count) + warn("%s: device not opened", __FUNCTION__); + else { + if (!--cs->open_count) { + spin_lock_irqsave(&cs->lock, flags); + cs->tty = NULL; + spin_unlock_irqrestore(&cs->lock, flags); + //FIXME + } + } + + up(&cs->sem); +} + +static int if_ioctl(struct tty_struct *tty, struct file *file, + unsigned int cmd, unsigned long arg) +{ + struct cardstate *cs; + int retval = -ENODEV; + int int_arg; + unsigned char buf[6]; + unsigned version[4]; + + cs = (struct cardstate *) tty->driver_data; + if (!cs) { + err("cs==NULL in %s", __FUNCTION__); + return -ENODEV; + } + + dbg(DEBUG_IF, "%u: %s(0x%x)", cs->minor_index, __FUNCTION__, cmd); + + if (down_interruptible(&cs->sem)) + return -ERESTARTSYS; // FIXME -EINTR? + + if (!cs->open_count) + warn("%s: device not opened", __FUNCTION__); + else { + retval = 0; + switch (cmd) { + case GIGASET_REDIR: + retval = get_user(int_arg, (int __user *) arg); + if (retval >= 0) + retval = if_lock(cs, &int_arg); + if (retval >= 0) + retval = put_user(int_arg, (int __user *) arg); + break; + case GIGASET_CONFIG: + retval = get_user(int_arg, (int __user *) arg); + if (retval >= 0) + retval = if_config(cs, &int_arg); + if (retval >= 0) + retval = put_user(int_arg, (int __user *) arg); + break; + case GIGASET_BRKCHARS: + //FIXME test if MS_LOCKED + gigaset_dbg_buffer(DEBUG_IF, "GIGASET_BRKCHARS", + 6, (const unsigned char *) arg, 1); + if (!atomic_read(&cs->connected)) { + dbg(DEBUG_ANY, "can't communicate with unplugged device"); + retval = -ENODEV; + break; + } + retval = copy_from_user(&buf, + (const unsigned char __user *) arg, 6) + ? -EFAULT : 0; + if (retval >= 0) + retval = cs->ops->brkchars(cs, buf); + break; + case GIGASET_VERSION: + retval = copy_from_user(version, (unsigned __user *) arg, + sizeof version) ? -EFAULT : 0; + if (retval >= 0) + retval = if_version(cs, version); + if (retval >= 0) + retval = copy_to_user((unsigned __user *) arg, version, + sizeof version) + ? -EFAULT : 0; + break; + default: + dbg(DEBUG_ANY, "%s: arg not supported - 0x%04x", + __FUNCTION__, cmd); + retval = -ENOIOCTLCMD; + } + } + + up(&cs->sem); + + return retval; +} + +static int if_tiocmget(struct tty_struct *tty, struct file *file) +{ + struct cardstate *cs; + int retval; + + cs = (struct cardstate *) tty->driver_data; + if (!cs) { + err("cs==NULL in %s", __FUNCTION__); + return -ENODEV; + } + + dbg(DEBUG_IF, "%u: %s()", cs->minor_index, __FUNCTION__); + + if (down_interruptible(&cs->sem)) + return -ERESTARTSYS; // FIXME -EINTR? + + // FIXME read from device? 
+ retval = cs->control_state & (TIOCM_RTS|TIOCM_DTR); + + up(&cs->sem); + + return retval; +} + +static int if_tiocmset(struct tty_struct *tty, struct file *file, + unsigned int set, unsigned int clear) +{ + struct cardstate *cs; + int retval; + unsigned mc; + + cs = (struct cardstate *) tty->driver_data; + if (!cs) { + err("cs==NULL in %s", __FUNCTION__); + return -ENODEV; + } + + dbg(DEBUG_IF, + "%u: %s(0x%x, 0x%x)", cs->minor_index, __FUNCTION__, set, clear); + + if (down_interruptible(&cs->sem)) + return -ERESTARTSYS; // FIXME -EINTR? + + if (!atomic_read(&cs->connected)) { + dbg(DEBUG_ANY, "can't communicate with unplugged device"); + retval = -ENODEV; + } else { + mc = (cs->control_state | set) & ~clear & (TIOCM_RTS|TIOCM_DTR); + retval = cs->ops->set_modem_ctrl(cs, cs->control_state, mc); + cs->control_state = mc; + } + + up(&cs->sem); + + return retval; +} + +static int if_write(struct tty_struct *tty, const unsigned char *buf, int count) +{ + struct cardstate *cs; + int retval = -ENODEV; + + cs = (struct cardstate *) tty->driver_data; + if (!cs) { + err("cs==NULL in %s", __FUNCTION__); + return -ENODEV; + } + + dbg(DEBUG_IF, "%u: %s()", cs->minor_index, __FUNCTION__); + + if (down_interruptible(&cs->sem)) + return -ERESTARTSYS; // FIXME -EINTR? + + if (!cs->open_count) + warn("%s: device not opened", __FUNCTION__); + else if (atomic_read(&cs->mstate) != MS_LOCKED) { + warn("can't write to unlocked device"); + retval = -EBUSY; + } else if (!atomic_read(&cs->connected)) { + dbg(DEBUG_ANY, "can't write to unplugged device"); + retval = -EBUSY; //FIXME + } else { + retval = cs->ops->write_cmd(cs, buf, count, + &cs->if_wake_tasklet); + } + + up(&cs->sem); + + return retval; +} + +static int if_write_room(struct tty_struct *tty) +{ + struct cardstate *cs; + int retval = -ENODEV; + + cs = (struct cardstate *) tty->driver_data; + if (!cs) { + err("cs==NULL in %s", __FUNCTION__); + return -ENODEV; + } + + dbg(DEBUG_IF, "%u: %s()", cs->minor_index, __FUNCTION__); + + if (down_interruptible(&cs->sem)) + return -ERESTARTSYS; // FIXME -EINTR? + + if (!cs->open_count) + warn("%s: device not opened", __FUNCTION__); + else if (atomic_read(&cs->mstate) != MS_LOCKED) { + warn("can't write to unlocked device"); + retval = -EBUSY; //FIXME + } else if (!atomic_read(&cs->connected)) { + dbg(DEBUG_ANY, "can't write to unplugged device"); + retval = -EBUSY; //FIXME + } else + retval = cs->ops->write_room(cs); + + up(&cs->sem); + + return retval; +} + +static int if_chars_in_buffer(struct tty_struct *tty) +{ + struct cardstate *cs; + int retval = -ENODEV; + + cs = (struct cardstate *) tty->driver_data; + if (!cs) { + err("cs==NULL in %s", __FUNCTION__); + return -ENODEV; + } + + dbg(DEBUG_IF, "%u: %s()", cs->minor_index, __FUNCTION__); + + if (down_interruptible(&cs->sem)) + return -ERESTARTSYS; // FIXME -EINTR? 
+ + if (!cs->open_count) + warn("%s: device not opened", __FUNCTION__); + else if (atomic_read(&cs->mstate) != MS_LOCKED) { + warn("can't write to unlocked device"); + retval = -EBUSY; + } else if (!atomic_read(&cs->connected)) { + dbg(DEBUG_ANY, "can't write to unplugged device"); + retval = -EBUSY; //FIXME + } else + retval = cs->ops->chars_in_buffer(cs); + + up(&cs->sem); + + return retval; +} + +static void if_throttle(struct tty_struct *tty) +{ + struct cardstate *cs; + + cs = (struct cardstate *) tty->driver_data; + if (!cs) { + err("cs==NULL in %s", __FUNCTION__); + return; + } + + dbg(DEBUG_IF, "%u: %s()", cs->minor_index, __FUNCTION__); + + down(&cs->sem); + + if (!cs->open_count) + warn("%s: device not opened", __FUNCTION__); + else { + //FIXME + } + + up(&cs->sem); +} + +static void if_unthrottle(struct tty_struct *tty) +{ + struct cardstate *cs; + + cs = (struct cardstate *) tty->driver_data; + if (!cs) { + err("cs==NULL in %s", __FUNCTION__); + return; + } + + dbg(DEBUG_IF, "%u: %s()", cs->minor_index, __FUNCTION__); + + down(&cs->sem); + + if (!cs->open_count) + warn("%s: device not opened", __FUNCTION__); + else { + //FIXME + } + + up(&cs->sem); +} + +static void if_set_termios(struct tty_struct *tty, struct termios *old) +{ + struct cardstate *cs; + unsigned int iflag; + unsigned int cflag; + unsigned int old_cflag; + unsigned int control_state, new_state; + + cs = (struct cardstate *) tty->driver_data; + if (!cs) { + err("cs==NULL in %s", __FUNCTION__); + return; + } + + dbg(DEBUG_IF, "%u: %s()", cs->minor_index, __FUNCTION__); + + down(&cs->sem); + + if (!cs->open_count) { + warn("%s: device not opened", __FUNCTION__); + goto out; + } + + if (!atomic_read(&cs->connected)) { + dbg(DEBUG_ANY, "can't communicate with unplugged device"); + goto out; + } + + // stolen from mct_u232.c + iflag = tty->termios->c_iflag; + cflag = tty->termios->c_cflag; + old_cflag = old ? old->c_cflag : cflag; //FIXME? + dbg(DEBUG_IF, "%u: iflag %x cflag %x old %x", cs->minor_index, + iflag, cflag, old_cflag); + + /* get a local copy of the current port settings */ + control_state = cs->control_state; + + /* + * Update baud rate. + * Do not attempt to cache old rates and skip settings, + * disconnects screw such tricks up completely. + * Premature optimization is the root of all evil. + */ + + /* reassert DTR and (maybe) RTS on transition from B0 */ + if ((old_cflag & CBAUD) == B0) { + new_state = control_state | TIOCM_DTR; + /* don't set RTS if using hardware flow control */ + if (!(old_cflag & CRTSCTS)) + new_state |= TIOCM_RTS; + dbg(DEBUG_IF, "%u: from B0 - set DTR%s", cs->minor_index, + (new_state & TIOCM_RTS) ? " only" : "/RTS"); + cs->ops->set_modem_ctrl(cs, control_state, new_state); + control_state = new_state; + } + + cs->ops->baud_rate(cs, cflag & CBAUD); + + if ((cflag & CBAUD) == B0) { + /* Drop RTS and DTR */ + dbg(DEBUG_IF, "%u: to B0 - drop DTR/RTS", cs->minor_index); + new_state = control_state & ~(TIOCM_DTR | TIOCM_RTS); + cs->ops->set_modem_ctrl(cs, control_state, new_state); + control_state = new_state; + } + + /* + * Update line control register (LCR) + */ + + cs->ops->set_line_ctrl(cs, cflag); + +#if 0 + //FIXME this hangs M101 [ts 2005-03-09] + //FIXME do we need this? + /* + * Set flow control: well, I do not really now how to handle DTR/RTS. + * Just do what we have seen with SniffUSB on Win98. 
+ */ + /* Drop DTR/RTS if no flow control otherwise assert */ + dbg(DEBUG_IF, "%u: control_state %x", cs->minor_index, control_state); + new_state = control_state; + if ((iflag & IXOFF) || (iflag & IXON) || (cflag & CRTSCTS)) + new_state |= TIOCM_DTR | TIOCM_RTS; + else + new_state &= ~(TIOCM_DTR | TIOCM_RTS); + if (new_state != control_state) { + dbg(DEBUG_IF, "%u: new_state %x", cs->minor_index, new_state); + gigaset_set_modem_ctrl(cs, control_state, new_state); // FIXME: mct_u232.c sets the old state here. is this a bug? + control_state = new_state; + } +#endif + + /* save off the modified port settings */ + cs->control_state = control_state; + +out: + up(&cs->sem); +} + + +/* wakeup tasklet for the write operation */ +static void if_wake(unsigned long data) +{ + struct cardstate *cs = (struct cardstate *) data; + struct tty_struct *tty; + + tty = cs->tty; + if (!tty) + return; + + if ((tty->flags & (1 << TTY_DO_WRITE_WAKEUP)) && + tty->ldisc.write_wakeup) { + dbg(DEBUG_IF, "write wakeup call"); + tty->ldisc.write_wakeup(tty); + } + + wake_up_interruptible(&tty->write_wait); +} + +/*** interface to common ***/ + +void gigaset_if_init(struct cardstate *cs) +{ + struct gigaset_driver *drv; + + drv = cs->driver; + if (!drv->have_tty) + return; + + tasklet_init(&cs->if_wake_tasklet, &if_wake, (unsigned long) cs); + tty_register_device(drv->tty, cs->minor_index, NULL); +} + +void gigaset_if_free(struct cardstate *cs) +{ + struct gigaset_driver *drv; + + drv = cs->driver; + if (!drv->have_tty) + return; + + tasklet_disable(&cs->if_wake_tasklet); + tasklet_kill(&cs->if_wake_tasklet); + tty_unregister_device(drv->tty, cs->minor_index); +} + +void gigaset_if_receive(struct cardstate *cs, + unsigned char *buffer, size_t len) +{ + unsigned long flags; + struct tty_struct *tty; + + spin_lock_irqsave(&cs->lock, flags); + if ((tty = cs->tty) == NULL) + dbg(DEBUG_ANY, "receive on closed device"); + else { + tty_buffer_request_room(tty, len); + tty_insert_flip_string(tty, buffer, len); + tty_flip_buffer_push(tty); + } + spin_unlock_irqrestore(&cs->lock, flags); +} +EXPORT_SYMBOL_GPL(gigaset_if_receive); + +/* gigaset_if_initdriver + * Initialize tty interface. + * parameters: + * drv Driver + * procname Name of the driver (e.g. 
for /proc/tty/drivers) + * devname Name of the device files (prefix without minor number) + * devfsname Devfs name of the device files without %d + */ +void gigaset_if_initdriver(struct gigaset_driver *drv, const char *procname, + const char *devname, const char *devfsname) +{ + unsigned minors = drv->minors; + int ret; + struct tty_driver *tty; + + drv->have_tty = 0; + + if ((drv->tty = alloc_tty_driver(minors)) == NULL) + goto enomem; + tty = drv->tty; + + tty->magic = TTY_DRIVER_MAGIC, + tty->major = GIG_MAJOR, + tty->type = TTY_DRIVER_TYPE_SERIAL, + tty->subtype = SERIAL_TYPE_NORMAL, + tty->flags = TTY_DRIVER_REAL_RAW | TTY_DRIVER_NO_DEVFS, + + tty->driver_name = procname; + tty->name = devname; + tty->minor_start = drv->minor; + tty->num = drv->minors; + + tty->owner = THIS_MODULE; + tty->devfs_name = devfsname; + + tty->init_termios = tty_std_termios; //FIXME + tty->init_termios.c_cflag = B9600 | CS8 | CREAD | HUPCL | CLOCAL; //FIXME + tty_set_operations(tty, &if_ops); + + ret = tty_register_driver(tty); + if (ret < 0) { + warn("failed to register tty driver (error %d)", ret); + goto error; + } + dbg(DEBUG_IF, "tty driver initialized"); + drv->have_tty = 1; + return; + +enomem: + warn("could not allocate tty structures"); +error: + if (drv->tty) + put_tty_driver(drv->tty); +} + +void gigaset_if_freedriver(struct gigaset_driver *drv) +{ + if (!drv->have_tty) + return; + + drv->have_tty = 0; + tty_unregister_driver(drv->tty); + put_tty_driver(drv->tty); +} diff --git a/drivers/isdn/gigaset/isocdata.c b/drivers/isdn/gigaset/isocdata.c new file mode 100644 index 000000000000..5744eb91b315 --- /dev/null +++ b/drivers/isdn/gigaset/isocdata.c @@ -0,0 +1,1009 @@ +/* + * Common data handling layer for bas_gigaset + * + * Copyright (c) 2005 by Tilman Schmidt <tilman@imap.cc>, + * Hansjoerg Lipp <hjlipp@web.de>. + * + * ===================================================================== + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of + * the License, or (at your option) any later version. + * ===================================================================== + * ToDo: ... 
+ * ===================================================================== + * Version: $Id: isocdata.c,v 1.2.2.5 2005/11/13 23:05:19 hjlipp Exp $ + * ===================================================================== + */ + +#include "gigaset.h" +#include <linux/crc-ccitt.h> + +/* access methods for isowbuf_t */ +/* ============================ */ + +/* initialize buffer structure + */ +void gigaset_isowbuf_init(struct isowbuf_t *iwb, unsigned char idle) +{ + atomic_set(&iwb->read, 0); + atomic_set(&iwb->nextread, 0); + atomic_set(&iwb->write, 0); + atomic_set(&iwb->writesem, 1); + iwb->wbits = 0; + iwb->idle = idle; + memset(iwb->data + BAS_OUTBUFSIZE, idle, BAS_OUTBUFPAD); +} + +/* compute number of bytes which can be appended to buffer + * so that there is still room to append a maximum frame of flags + */ +static inline int isowbuf_freebytes(struct isowbuf_t *iwb) +{ + int read, write, freebytes; + + read = atomic_read(&iwb->read); + write = atomic_read(&iwb->write); + if ((freebytes = read - write) > 0) { + /* no wraparound: need padding space within regular area */ + return freebytes - BAS_OUTBUFPAD; + } else if (read < BAS_OUTBUFPAD) { + /* wraparound: can use space up to end of regular area */ + return BAS_OUTBUFSIZE - write; + } else { + /* following the wraparound yields more space */ + return freebytes + BAS_OUTBUFSIZE - BAS_OUTBUFPAD; + } +} + +/* compare two offsets within the buffer + * The buffer is seen as circular, with the read position as start + * returns -1/0/1 if position a </=/> position b without crossing 'read' + */ +static inline int isowbuf_poscmp(struct isowbuf_t *iwb, int a, int b) +{ + int read; + if (a == b) + return 0; + read = atomic_read(&iwb->read); + if (a < b) { + if (a < read && read <= b) + return +1; + else + return -1; + } else { + if (b < read && read <= a) + return -1; + else + return +1; + } +} + +/* start writing + * acquire the write semaphore + * return true if acquired, false if busy + */ +static inline int isowbuf_startwrite(struct isowbuf_t *iwb) +{ + if (!atomic_dec_and_test(&iwb->writesem)) { + atomic_inc(&iwb->writesem); + dbg(DEBUG_ISO, + "%s: couldn't acquire iso write semaphore", __func__); + return 0; + } +#ifdef CONFIG_GIGASET_DEBUG + dbg(DEBUG_ISO, + "%s: acquired iso write semaphore, data[write]=%02x, nbits=%d", + __func__, iwb->data[atomic_read(&iwb->write)], iwb->wbits); +#endif + return 1; +} + +/* finish writing + * release the write semaphore and update the maximum buffer fill level + * returns the current write position + */ +static inline int isowbuf_donewrite(struct isowbuf_t *iwb) +{ + int write = atomic_read(&iwb->write); + atomic_inc(&iwb->writesem); + return write; +} + +/* append bits to buffer without any checks + * - data contains bits to append, starting at LSB + * - nbits is number of bits to append (0..24) + * must be called with the write semaphore held + * If more than nbits bits are set in data, the extraneous bits are set in the + * buffer too, but the write position is only advanced by nbits. 
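+ * Example: if wbits == 6 (six bits already buffered in data[write]), a
+ * call with nbits == 6 shifts the new bits up past the buffered ones,
+ * stores one completed byte (advancing the write index by one) and keeps
+ * the remaining four bits as the new partial byte, leaving wbits == 4.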
+ */ +static inline void isowbuf_putbits(struct isowbuf_t *iwb, u32 data, int nbits) +{ + int write = atomic_read(&iwb->write); + data <<= iwb->wbits; + data |= iwb->data[write]; + nbits += iwb->wbits; + while (nbits >= 8) { + iwb->data[write++] = data & 0xff; + write %= BAS_OUTBUFSIZE; + data >>= 8; + nbits -= 8; + } + iwb->wbits = nbits; + iwb->data[write] = data & 0xff; + atomic_set(&iwb->write, write); +} + +/* put final flag on HDLC bitstream + * also sets the idle fill byte to the correspondingly shifted flag pattern + * must be called with the write semaphore held + */ +static inline void isowbuf_putflag(struct isowbuf_t *iwb) +{ + int write; + + /* add two flags, thus reliably covering one byte */ + isowbuf_putbits(iwb, 0x7e7e, 8); + /* recover the idle flag byte */ + write = atomic_read(&iwb->write); + iwb->idle = iwb->data[write]; + dbg(DEBUG_ISO, "idle fill byte %02x", iwb->idle); + /* mask extraneous bits in buffer */ + iwb->data[write] &= (1 << iwb->wbits) - 1; +} + +/* retrieve a block of bytes for sending + * The requested number of bytes is provided as a contiguous block. + * If necessary, the frame is filled to the requested number of bytes + * with the idle value. + * returns offset to frame, < 0 on busy or error + */ +int gigaset_isowbuf_getbytes(struct isowbuf_t *iwb, int size) +{ + int read, write, limit, src, dst; + unsigned char pbyte; + + read = atomic_read(&iwb->nextread); + write = atomic_read(&iwb->write); + if (likely(read == write)) { + //dbg(DEBUG_STREAM, "%s: send buffer empty", __func__); + /* return idle frame */ + return read < BAS_OUTBUFPAD ? + BAS_OUTBUFSIZE : read - BAS_OUTBUFPAD; + } + + limit = read + size; + dbg(DEBUG_STREAM, + "%s: read=%d write=%d limit=%d", __func__, read, write, limit); +#ifdef CONFIG_GIGASET_DEBUG + if (unlikely(size < 0 || size > BAS_OUTBUFPAD)) { + err("invalid size %d", size); + return -EINVAL; + } + src = atomic_read(&iwb->read); + if (unlikely(limit > BAS_OUTBUFSIZE + BAS_OUTBUFPAD || + (read < src && limit >= src))) { + err("isoc write buffer frame reservation violated"); + return -EFAULT; + } +#endif + + if (read < write) { + /* no wraparound in valid data */ + if (limit >= write) { + /* append idle frame */ + if (!isowbuf_startwrite(iwb)) + return -EBUSY; + /* write position could have changed */ + if (limit >= (write = atomic_read(&iwb->write))) { + pbyte = iwb->data[write]; /* save partial byte */ + limit = write + BAS_OUTBUFPAD; + dbg(DEBUG_STREAM, + "%s: filling %d->%d with %02x", + __func__, write, limit, iwb->idle); + if (write + BAS_OUTBUFPAD < BAS_OUTBUFSIZE) + memset(iwb->data + write, iwb->idle, + BAS_OUTBUFPAD); + else { + /* wraparound, fill entire pad area */ + memset(iwb->data + write, iwb->idle, + BAS_OUTBUFSIZE + BAS_OUTBUFPAD + - write); + limit = 0; + } + dbg(DEBUG_STREAM, "%s: restoring %02x at %d", + __func__, pbyte, limit); + iwb->data[limit] = pbyte; /* restore partial byte */ + atomic_set(&iwb->write, limit); + } + isowbuf_donewrite(iwb); + } + } else { + /* valid data wraparound */ + if (limit >= BAS_OUTBUFSIZE) { + /* copy wrapped part into pad area */ + src = 0; + dst = BAS_OUTBUFSIZE; + while (dst < limit && src < write) + iwb->data[dst++] = iwb->data[src++]; + if (dst <= limit) { + /* fill pad area with idle byte */ + memset(iwb->data + dst, iwb->idle, + BAS_OUTBUFSIZE + BAS_OUTBUFPAD - dst); + } + limit = src; + } + } + atomic_set(&iwb->nextread, limit); + return read; +} + +/* dump_bytes + * write hex bytes to syslog for debugging + */ +static inline void dump_bytes(enum debuglevel level, 
const char *tag, + unsigned char *bytes, int count) +{ +#ifdef CONFIG_GIGASET_DEBUG + unsigned char c; + static char dbgline[3 * 32 + 1]; + static const char hexdigit[] = "0123456789abcdef"; + int i = 0; + IFNULLRET(tag); + IFNULLRET(bytes); + while (count-- > 0) { + if (i > sizeof(dbgline) - 4) { + dbgline[i] = '\0'; + dbg(level, "%s:%s", tag, dbgline); + i = 0; + } + c = *bytes++; + dbgline[i] = (i && !(i % 12)) ? '-' : ' '; + i++; + dbgline[i++] = hexdigit[(c >> 4) & 0x0f]; + dbgline[i++] = hexdigit[c & 0x0f]; + } + dbgline[i] = '\0'; + dbg(level, "%s:%s", tag, dbgline); +#endif +} + +/*============================================================================*/ + +/* bytewise HDLC bitstuffing via table lookup + * lookup table: 5 subtables for 0..4 preceding consecutive '1' bits + * index: 256*(number of preceding '1' bits) + (next byte to stuff) + * value: bit 9.. 0 = result bits + * bit 12..10 = number of trailing '1' bits in result + * bit 14..13 = number of bits added by stuffing + */ +static u16 stufftab[5 * 256] = { +// previous 1s = 0: + 0x0000, 0x0001, 0x0002, 0x0003, 0x0004, 0x0005, 0x0006, 0x0007, 0x0008, 0x0009, 0x000a, 0x000b, 0x000c, 0x000d, 0x000e, 0x000f, + 0x0010, 0x0011, 0x0012, 0x0013, 0x0014, 0x0015, 0x0016, 0x0017, 0x0018, 0x0019, 0x001a, 0x001b, 0x001c, 0x001d, 0x001e, 0x201f, + 0x0020, 0x0021, 0x0022, 0x0023, 0x0024, 0x0025, 0x0026, 0x0027, 0x0028, 0x0029, 0x002a, 0x002b, 0x002c, 0x002d, 0x002e, 0x002f, + 0x0030, 0x0031, 0x0032, 0x0033, 0x0034, 0x0035, 0x0036, 0x0037, 0x0038, 0x0039, 0x003a, 0x003b, 0x003c, 0x003d, 0x203e, 0x205f, + 0x0040, 0x0041, 0x0042, 0x0043, 0x0044, 0x0045, 0x0046, 0x0047, 0x0048, 0x0049, 0x004a, 0x004b, 0x004c, 0x004d, 0x004e, 0x004f, + 0x0050, 0x0051, 0x0052, 0x0053, 0x0054, 0x0055, 0x0056, 0x0057, 0x0058, 0x0059, 0x005a, 0x005b, 0x005c, 0x005d, 0x005e, 0x209f, + 0x0060, 0x0061, 0x0062, 0x0063, 0x0064, 0x0065, 0x0066, 0x0067, 0x0068, 0x0069, 0x006a, 0x006b, 0x006c, 0x006d, 0x006e, 0x006f, + 0x0070, 0x0071, 0x0072, 0x0073, 0x0074, 0x0075, 0x0076, 0x0077, 0x0078, 0x0079, 0x007a, 0x007b, 0x207c, 0x207d, 0x20be, 0x20df, + 0x0480, 0x0481, 0x0482, 0x0483, 0x0484, 0x0485, 0x0486, 0x0487, 0x0488, 0x0489, 0x048a, 0x048b, 0x048c, 0x048d, 0x048e, 0x048f, + 0x0490, 0x0491, 0x0492, 0x0493, 0x0494, 0x0495, 0x0496, 0x0497, 0x0498, 0x0499, 0x049a, 0x049b, 0x049c, 0x049d, 0x049e, 0x251f, + 0x04a0, 0x04a1, 0x04a2, 0x04a3, 0x04a4, 0x04a5, 0x04a6, 0x04a7, 0x04a8, 0x04a9, 0x04aa, 0x04ab, 0x04ac, 0x04ad, 0x04ae, 0x04af, + 0x04b0, 0x04b1, 0x04b2, 0x04b3, 0x04b4, 0x04b5, 0x04b6, 0x04b7, 0x04b8, 0x04b9, 0x04ba, 0x04bb, 0x04bc, 0x04bd, 0x253e, 0x255f, + 0x08c0, 0x08c1, 0x08c2, 0x08c3, 0x08c4, 0x08c5, 0x08c6, 0x08c7, 0x08c8, 0x08c9, 0x08ca, 0x08cb, 0x08cc, 0x08cd, 0x08ce, 0x08cf, + 0x08d0, 0x08d1, 0x08d2, 0x08d3, 0x08d4, 0x08d5, 0x08d6, 0x08d7, 0x08d8, 0x08d9, 0x08da, 0x08db, 0x08dc, 0x08dd, 0x08de, 0x299f, + 0x0ce0, 0x0ce1, 0x0ce2, 0x0ce3, 0x0ce4, 0x0ce5, 0x0ce6, 0x0ce7, 0x0ce8, 0x0ce9, 0x0cea, 0x0ceb, 0x0cec, 0x0ced, 0x0cee, 0x0cef, + 0x10f0, 0x10f1, 0x10f2, 0x10f3, 0x10f4, 0x10f5, 0x10f6, 0x10f7, 0x20f8, 0x20f9, 0x20fa, 0x20fb, 0x257c, 0x257d, 0x29be, 0x2ddf, + +// previous 1s = 1: + 0x0000, 0x0001, 0x0002, 0x0003, 0x0004, 0x0005, 0x0006, 0x0007, 0x0008, 0x0009, 0x000a, 0x000b, 0x000c, 0x000d, 0x000e, 0x200f, + 0x0010, 0x0011, 0x0012, 0x0013, 0x0014, 0x0015, 0x0016, 0x0017, 0x0018, 0x0019, 0x001a, 0x001b, 0x001c, 0x001d, 0x001e, 0x202f, + 0x0020, 0x0021, 0x0022, 0x0023, 0x0024, 0x0025, 0x0026, 0x0027, 0x0028, 0x0029, 0x002a, 0x002b, 0x002c, 0x002d, 0x002e, 
0x204f, + 0x0030, 0x0031, 0x0032, 0x0033, 0x0034, 0x0035, 0x0036, 0x0037, 0x0038, 0x0039, 0x003a, 0x003b, 0x003c, 0x003d, 0x203e, 0x206f, + 0x0040, 0x0041, 0x0042, 0x0043, 0x0044, 0x0045, 0x0046, 0x0047, 0x0048, 0x0049, 0x004a, 0x004b, 0x004c, 0x004d, 0x004e, 0x208f, + 0x0050, 0x0051, 0x0052, 0x0053, 0x0054, 0x0055, 0x0056, 0x0057, 0x0058, 0x0059, 0x005a, 0x005b, 0x005c, 0x005d, 0x005e, 0x20af, + 0x0060, 0x0061, 0x0062, 0x0063, 0x0064, 0x0065, 0x0066, 0x0067, 0x0068, 0x0069, 0x006a, 0x006b, 0x006c, 0x006d, 0x006e, 0x20cf, + 0x0070, 0x0071, 0x0072, 0x0073, 0x0074, 0x0075, 0x0076, 0x0077, 0x0078, 0x0079, 0x007a, 0x007b, 0x207c, 0x207d, 0x20be, 0x20ef, + 0x0480, 0x0481, 0x0482, 0x0483, 0x0484, 0x0485, 0x0486, 0x0487, 0x0488, 0x0489, 0x048a, 0x048b, 0x048c, 0x048d, 0x048e, 0x250f, + 0x0490, 0x0491, 0x0492, 0x0493, 0x0494, 0x0495, 0x0496, 0x0497, 0x0498, 0x0499, 0x049a, 0x049b, 0x049c, 0x049d, 0x049e, 0x252f, + 0x04a0, 0x04a1, 0x04a2, 0x04a3, 0x04a4, 0x04a5, 0x04a6, 0x04a7, 0x04a8, 0x04a9, 0x04aa, 0x04ab, 0x04ac, 0x04ad, 0x04ae, 0x254f, + 0x04b0, 0x04b1, 0x04b2, 0x04b3, 0x04b4, 0x04b5, 0x04b6, 0x04b7, 0x04b8, 0x04b9, 0x04ba, 0x04bb, 0x04bc, 0x04bd, 0x253e, 0x256f, + 0x08c0, 0x08c1, 0x08c2, 0x08c3, 0x08c4, 0x08c5, 0x08c6, 0x08c7, 0x08c8, 0x08c9, 0x08ca, 0x08cb, 0x08cc, 0x08cd, 0x08ce, 0x298f, + 0x08d0, 0x08d1, 0x08d2, 0x08d3, 0x08d4, 0x08d5, 0x08d6, 0x08d7, 0x08d8, 0x08d9, 0x08da, 0x08db, 0x08dc, 0x08dd, 0x08de, 0x29af, + 0x0ce0, 0x0ce1, 0x0ce2, 0x0ce3, 0x0ce4, 0x0ce5, 0x0ce6, 0x0ce7, 0x0ce8, 0x0ce9, 0x0cea, 0x0ceb, 0x0cec, 0x0ced, 0x0cee, 0x2dcf, + 0x10f0, 0x10f1, 0x10f2, 0x10f3, 0x10f4, 0x10f5, 0x10f6, 0x10f7, 0x20f8, 0x20f9, 0x20fa, 0x20fb, 0x257c, 0x257d, 0x29be, 0x31ef, + +// previous 1s = 2: + 0x0000, 0x0001, 0x0002, 0x0003, 0x0004, 0x0005, 0x0006, 0x2007, 0x0008, 0x0009, 0x000a, 0x000b, 0x000c, 0x000d, 0x000e, 0x2017, + 0x0010, 0x0011, 0x0012, 0x0013, 0x0014, 0x0015, 0x0016, 0x2027, 0x0018, 0x0019, 0x001a, 0x001b, 0x001c, 0x001d, 0x001e, 0x2037, + 0x0020, 0x0021, 0x0022, 0x0023, 0x0024, 0x0025, 0x0026, 0x2047, 0x0028, 0x0029, 0x002a, 0x002b, 0x002c, 0x002d, 0x002e, 0x2057, + 0x0030, 0x0031, 0x0032, 0x0033, 0x0034, 0x0035, 0x0036, 0x2067, 0x0038, 0x0039, 0x003a, 0x003b, 0x003c, 0x003d, 0x203e, 0x2077, + 0x0040, 0x0041, 0x0042, 0x0043, 0x0044, 0x0045, 0x0046, 0x2087, 0x0048, 0x0049, 0x004a, 0x004b, 0x004c, 0x004d, 0x004e, 0x2097, + 0x0050, 0x0051, 0x0052, 0x0053, 0x0054, 0x0055, 0x0056, 0x20a7, 0x0058, 0x0059, 0x005a, 0x005b, 0x005c, 0x005d, 0x005e, 0x20b7, + 0x0060, 0x0061, 0x0062, 0x0063, 0x0064, 0x0065, 0x0066, 0x20c7, 0x0068, 0x0069, 0x006a, 0x006b, 0x006c, 0x006d, 0x006e, 0x20d7, + 0x0070, 0x0071, 0x0072, 0x0073, 0x0074, 0x0075, 0x0076, 0x20e7, 0x0078, 0x0079, 0x007a, 0x007b, 0x207c, 0x207d, 0x20be, 0x20f7, + 0x0480, 0x0481, 0x0482, 0x0483, 0x0484, 0x0485, 0x0486, 0x2507, 0x0488, 0x0489, 0x048a, 0x048b, 0x048c, 0x048d, 0x048e, 0x2517, + 0x0490, 0x0491, 0x0492, 0x0493, 0x0494, 0x0495, 0x0496, 0x2527, 0x0498, 0x0499, 0x049a, 0x049b, 0x049c, 0x049d, 0x049e, 0x2537, + 0x04a0, 0x04a1, 0x04a2, 0x04a3, 0x04a4, 0x04a5, 0x04a6, 0x2547, 0x04a8, 0x04a9, 0x04aa, 0x04ab, 0x04ac, 0x04ad, 0x04ae, 0x2557, + 0x04b0, 0x04b1, 0x04b2, 0x04b3, 0x04b4, 0x04b5, 0x04b6, 0x2567, 0x04b8, 0x04b9, 0x04ba, 0x04bb, 0x04bc, 0x04bd, 0x253e, 0x2577, + 0x08c0, 0x08c1, 0x08c2, 0x08c3, 0x08c4, 0x08c5, 0x08c6, 0x2987, 0x08c8, 0x08c9, 0x08ca, 0x08cb, 0x08cc, 0x08cd, 0x08ce, 0x2997, + 0x08d0, 0x08d1, 0x08d2, 0x08d3, 0x08d4, 0x08d5, 0x08d6, 0x29a7, 0x08d8, 0x08d9, 0x08da, 0x08db, 0x08dc, 0x08dd, 0x08de, 0x29b7, + 0x0ce0, 
0x0ce1, 0x0ce2, 0x0ce3, 0x0ce4, 0x0ce5, 0x0ce6, 0x2dc7, 0x0ce8, 0x0ce9, 0x0cea, 0x0ceb, 0x0cec, 0x0ced, 0x0cee, 0x2dd7, + 0x10f0, 0x10f1, 0x10f2, 0x10f3, 0x10f4, 0x10f5, 0x10f6, 0x31e7, 0x20f8, 0x20f9, 0x20fa, 0x20fb, 0x257c, 0x257d, 0x29be, 0x41f7, + +// previous 1s = 3: + 0x0000, 0x0001, 0x0002, 0x2003, 0x0004, 0x0005, 0x0006, 0x200b, 0x0008, 0x0009, 0x000a, 0x2013, 0x000c, 0x000d, 0x000e, 0x201b, + 0x0010, 0x0011, 0x0012, 0x2023, 0x0014, 0x0015, 0x0016, 0x202b, 0x0018, 0x0019, 0x001a, 0x2033, 0x001c, 0x001d, 0x001e, 0x203b, + 0x0020, 0x0021, 0x0022, 0x2043, 0x0024, 0x0025, 0x0026, 0x204b, 0x0028, 0x0029, 0x002a, 0x2053, 0x002c, 0x002d, 0x002e, 0x205b, + 0x0030, 0x0031, 0x0032, 0x2063, 0x0034, 0x0035, 0x0036, 0x206b, 0x0038, 0x0039, 0x003a, 0x2073, 0x003c, 0x003d, 0x203e, 0x207b, + 0x0040, 0x0041, 0x0042, 0x2083, 0x0044, 0x0045, 0x0046, 0x208b, 0x0048, 0x0049, 0x004a, 0x2093, 0x004c, 0x004d, 0x004e, 0x209b, + 0x0050, 0x0051, 0x0052, 0x20a3, 0x0054, 0x0055, 0x0056, 0x20ab, 0x0058, 0x0059, 0x005a, 0x20b3, 0x005c, 0x005d, 0x005e, 0x20bb, + 0x0060, 0x0061, 0x0062, 0x20c3, 0x0064, 0x0065, 0x0066, 0x20cb, 0x0068, 0x0069, 0x006a, 0x20d3, 0x006c, 0x006d, 0x006e, 0x20db, + 0x0070, 0x0071, 0x0072, 0x20e3, 0x0074, 0x0075, 0x0076, 0x20eb, 0x0078, 0x0079, 0x007a, 0x20f3, 0x207c, 0x207d, 0x20be, 0x40fb, + 0x0480, 0x0481, 0x0482, 0x2503, 0x0484, 0x0485, 0x0486, 0x250b, 0x0488, 0x0489, 0x048a, 0x2513, 0x048c, 0x048d, 0x048e, 0x251b, + 0x0490, 0x0491, 0x0492, 0x2523, 0x0494, 0x0495, 0x0496, 0x252b, 0x0498, 0x0499, 0x049a, 0x2533, 0x049c, 0x049d, 0x049e, 0x253b, + 0x04a0, 0x04a1, 0x04a2, 0x2543, 0x04a4, 0x04a5, 0x04a6, 0x254b, 0x04a8, 0x04a9, 0x04aa, 0x2553, 0x04ac, 0x04ad, 0x04ae, 0x255b, + 0x04b0, 0x04b1, 0x04b2, 0x2563, 0x04b4, 0x04b5, 0x04b6, 0x256b, 0x04b8, 0x04b9, 0x04ba, 0x2573, 0x04bc, 0x04bd, 0x253e, 0x257b, + 0x08c0, 0x08c1, 0x08c2, 0x2983, 0x08c4, 0x08c5, 0x08c6, 0x298b, 0x08c8, 0x08c9, 0x08ca, 0x2993, 0x08cc, 0x08cd, 0x08ce, 0x299b, + 0x08d0, 0x08d1, 0x08d2, 0x29a3, 0x08d4, 0x08d5, 0x08d6, 0x29ab, 0x08d8, 0x08d9, 0x08da, 0x29b3, 0x08dc, 0x08dd, 0x08de, 0x29bb, + 0x0ce0, 0x0ce1, 0x0ce2, 0x2dc3, 0x0ce4, 0x0ce5, 0x0ce6, 0x2dcb, 0x0ce8, 0x0ce9, 0x0cea, 0x2dd3, 0x0cec, 0x0ced, 0x0cee, 0x2ddb, + 0x10f0, 0x10f1, 0x10f2, 0x31e3, 0x10f4, 0x10f5, 0x10f6, 0x31eb, 0x20f8, 0x20f9, 0x20fa, 0x41f3, 0x257c, 0x257d, 0x29be, 0x46fb, + +// previous 1s = 4: + 0x0000, 0x2001, 0x0002, 0x2005, 0x0004, 0x2009, 0x0006, 0x200d, 0x0008, 0x2011, 0x000a, 0x2015, 0x000c, 0x2019, 0x000e, 0x201d, + 0x0010, 0x2021, 0x0012, 0x2025, 0x0014, 0x2029, 0x0016, 0x202d, 0x0018, 0x2031, 0x001a, 0x2035, 0x001c, 0x2039, 0x001e, 0x203d, + 0x0020, 0x2041, 0x0022, 0x2045, 0x0024, 0x2049, 0x0026, 0x204d, 0x0028, 0x2051, 0x002a, 0x2055, 0x002c, 0x2059, 0x002e, 0x205d, + 0x0030, 0x2061, 0x0032, 0x2065, 0x0034, 0x2069, 0x0036, 0x206d, 0x0038, 0x2071, 0x003a, 0x2075, 0x003c, 0x2079, 0x203e, 0x407d, + 0x0040, 0x2081, 0x0042, 0x2085, 0x0044, 0x2089, 0x0046, 0x208d, 0x0048, 0x2091, 0x004a, 0x2095, 0x004c, 0x2099, 0x004e, 0x209d, + 0x0050, 0x20a1, 0x0052, 0x20a5, 0x0054, 0x20a9, 0x0056, 0x20ad, 0x0058, 0x20b1, 0x005a, 0x20b5, 0x005c, 0x20b9, 0x005e, 0x20bd, + 0x0060, 0x20c1, 0x0062, 0x20c5, 0x0064, 0x20c9, 0x0066, 0x20cd, 0x0068, 0x20d1, 0x006a, 0x20d5, 0x006c, 0x20d9, 0x006e, 0x20dd, + 0x0070, 0x20e1, 0x0072, 0x20e5, 0x0074, 0x20e9, 0x0076, 0x20ed, 0x0078, 0x20f1, 0x007a, 0x20f5, 0x207c, 0x40f9, 0x20be, 0x417d, + 0x0480, 0x2501, 0x0482, 0x2505, 0x0484, 0x2509, 0x0486, 0x250d, 0x0488, 0x2511, 0x048a, 0x2515, 0x048c, 0x2519, 0x048e, 0x251d, + 
0x0490, 0x2521, 0x0492, 0x2525, 0x0494, 0x2529, 0x0496, 0x252d, 0x0498, 0x2531, 0x049a, 0x2535, 0x049c, 0x2539, 0x049e, 0x253d, + 0x04a0, 0x2541, 0x04a2, 0x2545, 0x04a4, 0x2549, 0x04a6, 0x254d, 0x04a8, 0x2551, 0x04aa, 0x2555, 0x04ac, 0x2559, 0x04ae, 0x255d, + 0x04b0, 0x2561, 0x04b2, 0x2565, 0x04b4, 0x2569, 0x04b6, 0x256d, 0x04b8, 0x2571, 0x04ba, 0x2575, 0x04bc, 0x2579, 0x253e, 0x467d, + 0x08c0, 0x2981, 0x08c2, 0x2985, 0x08c4, 0x2989, 0x08c6, 0x298d, 0x08c8, 0x2991, 0x08ca, 0x2995, 0x08cc, 0x2999, 0x08ce, 0x299d, + 0x08d0, 0x29a1, 0x08d2, 0x29a5, 0x08d4, 0x29a9, 0x08d6, 0x29ad, 0x08d8, 0x29b1, 0x08da, 0x29b5, 0x08dc, 0x29b9, 0x08de, 0x29bd, + 0x0ce0, 0x2dc1, 0x0ce2, 0x2dc5, 0x0ce4, 0x2dc9, 0x0ce6, 0x2dcd, 0x0ce8, 0x2dd1, 0x0cea, 0x2dd5, 0x0cec, 0x2dd9, 0x0cee, 0x2ddd, + 0x10f0, 0x31e1, 0x10f2, 0x31e5, 0x10f4, 0x31e9, 0x10f6, 0x31ed, 0x20f8, 0x41f1, 0x20fa, 0x41f5, 0x257c, 0x46f9, 0x29be, 0x4b7d +}; + +/* hdlc_bitstuff_byte + * perform HDLC bitstuffing for one input byte (8 bits, LSB first) + * parameters: + * cin input byte + * ones number of trailing '1' bits in result before this step + * iwb pointer to output buffer structure (write semaphore must be held) + * return value: + * number of trailing '1' bits in result after this step + */ + +static inline int hdlc_bitstuff_byte(struct isowbuf_t *iwb, unsigned char cin, + int ones) +{ + u16 stuff; + int shiftinc, newones; + + /* get stuffing information for input byte + * value: bit 9.. 0 = result bits + * bit 12..10 = number of trailing '1' bits in result + * bit 14..13 = number of bits added by stuffing + */ + stuff = stufftab[256 * ones + cin]; + shiftinc = (stuff >> 13) & 3; + newones = (stuff >> 10) & 7; + stuff &= 0x3ff; + + /* append stuffed byte to output stream */ + isowbuf_putbits(iwb, stuff, 8 + shiftinc); + return newones; +} + +/* hdlc_buildframe + * Perform HDLC framing with bitstuffing on a byte buffer + * The input buffer is regarded as a sequence of bits, starting with the least + * significant bit of the first byte and ending with the most significant bit + * of the last byte. A 16 bit FCS is appended as defined by RFC 1662. + * Whenever five consecutive '1' bits appear in the resulting bit sequence, a + * '0' bit is inserted after them. + * The resulting bit string and a closing flag pattern (PPP_FLAG, '01111110') + * are appended to the output buffer starting at the given bit position, which + * is assumed to already contain a leading flag. + * The output buffer must have sufficient length; count + count/5 + 6 bytes + * starting at *out are safe and are verified to be present. 
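+ * For example, a 100 byte packet may need up to 100 + 100/5 + 6 = 126
+ * bytes: bitstuffing inserts at most one extra bit for every five
+ * payload bits (hence count/5), and the constant 6 leaves room for the
+ * two FCS bytes with their own stuffing, the closing flag and rounding
+ * to whole bytes.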
+ * parameters: + * in input buffer + * count number of bytes in input buffer + * iwb pointer to output buffer structure (write semaphore must be held) + * return value: + * position of end of packet in output buffer on success, + * -EAGAIN if write semaphore busy or buffer full + */ + +static inline int hdlc_buildframe(struct isowbuf_t *iwb, + unsigned char *in, int count) +{ + int ones; + u16 fcs; + int end; + unsigned char c; + + if (isowbuf_freebytes(iwb) < count + count / 5 + 6 || + !isowbuf_startwrite(iwb)) { + dbg(DEBUG_ISO, "%s: %d bytes free -> -EAGAIN", + __func__, isowbuf_freebytes(iwb)); + return -EAGAIN; + } + + dump_bytes(DEBUG_STREAM, "snd data", in, count); + + /* bitstuff and checksum input data */ + fcs = PPP_INITFCS; + ones = 0; + while (count-- > 0) { + c = *in++; + ones = hdlc_bitstuff_byte(iwb, c, ones); + fcs = crc_ccitt_byte(fcs, c); + } + + /* bitstuff and append FCS (complemented, least significant byte first) */ + fcs ^= 0xffff; + ones = hdlc_bitstuff_byte(iwb, fcs & 0x00ff, ones); + ones = hdlc_bitstuff_byte(iwb, (fcs >> 8) & 0x00ff, ones); + + /* put closing flag and repeat byte for flag idle */ + isowbuf_putflag(iwb); + end = isowbuf_donewrite(iwb); + dump_bytes(DEBUG_STREAM_DUMP, "isowbuf", iwb->data, end + 1); + return end; +} + +/* trans_buildframe + * Append a block of 'transparent' data to the output buffer, + * inverting the bytes. + * The output buffer must have sufficient length; count bytes + * starting at *out are safe and are verified to be present. + * parameters: + * in input buffer + * count number of bytes in input buffer + * iwb pointer to output buffer structure (write semaphore must be held) + * return value: + * position of end of packet in output buffer on success, + * -EAGAIN if write semaphore busy or buffer full + */ + +static inline int trans_buildframe(struct isowbuf_t *iwb, + unsigned char *in, int count) +{ + int write; + unsigned char c; + + if (unlikely(count <= 0)) + return atomic_read(&iwb->write); /* better ideas? 
*/ + + if (isowbuf_freebytes(iwb) < count || + !isowbuf_startwrite(iwb)) { + dbg(DEBUG_ISO, "can't put %d bytes", count); + return -EAGAIN; + } + + dbg(DEBUG_STREAM, "put %d bytes", count); + write = atomic_read(&iwb->write); + do { + c = gigaset_invtab[*in++]; + iwb->data[write++] = c; + write %= BAS_OUTBUFSIZE; + } while (--count > 0); + atomic_set(&iwb->write, write); + iwb->idle = c; + + return isowbuf_donewrite(iwb); +} + +int gigaset_isoc_buildframe(struct bc_state *bcs, unsigned char *in, int len) +{ + int result; + + switch (bcs->proto2) { + case ISDN_PROTO_L2_HDLC: + result = hdlc_buildframe(bcs->hw.bas->isooutbuf, in, len); + dbg(DEBUG_ISO, "%s: %d bytes HDLC -> %d", __func__, len, result); + break; + default: /* assume transparent */ + result = trans_buildframe(bcs->hw.bas->isooutbuf, in, len); + dbg(DEBUG_ISO, "%s: %d bytes trans -> %d", __func__, len, result); + } + return result; +} + +/* hdlc_putbyte + * append byte c to current skb of B channel structure *bcs, updating fcs + */ +static inline void hdlc_putbyte(unsigned char c, struct bc_state *bcs) +{ + bcs->fcs = crc_ccitt_byte(bcs->fcs, c); + if (unlikely(bcs->skb == NULL)) { + /* skipping */ + return; + } + if (unlikely(bcs->skb->len == SBUFSIZE)) { + warn("received oversized packet discarded"); + bcs->hw.bas->giants++; + dev_kfree_skb_any(bcs->skb); + bcs->skb = NULL; + return; + } + *gigaset_skb_put_quick(bcs->skb, 1) = c; +} + +/* hdlc_flush + * drop partial HDLC data packet + */ +static inline void hdlc_flush(struct bc_state *bcs) +{ + /* clear skb or allocate new if not skipping */ + if (likely(bcs->skb != NULL)) + skb_trim(bcs->skb, 0); + else if (!bcs->ignore) { + if ((bcs->skb = dev_alloc_skb(SBUFSIZE + HW_HDR_LEN)) != NULL) + skb_reserve(bcs->skb, HW_HDR_LEN); + else + err("could not allocate skb"); + } + + /* reset packet state */ + bcs->fcs = PPP_INITFCS; +} + +/* hdlc_done + * process completed HDLC data packet + */ +static inline void hdlc_done(struct bc_state *bcs) +{ + struct sk_buff *procskb; + + if (unlikely(bcs->ignore)) { + bcs->ignore--; + hdlc_flush(bcs); + return; + } + + if ((procskb = bcs->skb) == NULL) { + /* previous error */ + dbg(DEBUG_ISO, "%s: skb=NULL", __func__); + gigaset_rcv_error(NULL, bcs->cs, bcs); + } else if (procskb->len < 2) { + notice("received short frame (%d octets)", procskb->len); + bcs->hw.bas->runts++; + gigaset_rcv_error(procskb, bcs->cs, bcs); + } else if (bcs->fcs != PPP_GOODFCS) { + notice("frame check error (0x%04x)", bcs->fcs); + bcs->hw.bas->fcserrs++; + gigaset_rcv_error(procskb, bcs->cs, bcs); + } else { + procskb->len -= 2; /* subtract FCS */ + procskb->tail -= 2; + dbg(DEBUG_ISO, + "%s: good frame (%d octets)", __func__, procskb->len); + dump_bytes(DEBUG_STREAM, + "rcv data", procskb->data, procskb->len); + bcs->hw.bas->goodbytes += procskb->len; + gigaset_rcv_skb(procskb, bcs->cs, bcs); + } + + if ((bcs->skb = dev_alloc_skb(SBUFSIZE + HW_HDR_LEN)) != NULL) + skb_reserve(bcs->skb, HW_HDR_LEN); + else + err("could not allocate skb"); + bcs->fcs = PPP_INITFCS; +} + +/* hdlc_frag + * drop HDLC data packet with non-integral last byte + */ +static inline void hdlc_frag(struct bc_state *bcs, unsigned inbits) +{ + if (unlikely(bcs->ignore)) { + bcs->ignore--; + hdlc_flush(bcs); + return; + } + + notice("received partial byte (%d bits)", inbits); + bcs->hw.bas->alignerrs++; + gigaset_rcv_error(bcs->skb, bcs->cs, bcs); + + if ((bcs->skb = dev_alloc_skb(SBUFSIZE + HW_HDR_LEN)) != NULL) + skb_reserve(bcs->skb, HW_HDR_LEN); + else + err("could not allocate skb"); + bcs->fcs 
= PPP_INITFCS; +} + +/* bit counts lookup table for HDLC bit unstuffing + * index: input byte + * value: bit 0..3 = number of consecutive '1' bits starting from LSB + * bit 4..6 = number of consecutive '1' bits starting from MSB + * (replacing 8 by 7 to make it fit; the algorithm won't care) + * bit 7 set if there are 5 or more "interior" consecutive '1' bits + */ +static unsigned char bitcounts[256] = { + 0x00, 0x01, 0x00, 0x02, 0x00, 0x01, 0x00, 0x03, 0x00, 0x01, 0x00, 0x02, 0x00, 0x01, 0x00, 0x04, + 0x00, 0x01, 0x00, 0x02, 0x00, 0x01, 0x00, 0x03, 0x00, 0x01, 0x00, 0x02, 0x00, 0x01, 0x00, 0x05, + 0x00, 0x01, 0x00, 0x02, 0x00, 0x01, 0x00, 0x03, 0x00, 0x01, 0x00, 0x02, 0x00, 0x01, 0x00, 0x04, + 0x00, 0x01, 0x00, 0x02, 0x00, 0x01, 0x00, 0x03, 0x00, 0x01, 0x00, 0x02, 0x00, 0x01, 0x80, 0x06, + 0x00, 0x01, 0x00, 0x02, 0x00, 0x01, 0x00, 0x03, 0x00, 0x01, 0x00, 0x02, 0x00, 0x01, 0x00, 0x04, + 0x00, 0x01, 0x00, 0x02, 0x00, 0x01, 0x00, 0x03, 0x00, 0x01, 0x00, 0x02, 0x00, 0x01, 0x00, 0x05, + 0x00, 0x01, 0x00, 0x02, 0x00, 0x01, 0x00, 0x03, 0x00, 0x01, 0x00, 0x02, 0x00, 0x01, 0x00, 0x04, + 0x00, 0x01, 0x00, 0x02, 0x00, 0x01, 0x00, 0x03, 0x00, 0x01, 0x00, 0x02, 0x80, 0x81, 0x80, 0x07, + 0x10, 0x11, 0x10, 0x12, 0x10, 0x11, 0x10, 0x13, 0x10, 0x11, 0x10, 0x12, 0x10, 0x11, 0x10, 0x14, + 0x10, 0x11, 0x10, 0x12, 0x10, 0x11, 0x10, 0x13, 0x10, 0x11, 0x10, 0x12, 0x10, 0x11, 0x10, 0x15, + 0x10, 0x11, 0x10, 0x12, 0x10, 0x11, 0x10, 0x13, 0x10, 0x11, 0x10, 0x12, 0x10, 0x11, 0x10, 0x14, + 0x10, 0x11, 0x10, 0x12, 0x10, 0x11, 0x10, 0x13, 0x10, 0x11, 0x10, 0x12, 0x10, 0x11, 0x90, 0x16, + 0x20, 0x21, 0x20, 0x22, 0x20, 0x21, 0x20, 0x23, 0x20, 0x21, 0x20, 0x22, 0x20, 0x21, 0x20, 0x24, + 0x20, 0x21, 0x20, 0x22, 0x20, 0x21, 0x20, 0x23, 0x20, 0x21, 0x20, 0x22, 0x20, 0x21, 0x20, 0x25, + 0x30, 0x31, 0x30, 0x32, 0x30, 0x31, 0x30, 0x33, 0x30, 0x31, 0x30, 0x32, 0x30, 0x31, 0x30, 0x34, + 0x40, 0x41, 0x40, 0x42, 0x40, 0x41, 0x40, 0x43, 0x50, 0x51, 0x50, 0x52, 0x60, 0x61, 0x70, 0x78 +}; + +/* hdlc_unpack + * perform HDLC frame processing (bit unstuffing, flag detection, FCS calculation) + * on a sequence of received data bytes (8 bits each, LSB first) + * pass on successfully received, complete frames as SKBs via gigaset_rcv_skb + * notify of errors via gigaset_rcv_error + * tally frames, errors etc. in BC structure counters + * parameters: + * src received data + * count number of received bytes + * bcs receiving B channel structure + */ +static inline void hdlc_unpack(unsigned char *src, unsigned count, + struct bc_state *bcs) +{ + struct bas_bc_state *ubc; + int inputstate; + unsigned seqlen, inbyte, inbits; + + IFNULLRET(bcs); + ubc = bcs->hw.bas; + IFNULLRET(ubc); + + /* load previous state: + * inputstate = set of flag bits: + * - INS_flag_hunt: no complete opening flag received since connection setup or last abort + * - INS_have_data: at least one complete data byte received since last flag + * seqlen = number of consecutive '1' bits in last 7 input stream bits (0..7) + * inbyte = accumulated partial data byte (if !INS_flag_hunt) + * inbits = number of valid bits in inbyte, starting at LSB (0..6) + */ + inputstate = bcs->inputstate; + seqlen = ubc->seqlen; + inbyte = ubc->inbyte; + inbits = ubc->inbits; + + /* bit unstuffing a byte a time + * Take your time to understand this; it's straightforward but tedious. + * The "bitcounts" lookup table is used to speed up the counting of + * leading and trailing '1' bits. 
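+ * For example, bitcounts[0x3f] == 0x06: a run of six '1' bits starts at
+ * the LSB and none at the MSB. bitcounts[0x3e] == 0x80: neither end of
+ * the byte is a '1', but the five consecutive interior '1' bits set
+ * bit 7.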
+ */ + while (count--) { + unsigned char c = *src++; + unsigned char tabentry = bitcounts[c]; + unsigned lead1 = tabentry & 0x0f; + unsigned trail1 = (tabentry >> 4) & 0x0f; + + seqlen += lead1; + + if (unlikely(inputstate & INS_flag_hunt)) { + if (c == PPP_FLAG) { + /* flag-in-one */ + inputstate &= ~(INS_flag_hunt | INS_have_data); + inbyte = 0; + inbits = 0; + } else if (seqlen == 6 && trail1 != 7) { + /* flag completed & not followed by abort */ + inputstate &= ~(INS_flag_hunt | INS_have_data); + inbyte = c >> (lead1 + 1); + inbits = 7 - lead1; + if (trail1 >= 8) { + /* interior stuffing: omitting the MSB handles most cases */ + inbits--; + /* correct the incorrectly handled cases individually */ + switch (c) { + case 0xbe: + inbyte = 0x3f; + break; + } + } + } + /* else: continue flag-hunting */ + } else if (likely(seqlen < 5 && trail1 < 7)) { + /* streamlined case: 8 data bits, no stuffing */ + inbyte |= c << inbits; + hdlc_putbyte(inbyte & 0xff, bcs); + inputstate |= INS_have_data; + inbyte >>= 8; + /* inbits unchanged */ + } else if (likely(seqlen == 6 && inbits == 7 - lead1 && + trail1 + 1 == inbits && + !(inputstate & INS_have_data))) { + /* streamlined case: flag idle - state unchanged */ + } else if (unlikely(seqlen > 6)) { + /* abort sequence */ + ubc->aborts++; + hdlc_flush(bcs); + inputstate |= INS_flag_hunt; + } else if (seqlen == 6) { + /* closing flag, including (6 - lead1) '1's and one '0' from inbits */ + if (inbits > 7 - lead1) { + hdlc_frag(bcs, inbits + lead1 - 7); + inputstate &= ~INS_have_data; + } else { + if (inbits < 7 - lead1) + ubc->stolen0s ++; + if (inputstate & INS_have_data) { + hdlc_done(bcs); + inputstate &= ~INS_have_data; + } + } + + if (c == PPP_FLAG) { + /* complete flag, LSB overlaps preceding flag */ + ubc->shared0s ++; + inbits = 0; + inbyte = 0; + } else if (trail1 != 7) { + /* remaining bits */ + inbyte = c >> (lead1 + 1); + inbits = 7 - lead1; + if (trail1 >= 8) { + /* interior stuffing: omitting the MSB handles most cases */ + inbits--; + /* correct the incorrectly handled cases individually */ + switch (c) { + case 0xbe: + inbyte = 0x3f; + break; + } + } + } else { + /* abort sequence follows, skb already empty anyway */ + ubc->aborts++; + inputstate |= INS_flag_hunt; + } + } else { /* (seqlen < 6) && (seqlen == 5 || trail1 >= 7) */ + + if (c == PPP_FLAG) { + /* complete flag */ + if (seqlen == 5) + ubc->stolen0s++; + if (inbits) { + hdlc_frag(bcs, inbits); + inbits = 0; + inbyte = 0; + } else if (inputstate & INS_have_data) + hdlc_done(bcs); + inputstate &= ~INS_have_data; + } else if (trail1 == 7) { + /* abort sequence */ + ubc->aborts++; + hdlc_flush(bcs); + inputstate |= INS_flag_hunt; + } else { + /* stuffed data */ + if (trail1 < 7) { /* => seqlen == 5 */ + /* stuff bit at position lead1, no interior stuffing */ + unsigned char mask = (1 << lead1) - 1; + c = (c & mask) | ((c & ~mask) >> 1); + inbyte |= c << inbits; + inbits += 7; + } else if (seqlen < 5) { /* trail1 >= 8 */ + /* interior stuffing: omitting the MSB handles most cases */ + /* correct the incorrectly handled cases individually */ + switch (c) { + case 0xbe: + c = 0x7e; + break; + } + inbyte |= c << inbits; + inbits += 7; + } else { /* seqlen == 5 && trail1 >= 8 */ + + /* stuff bit at lead1 *and* interior stuffing */ + switch (c) { /* unstuff individually */ + case 0x7d: + c = 0x3f; + break; + case 0xbe: + c = 0x3f; + break; + case 0x3e: + c = 0x1f; + break; + case 0x7c: + c = 0x3e; + break; + } + inbyte |= c << inbits; + inbits += 6; + } + if (inbits >= 8) { + inbits -= 8; 
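+ /* a complete unstuffed data byte has been assembled: pass it on */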
+ hdlc_putbyte(inbyte & 0xff, bcs); + inputstate |= INS_have_data; + inbyte >>= 8; + } + } + } + seqlen = trail1 & 7; + } + + /* save new state */ + bcs->inputstate = inputstate; + ubc->seqlen = seqlen; + ubc->inbyte = inbyte; + ubc->inbits = inbits; +} + +/* trans_receive + * pass on received USB frame transparently as SKB via gigaset_rcv_skb + * invert bytes + * tally frames, errors etc. in BC structure counters + * parameters: + * src received data + * count number of received bytes + * bcs receiving B channel structure + */ +static inline void trans_receive(unsigned char *src, unsigned count, + struct bc_state *bcs) +{ + struct sk_buff *skb; + int dobytes; + unsigned char *dst; + + if (unlikely(bcs->ignore)) { + bcs->ignore--; + hdlc_flush(bcs); + return; + } + if (unlikely((skb = bcs->skb) == NULL)) { + bcs->skb = skb = dev_alloc_skb(SBUFSIZE + HW_HDR_LEN); + if (!skb) { + err("could not allocate skb"); + return; + } + skb_reserve(skb, HW_HDR_LEN); + } + bcs->hw.bas->goodbytes += skb->len; + dobytes = TRANSBUFSIZE - skb->len; + while (count > 0) { + dst = skb_put(skb, count < dobytes ? count : dobytes); + while (count > 0 && dobytes > 0) { + *dst++ = gigaset_invtab[*src++]; + count--; + dobytes--; + } + if (dobytes == 0) { + gigaset_rcv_skb(skb, bcs->cs, bcs); + bcs->skb = skb = dev_alloc_skb(SBUFSIZE + HW_HDR_LEN); + if (!skb) { + err("could not allocate skb"); + return; + } + skb_reserve(bcs->skb, HW_HDR_LEN); + dobytes = TRANSBUFSIZE; + } + } +} + +void gigaset_isoc_receive(unsigned char *src, unsigned count, struct bc_state *bcs) +{ + switch (bcs->proto2) { + case ISDN_PROTO_L2_HDLC: + hdlc_unpack(src, count, bcs); + break; + default: /* assume transparent */ + trans_receive(src, count, bcs); + } +} + +/* == data input =========================================================== */ + +static void cmd_loop(unsigned char *src, int numbytes, struct inbuf_t *inbuf) +{ + struct cardstate *cs = inbuf->cs; + unsigned cbytes = cs->cbytes; + + while (numbytes--) { + /* copy next character, check for end of line */ + switch (cs->respdata[cbytes] = *src++) { + case '\r': + case '\n': + /* end of line */ + dbg(DEBUG_TRANSCMD, "%s: End of Command (%d Bytes)", + __func__, cbytes); + cs->cbytes = cbytes; + gigaset_handle_modem_response(cs); + cbytes = 0; + break; + default: + /* advance in line buffer, checking for overflow */ + if (cbytes < MAX_RESP_SIZE - 1) + cbytes++; + else + warn("response too large"); + } + } + + /* save state */ + cs->cbytes = cbytes; +} + + +/* process a block of data received through the control channel + */ +void gigaset_isoc_input(struct inbuf_t *inbuf) +{ + struct cardstate *cs = inbuf->cs; + unsigned tail, head, numbytes; + unsigned char *src; + + head = atomic_read(&inbuf->head); + while (head != (tail = atomic_read(&inbuf->tail))) { + dbg(DEBUG_INTR, "buffer state: %u -> %u", head, tail); + if (head > tail) + tail = RBUFSIZE; + src = inbuf->data + head; + numbytes = tail - head; + dbg(DEBUG_INTR, "processing %u bytes", numbytes); + + if (atomic_read(&cs->mstate) == MS_LOCKED) { + gigaset_dbg_buffer(DEBUG_LOCKCMD, "received response", + numbytes, src, 0); + gigaset_if_receive(inbuf->cs, src, numbytes); + } else { + gigaset_dbg_buffer(DEBUG_CMD, "received response", + numbytes, src, 0); + cmd_loop(src, numbytes, inbuf); + } + + head += numbytes; + if (head == RBUFSIZE) + head = 0; + dbg(DEBUG_INTR, "setting head to %u", head); + atomic_set(&inbuf->head, head); + } +} + + +/* == data output ========================================================== */ + +/* 
gigaset_send_skb + * called by common.c to queue an skb for sending + * and start transmission if necessary + * parameters: + * B Channel control structure + * skb + * return value: + * number of bytes accepted for sending + * (skb->len if ok, 0 if out of buffer space) + * or error code (< 0, eg. -EINVAL) + */ +int gigaset_isoc_send_skb(struct bc_state *bcs, struct sk_buff *skb) +{ + int len; + + IFNULLRETVAL(bcs, -EFAULT); + IFNULLRETVAL(skb, -EFAULT); + len = skb->len; + + skb_queue_tail(&bcs->squeue, skb); + dbg(DEBUG_ISO, + "%s: skb queued, qlen=%d", __func__, skb_queue_len(&bcs->squeue)); + + /* tasklet submits URB if necessary */ + tasklet_schedule(&bcs->hw.bas->sent_tasklet); + + return len; /* ok so far */ +} diff --git a/drivers/isdn/gigaset/proc.c b/drivers/isdn/gigaset/proc.c new file mode 100644 index 000000000000..c6915fa2be6c --- /dev/null +++ b/drivers/isdn/gigaset/proc.c @@ -0,0 +1,81 @@ +/* + * Stuff used by all variants of the driver + * + * Copyright (c) 2001 by Stefan Eilers <Eilers.Stefan@epost.de>, + * Hansjoerg Lipp <hjlipp@web.de>, + * Tilman Schmidt <tilman@imap.cc>. + * + * ===================================================================== + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of + * the License, or (at your option) any later version. + * ===================================================================== + * ToDo: ... + * ===================================================================== + * Version: $Id: proc.c,v 1.5.2.13 2006/02/04 18:28:16 hjlipp Exp $ + * ===================================================================== + */ + +#include "gigaset.h" +#include <linux/ctype.h> + +static ssize_t show_cidmode(struct device *dev, struct device_attribute *attr, char *buf) +{ + struct usb_interface *intf = to_usb_interface(dev); + struct cardstate *cs = usb_get_intfdata(intf); + return sprintf(buf, "%d\n", atomic_read(&cs->cidmode)); // FIXME use scnprintf for 13607 bit architectures (if PAGE_SIZE==4096) +} + +static ssize_t set_cidmode(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) +{ + struct usb_interface *intf = to_usb_interface(dev); + struct cardstate *cs = usb_get_intfdata(intf); + long int value; + char *end; + + value = simple_strtol(buf, &end, 0); + while (*end) + if (!isspace(*end++)) + return -EINVAL; + if (value < 0 || value > 1) + return -EINVAL; + + if (down_interruptible(&cs->sem)) + return -ERESTARTSYS; // FIXME -EINTR? 
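+ /* hand the new mode to the event layer and wait for it to be processed */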
+ + cs->waiting = 1; + if (!gigaset_add_event(cs, &cs->at_state, EV_PROC_CIDMODE, + NULL, value, NULL)) { + cs->waiting = 0; + up(&cs->sem); + return -ENOMEM; + } + + dbg(DEBUG_CMD, "scheduling PROC_CIDMODE"); + gigaset_schedule_event(cs); + + wait_event(cs->waitqueue, !cs->waiting); + + up(&cs->sem); + + return count; +} + +static DEVICE_ATTR(cidmode, S_IRUGO|S_IWUSR, show_cidmode, set_cidmode); + +/* free sysfs for device */ +void gigaset_free_dev_sysfs(struct usb_interface *interface) +{ + dbg(DEBUG_INIT, "removing sysfs entries"); + device_remove_file(&interface->dev, &dev_attr_cidmode); +} +EXPORT_SYMBOL_GPL(gigaset_free_dev_sysfs); + +/* initialize sysfs for device */ +void gigaset_init_dev_sysfs(struct usb_interface *interface) +{ + dbg(DEBUG_INIT, "setting up sysfs"); + device_create_file(&interface->dev, &dev_attr_cidmode); +} +EXPORT_SYMBOL_GPL(gigaset_init_dev_sysfs); diff --git a/drivers/isdn/gigaset/usb-gigaset.c b/drivers/isdn/gigaset/usb-gigaset.c new file mode 100644 index 000000000000..323fc7349dec --- /dev/null +++ b/drivers/isdn/gigaset/usb-gigaset.c @@ -0,0 +1,1008 @@ +/* + * USB driver for Gigaset 307x directly or using M105 Data. + * + * Copyright (c) 2001 by Stefan Eilers <Eilers.Stefan@epost.de> + * and Hansjoerg Lipp <hjlipp@web.de>. + * + * This driver was derived from the USB skeleton driver by + * Greg Kroah-Hartman <greg@kroah.com> + * + * ===================================================================== + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of + * the License, or (at your option) any later version. + * ===================================================================== + * ToDo: ... + * ===================================================================== + * Version: $Id: usb-gigaset.c,v 1.85.4.18 2006/02/04 18:28:16 hjlipp Exp $ + * ===================================================================== + */ + +#include "gigaset.h" + +#include <linux/errno.h> +#include <linux/init.h> +#include <linux/slab.h> +#include <linux/usb.h> +#include <linux/module.h> +#include <linux/moduleparam.h> + +/* Version Information */ +#define DRIVER_AUTHOR "Hansjoerg Lipp <hjlipp@web.de>, Stefan Eilers <Eilers.Stefan@epost.de>" +#define DRIVER_DESC "USB Driver for Gigaset 307x using M105" + +/* Module parameters */ + +static int startmode = SM_ISDN; +static int cidmode = 1; + +module_param(startmode, int, S_IRUGO); +module_param(cidmode, int, S_IRUGO); +MODULE_PARM_DESC(startmode, "start in isdn4linux mode"); +MODULE_PARM_DESC(cidmode, "Call-ID mode"); + +#define GIGASET_MINORS 1 +#define GIGASET_MINOR 8 +#define GIGASET_MODULENAME "usb_gigaset" +#define GIGASET_DEVFSNAME "gig/usb/" +#define GIGASET_DEVNAME "ttyGU" + +#define IF_WRITEBUF 2000 //FIXME // WAKEUP_CHARS: 256 + +/* Values for the Gigaset M105 Data */ +#define USB_M105_VENDOR_ID 0x0681 +#define USB_M105_PRODUCT_ID 0x0009 + +/* table of devices that work with this driver */ +static struct usb_device_id gigaset_table [] = { + { USB_DEVICE(USB_M105_VENDOR_ID, USB_M105_PRODUCT_ID) }, + { } /* Terminating entry */ +}; + +MODULE_DEVICE_TABLE(usb, gigaset_table); + +/* Get a minor range for your devices from the usb maintainer */ +#define USB_SKEL_MINOR_BASE 200 + + +/* + * Control requests (empty fields: 00) + * + * RT|RQ|VALUE|INDEX|LEN |DATA + * In: + * C1 08 01 + * Get flags (1 byte). Bits: 0=dtr,1=rts,3-7:? 
+ * C1 0F ll ll + * Get device information/status (llll: 0x200 and 0x40 seen). + * Real size: I only saw MIN(llll,0x64). + * Contents: seems to be always the same... + * offset 0x00: Length of this structure (0x64) (len: 1,2,3 bytes) + * offset 0x3c: String (16 bit chars): "MCCI USB Serial V2.0" + * rest: ? + * Out: + * 41 11 + * Initialize/reset device ? + * 41 00 xx 00 + * ? (xx=00 or 01; 01 on start, 00 on close) + * 41 07 vv mm + * Set/clear flags vv=value, mm=mask (see RQ 08) + * 41 12 xx + * Used before the following configuration requests are issued + * (with xx=0x0f). I've seen other values<0xf, though. + * 41 01 xx xx + * Set baud rate. xxxx=ceil(0x384000/rate)=trunc(0x383fff/rate)+1. + * 41 03 ps bb + * Set byte size and parity. p: 0x20=even,0x10=odd,0x00=no parity + * [ 0x30: m, 0x40: s ] + * [s: 0: 1 stop bit; 1: 1.5; 2: 2] + * bb: bits/byte (seen 7 and 8) + * 41 13 -- -- -- -- 10 00 ww 00 00 00 xx 00 00 00 yy 00 00 00 zz 00 00 00 + * ?? + * Initialization: 01, 40, 00, 00 + * Open device: 00 40, 00, 00 + * yy and zz seem to be equal, either 0x00 or 0x0a + * (ww,xx) pairs seen: (00,00), (00,40), (01,40), (09,80), (19,80) + * 41 19 -- -- -- -- 06 00 00 00 00 xx 11 13 + * Used after every "configuration sequence" (RQ 12, RQs 01/03/13). + * xx is usually 0x00 but was 0x7e before starting data transfer + * in unimodem mode. So, this might be an array of characters that need + * special treatment ("commit all bufferd data"?), 11=^Q, 13=^S. + * + * Unimodem mode: use "modprobe ppp_async flag_time=0" as the device _needs_ two + * flags per packet. + */ + +static int gigaset_probe(struct usb_interface *interface, + const struct usb_device_id *id); +static void gigaset_disconnect(struct usb_interface *interface); + +static struct gigaset_driver *driver = NULL; +static struct cardstate *cardstate = NULL; + +/* usb specific object needed to register this driver with the usb subsystem */ +static struct usb_driver gigaset_usb_driver = { + .name = GIGASET_MODULENAME, + .probe = gigaset_probe, + .disconnect = gigaset_disconnect, + .id_table = gigaset_table, +}; + +struct usb_cardstate { + struct usb_device *udev; /* save off the usb device pointer */ + struct usb_interface *interface; /* the interface for this device */ + atomic_t busy; /* bulk output in progress */ + + /* Output buffer for commands (M105: and data)*/ + unsigned char *bulk_out_buffer; /* the buffer to send data */ + int bulk_out_size; /* the size of the send buffer */ + __u8 bulk_out_endpointAddr; /* the address of the bulk out endpoint */ + struct urb *bulk_out_urb; /* the urb used to transmit data */ + + /* Input buffer for command responses (M105: and data)*/ + int rcvbuf_size; /* the size of the receive buffer */ + struct urb *read_urb; /* the urb used to receive data */ + __u8 int_in_endpointAddr; /* the address of the bulk in endpoint */ + + char bchars[6]; /* req. 0x19 */ +}; + +struct usb_bc_state {}; + +static inline unsigned tiocm_to_gigaset(unsigned state) +{ + return ((state & TIOCM_DTR) ? 1 : 0) | ((state & TIOCM_RTS) ? 2 : 0); +} + +#ifdef CONFIG_GIGASET_UNDOCREQ +/* WARNING: EXPERIMENTAL! 
*/ +static int gigaset_set_modem_ctrl(struct cardstate *cs, unsigned old_state, + unsigned new_state) +{ + unsigned mask, val; + int r; + + mask = tiocm_to_gigaset(old_state ^ new_state); + val = tiocm_to_gigaset(new_state); + + dbg(DEBUG_USBREQ, "set flags 0x%02x with mask 0x%02x", val, mask); + r = usb_control_msg(cs->hw.usb->udev, + usb_sndctrlpipe(cs->hw.usb->udev, 0), 7, 0x41, + (val & 0xff) | ((mask & 0xff) << 8), 0, + NULL, 0, 2000 /*timeout??*/); // don't use this in an interrupt/BH + if (r < 0) + return r; + //.. + return 0; +} + +static int set_value(struct cardstate *cs, u8 req, u16 val) +{ + int r, r2; + + dbg(DEBUG_USBREQ, "request %02x (%04x)", (unsigned)req, (unsigned)val); + r = usb_control_msg(cs->hw.usb->udev, + usb_sndctrlpipe(cs->hw.usb->udev, 0), 0x12, 0x41, + 0xf /*?*/, 0, + NULL, 0, 2000 /*?*/); /* no idea, what this does */ + if (r < 0) { + err("error %d on request 0x12", -r); + return r; + } + + r = usb_control_msg(cs->hw.usb->udev, + usb_sndctrlpipe(cs->hw.usb->udev, 0), req, 0x41, + val, 0, + NULL, 0, 2000 /*?*/); + if (r < 0) + err("error %d on request 0x%02x", -r, (unsigned)req); + + r2 = usb_control_msg(cs->hw.usb->udev, + usb_sndctrlpipe(cs->hw.usb->udev, 0), 0x19, 0x41, + 0, 0, cs->hw.usb->bchars, 6, 2000 /*?*/); + if (r2 < 0) + err("error %d on request 0x19", -r2); + + return r < 0 ? r : (r2 < 0 ? r2 : 0); +} + +/* WARNING: HIGHLY EXPERIMENTAL! */ +// don't use this in an interrupt/BH +static int gigaset_baud_rate(struct cardstate *cs, unsigned cflag) +{ + u16 val; + u32 rate; + + cflag &= CBAUD; + + switch (cflag) { + //FIXME more values? + case B300: rate = 300; break; + case B600: rate = 600; break; + case B1200: rate = 1200; break; + case B2400: rate = 2400; break; + case B4800: rate = 4800; break; + case B9600: rate = 9600; break; + case B19200: rate = 19200; break; + case B38400: rate = 38400; break; + case B57600: rate = 57600; break; + case B115200: rate = 115200; break; + default: + rate = 9600; + err("unsupported baudrate request 0x%x," + " using default of B9600", cflag); + } + + val = 0x383fff / rate + 1; + + return set_value(cs, 1, val); +} + +/* WARNING: HIGHLY EXPERIMENTAL! */ +// don't use this in an interrupt/BH +static int gigaset_set_line_ctrl(struct cardstate *cs, unsigned cflag) +{ + u16 val = 0; + + /* set the parity */ + if (cflag & PARENB) + val |= (cflag & PARODD) ? 0x10 : 0x20; + + /* set the number of data bits */ + switch (cflag & CSIZE) { + case CS5: + val |= 5 << 8; break; + case CS6: + val |= 6 << 8; break; + case CS7: + val |= 7 << 8; break; + case CS8: + val |= 8 << 8; break; + default: + err("CSIZE was not CS5-CS8, using default of 8"); + val |= 8 << 8; + break; + } + + /* set the number of stop bits */ + if (cflag & CSTOPB) { + if ((cflag & CSIZE) == CS5) + val |= 1; /* 1.5 stop bits */ //FIXME is this okay? 
+ else + val |= 2; /* 2 stop bits */ + } + + return set_value(cs, 3, val); +} + +#else +static int gigaset_set_modem_ctrl(struct cardstate *cs, unsigned old_state, + unsigned new_state) +{ + return -EINVAL; +} + +static int gigaset_set_line_ctrl(struct cardstate *cs, unsigned cflag) +{ + return -EINVAL; +} + +static int gigaset_baud_rate(struct cardstate *cs, unsigned cflag) +{ + return -EINVAL; +} +#endif + + + /*================================================================================================================*/ +static int gigaset_init_bchannel(struct bc_state *bcs) +{ + /* nothing to do for M10x */ + gigaset_bchannel_up(bcs); + return 0; +} + +static int gigaset_close_bchannel(struct bc_state *bcs) +{ + /* nothing to do for M10x */ + gigaset_bchannel_down(bcs); + return 0; +} + +//void send_ack_to_LL(void *data); +static int write_modem(struct cardstate *cs); +static int send_cb(struct cardstate *cs, struct cmdbuf_t *cb); + + +/* Handling of send queue. If there is already a skb opened, put data to + * the transfer buffer by calling "write_modem". Otherwise take a new skb out of the queue. + * This function will be called by the ISR via "transmit_chars" (USB: B-Channel Bulk callback handler + * via immediate task queue) or by writebuf_from_LL if the LL wants to transmit data. + */ +static void gigaset_modem_fill(unsigned long data) +{ + struct cardstate *cs = (struct cardstate *) data; + struct bc_state *bcs = &cs->bcs[0]; /* only one channel */ + struct cmdbuf_t *cb; + unsigned long flags; + int again; + + dbg(DEBUG_OUTPUT, "modem_fill"); + + if (atomic_read(&cs->hw.usb->busy)) { + dbg(DEBUG_OUTPUT, "modem_fill: busy"); + return; + } + + do { + again = 0; + if (!bcs->tx_skb) { /* no skb is being sent */ + spin_lock_irqsave(&cs->cmdlock, flags); + cb = cs->cmdbuf; + spin_unlock_irqrestore(&cs->cmdlock, flags); + if (cb) { /* commands to send? */ + dbg(DEBUG_OUTPUT, "modem_fill: cb"); + if (send_cb(cs, cb) < 0) { + dbg(DEBUG_OUTPUT, + "modem_fill: send_cb failed"); + again = 1; /* no callback will be called! */ + } + } else { /* skbs to send? */ + bcs->tx_skb = skb_dequeue(&bcs->squeue); + if (bcs->tx_skb) + dbg(DEBUG_INTR, + "Dequeued skb (Adr: %lx)!", + (unsigned long) bcs->tx_skb); + } + } + + if (bcs->tx_skb) { + dbg(DEBUG_OUTPUT, "modem_fill: tx_skb"); + if (write_modem(cs) < 0) { + dbg(DEBUG_OUTPUT, + "modem_fill: write_modem failed"); + // FIXME should we tell the LL? + again = 1; /* no callback will be called! */ + } + } + } while (again); +} + +/** + * gigaset_read_int_callback + * + * It is called if the data was received from the device. This is almost similiar to + * the interrupt service routine in the serial device. 
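+ * The first byte of each packet is expected to be 0x00 and is stripped
+ * here before the payload is handed to the input buffer.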
+ */ +static void gigaset_read_int_callback(struct urb *urb, struct pt_regs *regs) +{ + int resubmit = 0; + int r; + struct cardstate *cs; + unsigned numbytes; + unsigned char *src; + //unsigned long flags; + struct inbuf_t *inbuf; + + IFNULLRET(urb); + inbuf = (struct inbuf_t *) urb->context; + IFNULLRET(inbuf); + //spin_lock_irqsave(&inbuf->lock, flags); + cs = inbuf->cs; + IFNULLGOTO(cs, exit); + IFNULLGOTO(cardstate, exit); + + if (!atomic_read(&cs->connected)) { + err("%s: disconnected", __func__); + goto exit; + } + + if (!urb->status) { + numbytes = urb->actual_length; + + if (numbytes) { + src = inbuf->rcvbuf; + if (unlikely(*src)) + warn("%s: There was no leading 0, but 0x%02x!", + __func__, (unsigned) *src); + ++src; /* skip leading 0x00 */ + --numbytes; + if (gigaset_fill_inbuf(inbuf, src, numbytes)) { + dbg(DEBUG_INTR, "%s-->BH", __func__); + gigaset_schedule_event(inbuf->cs); + } + } else + dbg(DEBUG_INTR, "Received zero block length"); + resubmit = 1; + } else { + /* The urb might have been killed. */ + dbg(DEBUG_ANY, "%s - nonzero read bulk status received: %d", + __func__, urb->status); + if (urb->status != -ENOENT) /* not killed */ + resubmit = 1; + } +exit: + //spin_unlock_irqrestore(&inbuf->lock, flags); + if (resubmit) { + r = usb_submit_urb(urb, SLAB_ATOMIC); + if (r) + err("error %d when resubmitting urb.", -r); + } +} + + +/* This callback routine is called when data was transmitted to a B-Channel. + * Therefore it has to check if there is still data to transmit. This + * happens by calling modem_fill via task queue. + * + */ +static void gigaset_write_bulk_callback(struct urb *urb, struct pt_regs *regs) +{ + struct cardstate *cs = (struct cardstate *) urb->context; + + IFNULLRET(cs); +#ifdef CONFIG_GIGASET_DEBUG + if (!atomic_read(&cs->connected)) { + err("%s:not connected", __func__); + return; + } +#endif + if (urb->status) + err("bulk transfer failed (status %d)", -urb->status); /* That's all we can do. Communication problems + are handeled by timeouts or network protocols */ + + atomic_set(&cs->hw.usb->busy, 0); + tasklet_schedule(&cs->write_tasklet); +} + +static int send_cb(struct cardstate *cs, struct cmdbuf_t *cb) +{ + struct cmdbuf_t *tcb; + unsigned long flags; + int count; + int status = -ENOENT; // FIXME + struct usb_cardstate *ucs = cs->hw.usb; + + do { + if (!cb->len) { + tcb = cb; + + spin_lock_irqsave(&cs->cmdlock, flags); + cs->cmdbytes -= cs->curlen; + dbg(DEBUG_OUTPUT, "send_cb: sent %u bytes, %u left", + cs->curlen, cs->cmdbytes); + cs->cmdbuf = cb = cb->next; + if (cb) { + cb->prev = NULL; + cs->curlen = cb->len; + } else { + cs->lastcmdbuf = NULL; + cs->curlen = 0; + } + spin_unlock_irqrestore(&cs->cmdlock, flags); + + if (tcb->wake_tasklet) + tasklet_schedule(tcb->wake_tasklet); + kfree(tcb); + } + if (cb) { + count = min(cb->len, ucs->bulk_out_size); + usb_fill_bulk_urb(ucs->bulk_out_urb, ucs->udev, + usb_sndbulkpipe(ucs->udev, + ucs->bulk_out_endpointAddr & 0x0f), + cb->buf + cb->offset, count, + gigaset_write_bulk_callback, cs); + + cb->offset += count; + cb->len -= count; + atomic_set(&ucs->busy, 1); + dbg(DEBUG_OUTPUT, "send_cb: send %d bytes", count); + + status = usb_submit_urb(ucs->bulk_out_urb, SLAB_ATOMIC); + if (status) { + atomic_set(&ucs->busy, 0); + err("could not submit urb (error %d).", + -status); + cb->len = 0; /* skip urb => remove cb+wakeup in next loop cycle */ + } + } + } while (cb && status); /* bei Fehler naechster Befehl //FIXME: ist das OK? 
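(on error: go on to the next command)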
*/ + + return status; +} + +/* Write string into transbuf and send it to modem. + */ +static int gigaset_write_cmd(struct cardstate *cs, const unsigned char *buf, + int len, struct tasklet_struct *wake_tasklet) +{ + struct cmdbuf_t *cb; + unsigned long flags; + + gigaset_dbg_buffer(atomic_read(&cs->mstate) != MS_LOCKED ? + DEBUG_TRANSCMD : DEBUG_LOCKCMD, + "CMD Transmit", len, buf, 0); + + if (!atomic_read(&cs->connected)) { + err("%s: not connected", __func__); + return -ENODEV; + } + + if (len <= 0) + return 0; + + if (!(cb = kmalloc(sizeof(struct cmdbuf_t) + len, GFP_ATOMIC))) { + err("%s: out of memory", __func__); + return -ENOMEM; + } + + memcpy(cb->buf, buf, len); + cb->len = len; + cb->offset = 0; + cb->next = NULL; + cb->wake_tasklet = wake_tasklet; + + spin_lock_irqsave(&cs->cmdlock, flags); + cb->prev = cs->lastcmdbuf; + if (cs->lastcmdbuf) + cs->lastcmdbuf->next = cb; + else { + cs->cmdbuf = cb; + cs->curlen = len; + } + cs->cmdbytes += len; + cs->lastcmdbuf = cb; + spin_unlock_irqrestore(&cs->cmdlock, flags); + + tasklet_schedule(&cs->write_tasklet); + return len; +} + +static int gigaset_write_room(struct cardstate *cs) +{ + unsigned long flags; + unsigned bytes; + + spin_lock_irqsave(&cs->cmdlock, flags); + bytes = cs->cmdbytes; + spin_unlock_irqrestore(&cs->cmdlock, flags); + + return bytes < IF_WRITEBUF ? IF_WRITEBUF - bytes : 0; +} + +static int gigaset_chars_in_buffer(struct cardstate *cs) +{ + return cs->cmdbytes; +} + +static int gigaset_brkchars(struct cardstate *cs, const unsigned char buf[6]) +{ +#ifdef CONFIG_GIGASET_UNDOCREQ + gigaset_dbg_buffer(DEBUG_USBREQ, "brkchars", 6, buf, 0); + memcpy(cs->hw.usb->bchars, buf, 6); + return usb_control_msg(cs->hw.usb->udev, + usb_sndctrlpipe(cs->hw.usb->udev, 0), 0x19, 0x41, + 0, 0, &buf, 6, 2000); +#else + return -EINVAL; +#endif +} + +static int gigaset_freebcshw(struct bc_state *bcs) +{ + if (!bcs->hw.usb) + return 0; + //FIXME + kfree(bcs->hw.usb); + return 1; +} + +/* Initialize the b-channel structure */ +static int gigaset_initbcshw(struct bc_state *bcs) +{ + bcs->hw.usb = kmalloc(sizeof(struct usb_bc_state), GFP_KERNEL); + if (!bcs->hw.usb) + return 0; + + //bcs->hw.usb->trans_flg = READY_TO_TRNSMIT; /* B-Channel ready to transmit */ + return 1; +} + +static void gigaset_reinitbcshw(struct bc_state *bcs) +{ +} + +static void gigaset_freecshw(struct cardstate *cs) +{ + //FIXME + tasklet_kill(&cs->write_tasklet); + kfree(cs->hw.usb); +} + +static int gigaset_initcshw(struct cardstate *cs) +{ + struct usb_cardstate *ucs; + + cs->hw.usb = ucs = + kmalloc(sizeof(struct usb_cardstate), GFP_KERNEL); + if (!ucs) + return 0; + + ucs->bchars[0] = 0; + ucs->bchars[1] = 0; + ucs->bchars[2] = 0; + ucs->bchars[3] = 0; + ucs->bchars[4] = 0x11; + ucs->bchars[5] = 0x13; + ucs->bulk_out_buffer = NULL; + ucs->bulk_out_urb = NULL; + //ucs->urb_cmd_out = NULL; + ucs->read_urb = NULL; + tasklet_init(&cs->write_tasklet, + &gigaset_modem_fill, (unsigned long) cs); + + return 1; +} + +/* Writes the data of the current open skb into the modem. + * We have to protect against multiple calls until the + * callback handler () is called , due to the fact that we + * are just allowed to send data once to an endpoint. Therefore + * we using "trans_flg" to synchonize ... 
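+ * (in this driver the ucs->busy flag serves that purpose: it is set
+ * before the bulk URB is submitted and cleared again in the completion
+ * handler)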
+static int write_modem(struct cardstate *cs)
+{
+	int ret;
+	int count;
+	struct bc_state *bcs = &cs->bcs[0]; /* only one channel */
+	struct usb_cardstate *ucs = cs->hw.usb;
+	//unsigned long flags;
+
+	IFNULLRETVAL(bcs->tx_skb, -EINVAL);
+
+	dbg(DEBUG_WRITE, "len: %d...", bcs->tx_skb->len);
+
+	ret = -ENODEV;
+	IFNULLGOTO(ucs->bulk_out_buffer, error);
+	IFNULLGOTO(ucs->bulk_out_urb, error);
+	ret = 0;
+
+	if (!bcs->tx_skb->len) {
+		dev_kfree_skb_any(bcs->tx_skb);
+		bcs->tx_skb = NULL;
+		return -EINVAL;
+	}
+
+	/* Copy the data to the bulk out buffer and	// FIXME copying not necessary
+	 * transmit it
+	 */
+	count = min(bcs->tx_skb->len, (unsigned) ucs->bulk_out_size);
+	memcpy(ucs->bulk_out_buffer, bcs->tx_skb->data, count);
+	skb_pull(bcs->tx_skb, count);
+
+	usb_fill_bulk_urb(ucs->bulk_out_urb, ucs->udev,
+	                  usb_sndbulkpipe(ucs->udev,
+	                                  ucs->bulk_out_endpointAddr & 0x0f),
+	                  ucs->bulk_out_buffer, count,
+	                  gigaset_write_bulk_callback, cs);
+	atomic_set(&ucs->busy, 1);
+	dbg(DEBUG_OUTPUT, "write_modem: send %d bytes", count);
+
+	ret = usb_submit_urb(ucs->bulk_out_urb, SLAB_ATOMIC);
+	if (ret) {
+		err("could not submit urb (error %d).", -ret);
+		atomic_set(&ucs->busy, 0);
+	}
+	if (!bcs->tx_skb->len) {
+		/* skb sent completely */
+		gigaset_skb_sent(bcs, bcs->tx_skb); //FIXME also when ret < 0?
+
+		dbg(DEBUG_INTR,
+		    "kfree skb (Adr: %lx)!", (unsigned long) bcs->tx_skb);
+		dev_kfree_skb_any(bcs->tx_skb);
+		bcs->tx_skb = NULL;
+	}
+
+	return ret;
+error:
+	dev_kfree_skb_any(bcs->tx_skb);
+	bcs->tx_skb = NULL;
+	return ret;
+
+}
+
+static int gigaset_probe(struct usb_interface *interface,
+                         const struct usb_device_id *id)
+{
+	int retval;
+	struct usb_device *udev = interface_to_usbdev(interface);
+	unsigned int ifnum;
+	struct usb_host_interface *hostif;
+	struct cardstate *cs = NULL;
+	struct usb_cardstate *ucs = NULL;
+	//struct usb_interface_descriptor *iface_desc;
+	struct usb_endpoint_descriptor *endpoint;
+	//isdn_ctrl command;
+	int buffer_size;
+	int alt;
+	//unsigned long flags;
+
+	info("%s: Check if device matches .. (Vendor: 0x%x, Product: 0x%x)",
+	     __func__, le16_to_cpu(udev->descriptor.idVendor),
+	     le16_to_cpu(udev->descriptor.idProduct));
+
+	retval = -ENODEV; //FIXME
+
+	/* See if the device offered to us matches what we can accept */
+	if ((le16_to_cpu(udev->descriptor.idVendor) != USB_M105_VENDOR_ID) ||
+	    (le16_to_cpu(udev->descriptor.idProduct) != USB_M105_PRODUCT_ID))
+		return -ENODEV;
+
+	/* this starts to become ascii art... */
+	hostif = interface->cur_altsetting;
+	alt = hostif->desc.bAlternateSetting;
+	ifnum = hostif->desc.bInterfaceNumber; // FIXME ?
+
+	if (alt != 0 || ifnum != 0) {
+		warn("ifnum %d, alt %d", ifnum, alt);
+		return -ENODEV;
+	}
+
+	/* Reject application-specific interfaces
+	 *
+	 */
+	if (hostif->desc.bInterfaceClass != 255) {
+		info("%s: Device matched, but iface_desc[%d]->bInterfaceClass==%d !",
+		     __func__, ifnum, hostif->desc.bInterfaceClass);
+		return -ENODEV;
+	}
+
+	info("%s: Device matched ... !", __func__);
!", __func__); + + cs = gigaset_getunassignedcs(driver); + if (!cs) { + warn("No free cardstate!"); + return -ENODEV; + } + ucs = cs->hw.usb; + +#if 0 + if (usb_set_configuration(udev, udev->config[0].desc.bConfigurationValue) < 0) { + warn("set_configuration failed"); + goto error; + } + + + if (usb_set_interface(udev, ifnum/*==0*/, alt/*==0*/) < 0) { + warn("usb_set_interface failed, device %d interface %d altsetting %d", + udev->devnum, ifnum, alt); + goto error; + } +#endif + + /* set up the endpoint information */ + /* check out the endpoints */ + /* We will get 2 endpoints: One for sending commands to the device (bulk out) and one to + * poll messages from the device(int in). + * Therefore we will have an almost similiar situation as with our serial port handler. + * If an connection will be established, we will have to create data in/out pipes + * dynamically... + */ + + endpoint = &hostif->endpoint[0].desc; + + buffer_size = le16_to_cpu(endpoint->wMaxPacketSize); + ucs->bulk_out_size = buffer_size; + ucs->bulk_out_endpointAddr = endpoint->bEndpointAddress; + ucs->bulk_out_buffer = kmalloc(buffer_size, GFP_KERNEL); + if (!ucs->bulk_out_buffer) { + err("Couldn't allocate bulk_out_buffer"); + retval = -ENOMEM; + goto error; + } + + ucs->bulk_out_urb = usb_alloc_urb(0, SLAB_KERNEL); + if (!ucs->bulk_out_urb) { + err("Couldn't allocate bulk_out_buffer"); + retval = -ENOMEM; + goto error; + } + + endpoint = &hostif->endpoint[1].desc; + + atomic_set(&ucs->busy, 0); + ucs->udev = udev; + ucs->interface = interface; + + ucs->read_urb = usb_alloc_urb(0, SLAB_KERNEL); + if (!ucs->read_urb) { + err("No free urbs available"); + retval = -ENOMEM; + goto error; + } + buffer_size = le16_to_cpu(endpoint->wMaxPacketSize); + ucs->rcvbuf_size = buffer_size; + ucs->int_in_endpointAddr = endpoint->bEndpointAddress; + cs->inbuf[0].rcvbuf = kmalloc(buffer_size, GFP_KERNEL); + if (!cs->inbuf[0].rcvbuf) { + err("Couldn't allocate rcvbuf"); + retval = -ENOMEM; + goto error; + } + /* Fill the interrupt urb and send it to the core */ + usb_fill_int_urb(ucs->read_urb, udev, + usb_rcvintpipe(udev, + endpoint->bEndpointAddress & 0x0f), + cs->inbuf[0].rcvbuf, buffer_size, + gigaset_read_int_callback, + cs->inbuf + 0, endpoint->bInterval); + + retval = usb_submit_urb(ucs->read_urb, SLAB_KERNEL); + if (retval) { + err("Could not submit URB!"); + goto error; + } + + /* tell common part that the device is ready */ + if (startmode == SM_LOCKED) + atomic_set(&cs->mstate, MS_LOCKED); + if (!gigaset_start(cs)) { + tasklet_kill(&cs->write_tasklet); + retval = -ENODEV; //FIXME + goto error; + } + + /* save address of controller structure */ + usb_set_intfdata(interface, cs); + + /* set up device sysfs */ + gigaset_init_dev_sysfs(interface); + return 0; + +error: + if (ucs->read_urb) + usb_kill_urb(ucs->read_urb); + kfree(ucs->bulk_out_buffer); + if (ucs->bulk_out_urb != NULL) + usb_free_urb(ucs->bulk_out_urb); + kfree(cs->inbuf[0].rcvbuf); + if (ucs->read_urb != NULL) + usb_free_urb(ucs->read_urb); + ucs->read_urb = ucs->bulk_out_urb = NULL; + cs->inbuf[0].rcvbuf = ucs->bulk_out_buffer = NULL; + gigaset_unassign(cs); + return retval; +} + +/** + * skel_disconnect + */ +static void gigaset_disconnect(struct usb_interface *interface) +{ + struct cardstate *cs; + struct usb_cardstate *ucs; + + cs = usb_get_intfdata(interface); + + /* clear device sysfs */ + gigaset_free_dev_sysfs(interface); + + usb_set_intfdata(interface, NULL); + ucs = cs->hw.usb; + usb_kill_urb(ucs->read_urb); + //info("GigaSet USB device #%d will be 
disconnected", minor); + + gigaset_stop(cs); + + tasklet_kill(&cs->write_tasklet); + + usb_kill_urb(ucs->bulk_out_urb); /* FIXME: nur, wenn noetig */ + //usb_kill_urb(ucs->urb_cmd_out); /* FIXME: nur, wenn noetig */ + + kfree(ucs->bulk_out_buffer); + if (ucs->bulk_out_urb != NULL) + usb_free_urb(ucs->bulk_out_urb); + //if(ucs->urb_cmd_out != NULL) + // usb_free_urb(ucs->urb_cmd_out); + kfree(cs->inbuf[0].rcvbuf); + if (ucs->read_urb != NULL) + usb_free_urb(ucs->read_urb); + ucs->read_urb = ucs->bulk_out_urb/*=ucs->urb_cmd_out*/=NULL; + cs->inbuf[0].rcvbuf = ucs->bulk_out_buffer = NULL; + + gigaset_unassign(cs); +} + +static struct gigaset_ops ops = { + gigaset_write_cmd, + gigaset_write_room, + gigaset_chars_in_buffer, + gigaset_brkchars, + gigaset_init_bchannel, + gigaset_close_bchannel, + gigaset_initbcshw, + gigaset_freebcshw, + gigaset_reinitbcshw, + gigaset_initcshw, + gigaset_freecshw, + gigaset_set_modem_ctrl, + gigaset_baud_rate, + gigaset_set_line_ctrl, + gigaset_m10x_send_skb, + gigaset_m10x_input, +}; + +/** + * usb_gigaset_init + * This function is called while kernel-module is loaded + */ +static int __init usb_gigaset_init(void) +{ + int result; + + /* allocate memory for our driver state and intialize it */ + if ((driver = gigaset_initdriver(GIGASET_MINOR, GIGASET_MINORS, + GIGASET_MODULENAME, GIGASET_DEVNAME, + GIGASET_DEVFSNAME, &ops, + THIS_MODULE)) == NULL) + goto error; + + /* allocate memory for our device state and intialize it */ + cardstate = gigaset_initcs(driver, 1, 1, 0, cidmode, GIGASET_MODULENAME); + if (!cardstate) + goto error; + + /* register this driver with the USB subsystem */ + result = usb_register(&gigaset_usb_driver); + if (result < 0) { + err("usb_gigaset: usb_register failed (error %d)", + -result); + goto error; + } + + info(DRIVER_AUTHOR); + info(DRIVER_DESC); + return 0; + +error: if (cardstate) + gigaset_freecs(cardstate); + cardstate = NULL; + if (driver) + gigaset_freedriver(driver); + driver = NULL; + return -1; +} + + +/** + * usb_gigaset_exit + * This function is called while unloading the kernel-module + */ +static void __exit usb_gigaset_exit(void) +{ + gigaset_blockdriver(driver); /* => probe will fail + * => no gigaset_start any more + */ + + gigaset_shutdown(cardstate); + /* from now on, no isdn callback should be possible */ + + /* deregister this driver with the USB subsystem */ + usb_deregister(&gigaset_usb_driver); + /* this will call the disconnect-callback */ + /* from now on, no disconnect/probe callback should be running */ + + gigaset_freecs(cardstate); + cardstate = NULL; + gigaset_freedriver(driver); + driver = NULL; +} + + +module_init(usb_gigaset_init); +module_exit(usb_gigaset_exit); + +MODULE_AUTHOR(DRIVER_AUTHOR); +MODULE_DESCRIPTION(DRIVER_DESC); + +MODULE_LICENSE("GPL"); diff --git a/drivers/isdn/hardware/avm/avmcard.h b/drivers/isdn/hardware/avm/avmcard.h index 296d6a6f749f..3b431723c7cb 100644 --- a/drivers/isdn/hardware/avm/avmcard.h +++ b/drivers/isdn/hardware/avm/avmcard.h @@ -437,9 +437,7 @@ static inline unsigned int t1_get_slice(unsigned int base, #endif dp += i; i = 0; - if (i == 0) - break; - /* fall through */ + break; default: *dp++ = b1_get_byte(base); i--; diff --git a/drivers/isdn/hisax/hisax_fcpcipnp.c b/drivers/isdn/hisax/hisax_fcpcipnp.c index dc7ef957e897..dbcca287ee2c 100644 --- a/drivers/isdn/hisax/hisax_fcpcipnp.c +++ b/drivers/isdn/hisax/hisax_fcpcipnp.c @@ -387,8 +387,7 @@ static void hdlc_fill_fifo(struct fritz_bcs *bcs) DBG(0x40, "hdlc_fill_fifo"); - if (skb->len == 0) - BUG(); + 
BUG_ON(skb->len == 0); bcs->ctrl.sr.cmd &= ~HDLC_CMD_XME; if (bcs->tx_skb->len > bcs->fifo_size) { @@ -630,9 +629,7 @@ static void fritz_b_l2l1(struct hisax_if *ifc, int pr, void *arg) switch (pr) { case PH_DATA | REQUEST: - if (bcs->tx_skb) - BUG(); - + BUG_ON(bcs->tx_skb); bcs->tx_skb = skb; DBG_SKB(1, skb); hdlc_fill_fifo(bcs); diff --git a/drivers/isdn/hisax/hisax_isac.c b/drivers/isdn/hisax/hisax_isac.c index f4972f6c1f5d..81eac344bb03 100644 --- a/drivers/isdn/hisax/hisax_isac.c +++ b/drivers/isdn/hisax/hisax_isac.c @@ -476,12 +476,10 @@ static void isac_fill_fifo(struct isac *isac) unsigned char cmd; u_char *ptr; - if (!isac->tx_skb) - BUG(); + BUG_ON(!isac->tx_skb); count = isac->tx_skb->len; - if (count <= 0) - BUG(); + BUG_ON(count <= 0); DBG(DBG_IRQ, "count %d", count); @@ -859,8 +857,7 @@ void isac_d_l2l1(struct hisax_if *hisax_d_if, int pr, void *arg) dev_kfree_skb(skb); break; } - if (isac->tx_skb) - BUG(); + BUG_ON(isac->tx_skb); isac->tx_skb = skb; isac_fill_fifo(isac); diff --git a/drivers/isdn/hisax/st5481_b.c b/drivers/isdn/hisax/st5481_b.c index 657817a591fe..22fd5db18d48 100644 --- a/drivers/isdn/hisax/st5481_b.c +++ b/drivers/isdn/hisax/st5481_b.c @@ -356,9 +356,7 @@ void st5481_b_l2l1(struct hisax_if *ifc, int pr, void *arg) switch (pr) { case PH_DATA | REQUEST: - if (bcs->b_out.tx_skb) - BUG(); - + BUG_ON(bcs->b_out.tx_skb); bcs->b_out.tx_skb = skb; break; case PH_ACTIVATE | REQUEST: diff --git a/drivers/isdn/hisax/st5481_d.c b/drivers/isdn/hisax/st5481_d.c index 941f7022ada1..493dc94992e5 100644 --- a/drivers/isdn/hisax/st5481_d.c +++ b/drivers/isdn/hisax/st5481_d.c @@ -596,9 +596,7 @@ void st5481_d_l2l1(struct hisax_if *hisax_d_if, int pr, void *arg) break; case PH_DATA | REQUEST: DBG(2, "PH_DATA REQUEST len %d", skb->len); - if (adapter->d_out.tx_skb) - BUG(); - + BUG_ON(adapter->d_out.tx_skb); adapter->d_out.tx_skb = skb; FsmEvent(&adapter->d_out.fsm, EV_DOUT_START_XMIT, NULL); break; diff --git a/drivers/isdn/i4l/Kconfig b/drivers/isdn/i4l/Kconfig index 1789b607f090..a4f7288a1fc8 100644 --- a/drivers/isdn/i4l/Kconfig +++ b/drivers/isdn/i4l/Kconfig @@ -139,3 +139,4 @@ source "drivers/isdn/hysdn/Kconfig" endmenu +source "drivers/isdn/gigaset/Kconfig" diff --git a/drivers/isdn/i4l/isdn_ppp.c b/drivers/isdn/i4l/isdn_ppp.c index b9fed8a3bcc6..a0927d1b7a0c 100644 --- a/drivers/isdn/i4l/isdn_ppp.c +++ b/drivers/isdn/i4l/isdn_ppp.c @@ -974,8 +974,7 @@ void isdn_ppp_receive(isdn_net_dev * net_dev, isdn_net_local * lp, struct sk_buf int slot; int proto; - if (net_dev->local->master) - BUG(); // we're called with the master device always + BUG_ON(net_dev->local->master); // we're called with the master device always slot = lp->ppp_slot; if (slot < 0 || slot >= ISDN_MAX_CHANNELS) { @@ -2527,8 +2526,7 @@ static struct sk_buff *isdn_ppp_decompress(struct sk_buff *skb,struct ippp_struc printk(KERN_DEBUG "ippp: no decompressor defined!\n"); return skb; } - if (!stat) // if we have a compressor, stat has been set as well - BUG(); + BUG_ON(!stat); // if we have a compressor, stat has been set as well if((master && *proto == PPP_COMP) || (!master && *proto == PPP_COMPFRAG) ) { // compressed packets are compressed by their protocol type diff --git a/drivers/macintosh/adb.c b/drivers/macintosh/adb.c index d2ead1776c16..34fcabac5fdb 100644 --- a/drivers/macintosh/adb.c +++ b/drivers/macintosh/adb.c @@ -80,7 +80,7 @@ static struct adb_driver *adb_driver_list[] = { static struct class *adb_dev_class; struct adb_driver *adb_controller; -struct notifier_block *adb_client_list = NULL; 
+BLOCKING_NOTIFIER_HEAD(adb_client_list); static int adb_got_sleep; static int adb_inited; static pid_t adb_probe_task_pid; @@ -354,7 +354,8 @@ adb_notify_sleep(struct pmu_sleep_notifier *self, int when) /* Stop autopoll */ if (adb_controller->autopoll) adb_controller->autopoll(0); - ret = notifier_call_chain(&adb_client_list, ADB_MSG_POWERDOWN, NULL); + ret = blocking_notifier_call_chain(&adb_client_list, + ADB_MSG_POWERDOWN, NULL); if (ret & NOTIFY_STOP_MASK) { up(&adb_probe_mutex); return PBOOK_SLEEP_REFUSE; @@ -391,7 +392,8 @@ do_adb_reset_bus(void) if (adb_controller->autopoll) adb_controller->autopoll(0); - nret = notifier_call_chain(&adb_client_list, ADB_MSG_PRE_RESET, NULL); + nret = blocking_notifier_call_chain(&adb_client_list, + ADB_MSG_PRE_RESET, NULL); if (nret & NOTIFY_STOP_MASK) { if (adb_controller->autopoll) adb_controller->autopoll(autopoll_devs); @@ -426,7 +428,8 @@ do_adb_reset_bus(void) } up(&adb_handler_sem); - nret = notifier_call_chain(&adb_client_list, ADB_MSG_POST_RESET, NULL); + nret = blocking_notifier_call_chain(&adb_client_list, + ADB_MSG_POST_RESET, NULL); if (nret & NOTIFY_STOP_MASK) return -EBUSY; diff --git a/drivers/macintosh/adbhid.c b/drivers/macintosh/adbhid.c index c0b46bceb5df..f5779a73184d 100644 --- a/drivers/macintosh/adbhid.c +++ b/drivers/macintosh/adbhid.c @@ -1214,7 +1214,8 @@ static int __init adbhid_init(void) adbhid_probe(); - notifier_chain_register(&adb_client_list, &adbhid_adb_notifier); + blocking_notifier_chain_register(&adb_client_list, + &adbhid_adb_notifier); return 0; } diff --git a/drivers/macintosh/smu.c b/drivers/macintosh/smu.c index 4eb05d7143d8..f4516ca7aa3a 100644 --- a/drivers/macintosh/smu.c +++ b/drivers/macintosh/smu.c @@ -35,6 +35,7 @@ #include <linux/delay.h> #include <linux/sysdev.h> #include <linux/poll.h> +#include <linux/mutex.h> #include <asm/byteorder.h> #include <asm/io.h> @@ -92,7 +93,7 @@ struct smu_device { * for now, just hard code that */ static struct smu_device *smu; -static DECLARE_MUTEX(smu_part_access); +static DEFINE_MUTEX(smu_part_access); static void smu_i2c_retry(unsigned long data); @@ -976,11 +977,11 @@ struct smu_sdbp_header *__smu_get_sdb_partition(int id, unsigned int *size, if (interruptible) { int rc; - rc = down_interruptible(&smu_part_access); + rc = mutex_lock_interruptible(&smu_part_access); if (rc) return ERR_PTR(rc); } else - down(&smu_part_access); + mutex_lock(&smu_part_access); part = (struct smu_sdbp_header *)get_property(smu->of_node, pname, size); @@ -990,7 +991,7 @@ struct smu_sdbp_header *__smu_get_sdb_partition(int id, unsigned int *size, if (part != NULL && size) *size = part->len << 2; } - up(&smu_part_access); + mutex_unlock(&smu_part_access); return part; } diff --git a/drivers/macintosh/via-pmu.c b/drivers/macintosh/via-pmu.c index 4f5f3abc9cb3..0b5ff553e39a 100644 --- a/drivers/macintosh/via-pmu.c +++ b/drivers/macintosh/via-pmu.c @@ -187,7 +187,7 @@ extern int disable_kernel_backlight; int __fake_sleep; int asleep; -struct notifier_block *sleep_notifier_list; +BLOCKING_NOTIFIER_HEAD(sleep_notifier_list); #ifdef CONFIG_ADB static int adb_dev_map = 0; diff --git a/drivers/macintosh/via-pmu68k.c b/drivers/macintosh/via-pmu68k.c index f08e52f2107b..35b70323e7e3 100644 --- a/drivers/macintosh/via-pmu68k.c +++ b/drivers/macintosh/via-pmu68k.c @@ -102,7 +102,7 @@ static int pmu_kind = PMU_UNKNOWN; static int pmu_fully_inited = 0; int asleep; -struct notifier_block *sleep_notifier_list; +BLOCKING_NOTIFIER_HEAD(sleep_notifier_list); static int pmu_probe(void); static int 
pmu_init(void); @@ -913,7 +913,8 @@ int powerbook_sleep(void) struct adb_request sleep_req; /* Notify device drivers */ - ret = notifier_call_chain(&sleep_notifier_list, PBOOK_SLEEP, NULL); + ret = blocking_notifier_call_chain(&sleep_notifier_list, + PBOOK_SLEEP, NULL); if (ret & NOTIFY_STOP_MASK) return -EBUSY; @@ -984,7 +985,7 @@ int powerbook_sleep(void) enable_irq(i); /* Notify drivers */ - notifier_call_chain(&sleep_notifier_list, PBOOK_WAKE, NULL); + blocking_notifier_call_chain(&sleep_notifier_list, PBOOK_WAKE, NULL); /* reenable ADB autopoll */ pmu_adb_autopoll(adb_dev_map); diff --git a/drivers/macintosh/windfarm_core.c b/drivers/macintosh/windfarm_core.c index 6c0ba04bc57a..ab3faa702d58 100644 --- a/drivers/macintosh/windfarm_core.c +++ b/drivers/macintosh/windfarm_core.c @@ -52,7 +52,7 @@ static LIST_HEAD(wf_controls); static LIST_HEAD(wf_sensors); static DEFINE_MUTEX(wf_lock); -static struct notifier_block *wf_client_list; +static BLOCKING_NOTIFIER_HEAD(wf_client_list); static int wf_client_count; static unsigned int wf_overtemp; static unsigned int wf_overtemp_counter; @@ -68,7 +68,7 @@ static struct platform_device wf_platform_device = { static inline void wf_notify(int event, void *param) { - notifier_call_chain(&wf_client_list, event, param); + blocking_notifier_call_chain(&wf_client_list, event, param); } int wf_critical_overtemp(void) @@ -398,7 +398,7 @@ int wf_register_client(struct notifier_block *nb) struct wf_sensor *sr; mutex_lock(&wf_lock); - rc = notifier_chain_register(&wf_client_list, nb); + rc = blocking_notifier_chain_register(&wf_client_list, nb); if (rc != 0) goto bail; wf_client_count++; @@ -417,7 +417,7 @@ EXPORT_SYMBOL_GPL(wf_register_client); int wf_unregister_client(struct notifier_block *nb) { mutex_lock(&wf_lock); - notifier_chain_unregister(&wf_client_list, nb); + blocking_notifier_chain_unregister(&wf_client_list, nb); wf_client_count++; if (wf_client_count == 0) wf_stop_thread(); diff --git a/drivers/md/Kconfig b/drivers/md/Kconfig index ac43f98062fd..fd2aae150ccc 100644 --- a/drivers/md/Kconfig +++ b/drivers/md/Kconfig @@ -127,6 +127,32 @@ config MD_RAID5 If unsure, say Y. +config MD_RAID5_RESHAPE + bool "Support adding drives to a raid-5 array (experimental)" + depends on MD_RAID5 && EXPERIMENTAL + ---help--- + A RAID-5 set can be expanded by adding extra drives. This + requires "restriping" the array which means (almost) every + block must be written to a different place. + + This option allows such restriping to be done while the array + is online. However it is still EXPERIMENTAL code. It should + work, but please be sure that you have backups. + + You will need a version of mdadm newer than 2.3.1. During the + early stage of reshape there is a critical section where live data + is being over-written. A crash during this time needs extra care + for recovery. The newer mdadm takes a copy of the data in the + critical section and will restore it, if necessary, after a crash. + + The mdadm usage is e.g. + mdadm --grow /dev/md1 --raid-disks=6 + to grow '/dev/md1' to having 6 disks. + + Note: The array can only be expanded, not contracted. + There should be enough spares already present to make the new + array workable. 
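[Aside: the Macintosh driver changes above move from open-coded notifier lists to the blocking-notifier API. A minimal sketch of that usage pattern, with made-up names (example_chain, example_notify, example_nb, example_setup), might look like this:]

#include <linux/notifier.h>

static BLOCKING_NOTIFIER_HEAD(example_chain);

/* called for every event posted to the chain; may sleep */
static int example_notify(struct notifier_block *nb, unsigned long event,
			  void *data)
{
	return NOTIFY_DONE;	/* or NOTIFY_STOP to veto the event */
}

static struct notifier_block example_nb = {
	.notifier_call = example_notify,
};

static int example_setup(void)
{
	/* register once ... */
	blocking_notifier_chain_register(&example_chain, &example_nb);
	/* ... then post events from process context */
	return blocking_notifier_call_chain(&example_chain, 0, NULL);
}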
+ config MD_RAID6 tristate "RAID-6 mode" depends on BLK_DEV_MD diff --git a/drivers/md/bitmap.c b/drivers/md/bitmap.c index e1c18aa1d712..f8ffaee20ff8 100644 --- a/drivers/md/bitmap.c +++ b/drivers/md/bitmap.c @@ -89,16 +89,6 @@ int bitmap_active(struct bitmap *bitmap) } #define WRITE_POOL_SIZE 256 -/* mempool for queueing pending writes on the bitmap file */ -static void *write_pool_alloc(gfp_t gfp_flags, void *data) -{ - return kmalloc(sizeof(struct page_list), gfp_flags); -} - -static void write_pool_free(void *ptr, void *data) -{ - kfree(ptr); -} /* * just a placeholder - calls kmalloc for bitmap pages @@ -1564,8 +1554,8 @@ int bitmap_create(mddev_t *mddev) spin_lock_init(&bitmap->write_lock); INIT_LIST_HEAD(&bitmap->complete_pages); init_waitqueue_head(&bitmap->write_wait); - bitmap->write_pool = mempool_create(WRITE_POOL_SIZE, write_pool_alloc, - write_pool_free, NULL); + bitmap->write_pool = mempool_create_kmalloc_pool(WRITE_POOL_SIZE, + sizeof(struct page_list)); err = -ENOMEM; if (!bitmap->write_pool) goto error; diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c index e7a650f9ca07..61a590bb6241 100644 --- a/drivers/md/dm-crypt.c +++ b/drivers/md/dm-crypt.c @@ -94,20 +94,6 @@ struct crypt_config { static kmem_cache_t *_crypt_io_pool; /* - * Mempool alloc and free functions for the page - */ -static void *mempool_alloc_page(gfp_t gfp_mask, void *data) -{ - return alloc_page(gfp_mask); -} - -static void mempool_free_page(void *page, void *data) -{ - __free_page(page); -} - - -/* * Different IV generation algorithms: * * plain: the initial vector is the 32-bit low-endian version of the sector @@ -532,6 +518,7 @@ static int crypt_ctr(struct dm_target *ti, unsigned int argc, char **argv) char *ivopts; unsigned int crypto_flags; unsigned int key_size; + unsigned long long tmpll; if (argc != 5) { ti->error = PFX "Not enough arguments"; @@ -630,15 +617,13 @@ static int crypt_ctr(struct dm_target *ti, unsigned int argc, char **argv) } } - cc->io_pool = mempool_create(MIN_IOS, mempool_alloc_slab, - mempool_free_slab, _crypt_io_pool); + cc->io_pool = mempool_create_slab_pool(MIN_IOS, _crypt_io_pool); if (!cc->io_pool) { ti->error = PFX "Cannot allocate crypt io mempool"; goto bad3; } - cc->page_pool = mempool_create(MIN_POOL_PAGES, mempool_alloc_page, - mempool_free_page, NULL); + cc->page_pool = mempool_create_page_pool(MIN_POOL_PAGES, 0); if (!cc->page_pool) { ti->error = PFX "Cannot allocate page mempool"; goto bad4; @@ -649,15 +634,17 @@ static int crypt_ctr(struct dm_target *ti, unsigned int argc, char **argv) goto bad5; } - if (sscanf(argv[2], SECTOR_FORMAT, &cc->iv_offset) != 1) { + if (sscanf(argv[2], "%llu", &tmpll) != 1) { ti->error = PFX "Invalid iv_offset sector"; goto bad5; } + cc->iv_offset = tmpll; - if (sscanf(argv[4], SECTOR_FORMAT, &cc->start) != 1) { + if (sscanf(argv[4], "%llu", &tmpll) != 1) { ti->error = PFX "Invalid device sector"; goto bad5; } + cc->start = tmpll; if (dm_get_device(ti, argv[3], cc->start, ti->len, dm_table_get_mode(ti->table), &cc->dev)) { @@ -901,8 +888,8 @@ static int crypt_status(struct dm_target *ti, status_type_t type, result[sz++] = '-'; } - DMEMIT(" " SECTOR_FORMAT " %s " SECTOR_FORMAT, - cc->iv_offset, cc->dev->name, cc->start); + DMEMIT(" %llu %s %llu", (unsigned long long)cc->iv_offset, + cc->dev->name, (unsigned long long)cc->start); break; } return 0; diff --git a/drivers/md/dm-io.c b/drivers/md/dm-io.c index 4809b209fbb1..da663d2ff552 100644 --- a/drivers/md/dm-io.c +++ b/drivers/md/dm-io.c @@ -32,16 +32,6 @@ struct io { static 
unsigned _num_ios; static mempool_t *_io_pool; -static void *alloc_io(gfp_t gfp_mask, void *pool_data) -{ - return kmalloc(sizeof(struct io), gfp_mask); -} - -static void free_io(void *element, void *pool_data) -{ - kfree(element); -} - static unsigned int pages_to_ios(unsigned int pages) { return 4 * pages; /* too many ? */ @@ -65,7 +55,8 @@ static int resize_pool(unsigned int new_ios) } else { /* create new pool */ - _io_pool = mempool_create(new_ios, alloc_io, free_io, NULL); + _io_pool = mempool_create_kmalloc_pool(new_ios, + sizeof(struct io)); if (!_io_pool) return -ENOMEM; diff --git a/drivers/md/dm-ioctl.c b/drivers/md/dm-ioctl.c index 442e2be6052e..8edd6435414d 100644 --- a/drivers/md/dm-ioctl.c +++ b/drivers/md/dm-ioctl.c @@ -15,6 +15,7 @@ #include <linux/slab.h> #include <linux/devfs_fs_kernel.h> #include <linux/dm-ioctl.h> +#include <linux/hdreg.h> #include <asm/uaccess.h> @@ -244,9 +245,9 @@ static void __hash_remove(struct hash_cell *hc) dm_table_put(table); } - dm_put(hc->md); if (hc->new_map) dm_table_put(hc->new_map); + dm_put(hc->md); free_cell(hc); } @@ -600,12 +601,22 @@ static int dev_create(struct dm_ioctl *param, size_t param_size) */ static struct hash_cell *__find_device_hash_cell(struct dm_ioctl *param) { + struct mapped_device *md; + void *mdptr = NULL; + if (*param->uuid) return __get_uuid_cell(param->uuid); - else if (*param->name) + + if (*param->name) return __get_name_cell(param->name); - else - return dm_get_mdptr(huge_decode_dev(param->dev)); + + md = dm_get_md(huge_decode_dev(param->dev)); + if (md) { + mdptr = dm_get_mdptr(md); + dm_put(md); + } + + return mdptr; } static struct mapped_device *find_device(struct dm_ioctl *param) @@ -690,6 +701,54 @@ static int dev_rename(struct dm_ioctl *param, size_t param_size) return dm_hash_rename(param->name, new_name); } +static int dev_set_geometry(struct dm_ioctl *param, size_t param_size) +{ + int r = -EINVAL, x; + struct mapped_device *md; + struct hd_geometry geometry; + unsigned long indata[4]; + char *geostr = (char *) param + param->data_start; + + md = find_device(param); + if (!md) + return -ENXIO; + + if (geostr < (char *) (param + 1) || + invalid_str(geostr, (void *) param + param_size)) { + DMWARN("Invalid geometry supplied."); + goto out; + } + + x = sscanf(geostr, "%lu %lu %lu %lu", indata, + indata + 1, indata + 2, indata + 3); + + if (x != 4) { + DMWARN("Unable to interpret geometry settings."); + goto out; + } + + if (indata[0] > 65535 || indata[1] > 255 || + indata[2] > 255 || indata[3] > ULONG_MAX) { + DMWARN("Geometry exceeds range limits."); + goto out; + } + + geometry.cylinders = indata[0]; + geometry.heads = indata[1]; + geometry.sectors = indata[2]; + geometry.start = indata[3]; + + r = dm_set_geometry(md, &geometry); + if (!r) + r = __dev_status(md, param); + + param->data_size = 0; + +out: + dm_put(md); + return r; +} + static int do_suspend(struct dm_ioctl *param) { int r = 0; @@ -975,33 +1034,43 @@ static int table_load(struct dm_ioctl *param, size_t param_size) int r; struct hash_cell *hc; struct dm_table *t; + struct mapped_device *md; - r = dm_table_create(&t, get_mode(param), param->target_count); + md = find_device(param); + if (!md) + return -ENXIO; + + r = dm_table_create(&t, get_mode(param), param->target_count, md); if (r) - return r; + goto out; r = populate_table(t, param, param_size); if (r) { dm_table_put(t); - return r; + goto out; } down_write(&_hash_lock); - hc = __find_device_hash_cell(param); - if (!hc) { - DMWARN("device doesn't appear to be in the dev hash table."); 
- up_write(&_hash_lock); + hc = dm_get_mdptr(md); + if (!hc || hc->md != md) { + DMWARN("device has been removed from the dev hash table."); dm_table_put(t); - return -ENXIO; + up_write(&_hash_lock); + r = -ENXIO; + goto out; } if (hc->new_map) dm_table_put(hc->new_map); hc->new_map = t; + up_write(&_hash_lock); + param->flags |= DM_INACTIVE_PRESENT_FLAG; + r = __dev_status(md, param); + +out: + dm_put(md); - r = __dev_status(hc->md, param); - up_write(&_hash_lock); return r; } @@ -1214,7 +1283,8 @@ static ioctl_fn lookup_ioctl(unsigned int cmd) {DM_LIST_VERSIONS_CMD, list_versions}, - {DM_TARGET_MSG_CMD, target_message} + {DM_TARGET_MSG_CMD, target_message}, + {DM_DEV_SET_GEOMETRY_CMD, dev_set_geometry} }; return (cmd >= ARRAY_SIZE(_ioctls)) ? NULL : _ioctls[cmd].fn; diff --git a/drivers/md/dm-linear.c b/drivers/md/dm-linear.c index 6a2cd5dc8a63..daf586c0898d 100644 --- a/drivers/md/dm-linear.c +++ b/drivers/md/dm-linear.c @@ -26,6 +26,7 @@ struct linear_c { static int linear_ctr(struct dm_target *ti, unsigned int argc, char **argv) { struct linear_c *lc; + unsigned long long tmp; if (argc != 2) { ti->error = "dm-linear: Invalid argument count"; @@ -38,10 +39,11 @@ static int linear_ctr(struct dm_target *ti, unsigned int argc, char **argv) return -ENOMEM; } - if (sscanf(argv[1], SECTOR_FORMAT, &lc->start) != 1) { + if (sscanf(argv[1], "%llu", &tmp) != 1) { ti->error = "dm-linear: Invalid device sector"; goto bad; } + lc->start = tmp; if (dm_get_device(ti, argv[0], lc->start, ti->len, dm_table_get_mode(ti->table), &lc->dev)) { @@ -87,8 +89,8 @@ static int linear_status(struct dm_target *ti, status_type_t type, break; case STATUSTYPE_TABLE: - snprintf(result, maxlen, "%s " SECTOR_FORMAT, lc->dev->name, - lc->start); + snprintf(result, maxlen, "%s %llu", lc->dev->name, + (unsigned long long)lc->start); break; } return 0; diff --git a/drivers/md/dm-mpath.c b/drivers/md/dm-mpath.c index f72a82fb9434..1816f30678ed 100644 --- a/drivers/md/dm-mpath.c +++ b/drivers/md/dm-mpath.c @@ -179,8 +179,7 @@ static struct multipath *alloc_multipath(void) m->queue_io = 1; INIT_WORK(&m->process_queued_ios, process_queued_ios, m); INIT_WORK(&m->trigger_event, trigger_event, m); - m->mpio_pool = mempool_create(MIN_IOS, mempool_alloc_slab, - mempool_free_slab, _mpio_cache); + m->mpio_pool = mempool_create_slab_pool(MIN_IOS, _mpio_cache); if (!m->mpio_pool) { kfree(m); return NULL; diff --git a/drivers/md/dm-path-selector.c b/drivers/md/dm-path-selector.c index a28c1c2b4ef5..f10a0c89b3f4 100644 --- a/drivers/md/dm-path-selector.c +++ b/drivers/md/dm-path-selector.c @@ -86,8 +86,7 @@ void dm_put_path_selector(struct path_selector_type *pst) if (--psi->use == 0) module_put(psi->pst.module); - if (psi->use < 0) - BUG(); + BUG_ON(psi->use < 0); out: up_read(&_ps_lock); diff --git a/drivers/md/dm-raid1.c b/drivers/md/dm-raid1.c index 6cfa8d435d55..d12cf3e5e076 100644 --- a/drivers/md/dm-raid1.c +++ b/drivers/md/dm-raid1.c @@ -122,16 +122,6 @@ static inline sector_t region_to_sector(struct region_hash *rh, region_t region) /* FIXME move this */ static void queue_bio(struct mirror_set *ms, struct bio *bio, int rw); -static void *region_alloc(gfp_t gfp_mask, void *pool_data) -{ - return kmalloc(sizeof(struct region), gfp_mask); -} - -static void region_free(void *element, void *pool_data) -{ - kfree(element); -} - #define MIN_REGIONS 64 #define MAX_RECOVERY 1 static int rh_init(struct region_hash *rh, struct mirror_set *ms, @@ -173,8 +163,8 @@ static int rh_init(struct region_hash *rh, struct mirror_set *ms, 
INIT_LIST_HEAD(&rh->quiesced_regions); INIT_LIST_HEAD(&rh->recovered_regions); - rh->region_pool = mempool_create(MIN_REGIONS, region_alloc, - region_free, NULL); + rh->region_pool = mempool_create_kmalloc_pool(MIN_REGIONS, + sizeof(struct region)); if (!rh->region_pool) { vfree(rh->buckets); rh->buckets = NULL; @@ -412,9 +402,21 @@ static void rh_dec(struct region_hash *rh, region_t region) spin_lock_irqsave(&rh->region_lock, flags); if (atomic_dec_and_test(®->pending)) { + /* + * There is no pending I/O for this region. + * We can move the region to corresponding list for next action. + * At this point, the region is not yet connected to any list. + * + * If the state is RH_NOSYNC, the region should be kept off + * from clean list. + * The hash entry for RH_NOSYNC will remain in memory + * until the region is recovered or the map is reloaded. + */ + + /* do nothing for RH_NOSYNC */ if (reg->state == RH_RECOVERING) { list_add_tail(®->list, &rh->quiesced_regions); - } else { + } else if (reg->state == RH_DIRTY) { reg->state = RH_CLEAN; list_add(®->list, &rh->clean_regions); } @@ -932,9 +934,9 @@ static inline int _check_region_size(struct dm_target *ti, uint32_t size) static int get_mirror(struct mirror_set *ms, struct dm_target *ti, unsigned int mirror, char **argv) { - sector_t offset; + unsigned long long offset; - if (sscanf(argv[1], SECTOR_FORMAT, &offset) != 1) { + if (sscanf(argv[1], "%llu", &offset) != 1) { ti->error = "dm-mirror: Invalid offset"; return -EINVAL; } @@ -1201,16 +1203,17 @@ static int mirror_status(struct dm_target *ti, status_type_t type, for (m = 0; m < ms->nr_mirrors; m++) DMEMIT("%s ", ms->mirror[m].dev->name); - DMEMIT(SECTOR_FORMAT "/" SECTOR_FORMAT, - ms->rh.log->type->get_sync_count(ms->rh.log), - ms->nr_regions); + DMEMIT("%llu/%llu", + (unsigned long long)ms->rh.log->type-> + get_sync_count(ms->rh.log), + (unsigned long long)ms->nr_regions); break; case STATUSTYPE_TABLE: DMEMIT("%d ", ms->nr_mirrors); for (m = 0; m < ms->nr_mirrors; m++) - DMEMIT("%s " SECTOR_FORMAT " ", - ms->mirror[m].dev->name, ms->mirror[m].offset); + DMEMIT("%s %llu ", ms->mirror[m].dev->name, + (unsigned long long)ms->mirror[m].offset); } return 0; diff --git a/drivers/md/dm-snap.c b/drivers/md/dm-snap.c index f3759dd7828e..08312b46463a 100644 --- a/drivers/md/dm-snap.c +++ b/drivers/md/dm-snap.c @@ -49,11 +49,26 @@ struct pending_exception { struct bio_list snapshot_bios; /* - * Other pending_exceptions that are processing this - * chunk. When this list is empty, we know we can - * complete the origins. + * Short-term queue of pending exceptions prior to submission. */ - struct list_head siblings; + struct list_head list; + + /* + * The primary pending_exception is the one that holds + * the sibling_count and the list of origin_bios for a + * group of pending_exceptions. It is always last to get freed. + * These fields get set up when writing to the origin. + */ + struct pending_exception *primary_pe; + + /* + * Number of pending_exceptions processing this chunk. + * When this drops to zero we must complete the origin bios. + * If incrementing or decrementing this, hold pe->snap->lock for + * the sibling concerned and not pe->primary_pe->snap->lock unless + * they are the same. 
+ */ + atomic_t sibling_count; /* Pointer back to snapshot context */ struct dm_snapshot *snap; @@ -377,6 +392,8 @@ static void read_snapshot_metadata(struct dm_snapshot *s) down_write(&s->lock); s->valid = 0; up_write(&s->lock); + + dm_table_event(s->table); } } @@ -542,8 +559,12 @@ static void snapshot_dtr(struct dm_target *ti) { struct dm_snapshot *s = (struct dm_snapshot *) ti->private; + /* Prevent further origin writes from using this snapshot. */ + /* After this returns there can be no new kcopyd jobs. */ unregister_snapshot(s); + kcopyd_client_destroy(s->kcopyd_client); + exit_exception_table(&s->pending, pending_cache); exit_exception_table(&s->complete, exception_cache); @@ -552,7 +573,7 @@ static void snapshot_dtr(struct dm_target *ti) dm_put_device(ti, s->origin); dm_put_device(ti, s->cow); - kcopyd_client_destroy(s->kcopyd_client); + kfree(s); } @@ -586,78 +607,117 @@ static void error_bios(struct bio *bio) } } +static inline void error_snapshot_bios(struct pending_exception *pe) +{ + error_bios(bio_list_get(&pe->snapshot_bios)); +} + static struct bio *__flush_bios(struct pending_exception *pe) { - struct pending_exception *sibling; + /* + * If this pe is involved in a write to the origin and + * it is the last sibling to complete then release + * the bios for the original write to the origin. + */ + + if (pe->primary_pe && + atomic_dec_and_test(&pe->primary_pe->sibling_count)) + return bio_list_get(&pe->primary_pe->origin_bios); + + return NULL; +} + +static void __invalidate_snapshot(struct dm_snapshot *s, + struct pending_exception *pe, int err) +{ + if (!s->valid) + return; - if (list_empty(&pe->siblings)) - return bio_list_get(&pe->origin_bios); + if (err == -EIO) + DMERR("Invalidating snapshot: Error reading/writing."); + else if (err == -ENOMEM) + DMERR("Invalidating snapshot: Unable to allocate exception."); - sibling = list_entry(pe->siblings.next, - struct pending_exception, siblings); + if (pe) + remove_exception(&pe->e); - list_del(&pe->siblings); + if (s->store.drop_snapshot) + s->store.drop_snapshot(&s->store); - /* This is fine as long as kcopyd is single-threaded. If kcopyd - * becomes multi-threaded, we'll need some locking here. - */ - bio_list_merge(&sibling->origin_bios, &pe->origin_bios); + s->valid = 0; - return NULL; + dm_table_event(s->table); } static void pending_complete(struct pending_exception *pe, int success) { struct exception *e; + struct pending_exception *primary_pe; struct dm_snapshot *s = pe->snap; struct bio *flush = NULL; - if (success) { - e = alloc_exception(); - if (!e) { - DMWARN("Unable to allocate exception."); - down_write(&s->lock); - s->store.drop_snapshot(&s->store); - s->valid = 0; - flush = __flush_bios(pe); - up_write(&s->lock); - - error_bios(bio_list_get(&pe->snapshot_bios)); - goto out; - } - *e = pe->e; - - /* - * Add a proper exception, and remove the - * in-flight exception from the list. 
- */ + if (!success) { + /* Read/write error - snapshot is unusable */ down_write(&s->lock); - insert_exception(&s->complete, e); - remove_exception(&pe->e); + __invalidate_snapshot(s, pe, -EIO); flush = __flush_bios(pe); - - /* Submit any pending write bios */ up_write(&s->lock); - flush_bios(bio_list_get(&pe->snapshot_bios)); - } else { - /* Read/write error - snapshot is unusable */ + error_snapshot_bios(pe); + goto out; + } + + e = alloc_exception(); + if (!e) { down_write(&s->lock); - if (s->valid) - DMERR("Error reading/writing snapshot"); - s->store.drop_snapshot(&s->store); - s->valid = 0; - remove_exception(&pe->e); + __invalidate_snapshot(s, pe, -ENOMEM); flush = __flush_bios(pe); up_write(&s->lock); - error_bios(bio_list_get(&pe->snapshot_bios)); + error_snapshot_bios(pe); + goto out; + } + *e = pe->e; - dm_table_event(s->table); + /* + * Add a proper exception, and remove the + * in-flight exception from the list. + */ + down_write(&s->lock); + if (!s->valid) { + flush = __flush_bios(pe); + up_write(&s->lock); + + free_exception(e); + + error_snapshot_bios(pe); + goto out; } + insert_exception(&s->complete, e); + remove_exception(&pe->e); + flush = __flush_bios(pe); + + up_write(&s->lock); + + /* Submit any pending write bios */ + flush_bios(bio_list_get(&pe->snapshot_bios)); + out: - free_pending_exception(pe); + primary_pe = pe->primary_pe; + + /* + * Free the pe if it's not linked to an origin write or if + * it's not itself a primary pe. + */ + if (!primary_pe || primary_pe != pe) + free_pending_exception(pe); + + /* + * Free the primary pe if nothing references it. + */ + if (primary_pe && !atomic_read(&primary_pe->sibling_count)) + free_pending_exception(primary_pe); if (flush) flush_bios(flush); @@ -734,38 +794,45 @@ __find_pending_exception(struct dm_snapshot *s, struct bio *bio) if (e) { /* cast the exception to a pending exception */ pe = container_of(e, struct pending_exception, e); + goto out; + } - } else { - /* - * Create a new pending exception, we don't want - * to hold the lock while we do this. - */ - up_write(&s->lock); - pe = alloc_pending_exception(); - down_write(&s->lock); + /* + * Create a new pending exception, we don't want + * to hold the lock while we do this. 
+ */ + up_write(&s->lock); + pe = alloc_pending_exception(); + down_write(&s->lock); - e = lookup_exception(&s->pending, chunk); - if (e) { - free_pending_exception(pe); - pe = container_of(e, struct pending_exception, e); - } else { - pe->e.old_chunk = chunk; - bio_list_init(&pe->origin_bios); - bio_list_init(&pe->snapshot_bios); - INIT_LIST_HEAD(&pe->siblings); - pe->snap = s; - pe->started = 0; - - if (s->store.prepare_exception(&s->store, &pe->e)) { - free_pending_exception(pe); - s->valid = 0; - return NULL; - } + if (!s->valid) { + free_pending_exception(pe); + return NULL; + } - insert_exception(&s->pending, &pe->e); - } + e = lookup_exception(&s->pending, chunk); + if (e) { + free_pending_exception(pe); + pe = container_of(e, struct pending_exception, e); + goto out; + } + + pe->e.old_chunk = chunk; + bio_list_init(&pe->origin_bios); + bio_list_init(&pe->snapshot_bios); + pe->primary_pe = NULL; + atomic_set(&pe->sibling_count, 1); + pe->snap = s; + pe->started = 0; + + if (s->store.prepare_exception(&s->store, &pe->e)) { + free_pending_exception(pe); + return NULL; } + insert_exception(&s->pending, &pe->e); + + out: return pe; } @@ -782,13 +849,15 @@ static int snapshot_map(struct dm_target *ti, struct bio *bio, { struct exception *e; struct dm_snapshot *s = (struct dm_snapshot *) ti->private; + int copy_needed = 0; int r = 1; chunk_t chunk; - struct pending_exception *pe; + struct pending_exception *pe = NULL; chunk = sector_to_chunk(s, bio->bi_sector); /* Full snapshots are not usable */ + /* To get here the table must be live so s->active is always set. */ if (!s->valid) return -EIO; @@ -806,36 +875,41 @@ static int snapshot_map(struct dm_target *ti, struct bio *bio, * to copy an exception */ down_write(&s->lock); + if (!s->valid) { + r = -EIO; + goto out_unlock; + } + /* If the block is already remapped - use that, else remap it */ e = lookup_exception(&s->complete, chunk); if (e) { remap_exception(s, e, bio); - up_write(&s->lock); - - } else { - pe = __find_pending_exception(s, bio); - - if (!pe) { - if (s->store.drop_snapshot) - s->store.drop_snapshot(&s->store); - s->valid = 0; - r = -EIO; - up_write(&s->lock); - } else { - remap_exception(s, &pe->e, bio); - bio_list_add(&pe->snapshot_bios, bio); - - if (!pe->started) { - /* this is protected by snap->lock */ - pe->started = 1; - up_write(&s->lock); - start_copy(pe); - } else - up_write(&s->lock); - r = 0; - } + goto out_unlock; + } + + pe = __find_pending_exception(s, bio); + if (!pe) { + __invalidate_snapshot(s, pe, -ENOMEM); + r = -EIO; + goto out_unlock; + } + + remap_exception(s, &pe->e, bio); + bio_list_add(&pe->snapshot_bios, bio); + + if (!pe->started) { + /* this is protected by snap->lock */ + pe->started = 1; + copy_needed = 1; } + r = 0; + + out_unlock: + up_write(&s->lock); + + if (copy_needed) + start_copy(pe); } else { /* * FIXME: this read path scares me because we @@ -847,6 +921,11 @@ static int snapshot_map(struct dm_target *ti, struct bio *bio, /* Do reads */ down_read(&s->lock); + if (!s->valid) { + up_read(&s->lock); + return -EIO; + } + /* See if it it has been remapped */ e = lookup_exception(&s->complete, chunk); if (e) @@ -884,9 +963,9 @@ static int snapshot_status(struct dm_target *ti, status_type_t type, snap->store.fraction_full(&snap->store, &numerator, &denominator); - snprintf(result, maxlen, - SECTOR_FORMAT "/" SECTOR_FORMAT, - numerator, denominator); + snprintf(result, maxlen, "%llu/%llu", + (unsigned long long)numerator, + (unsigned long long)denominator); } else snprintf(result, maxlen, 
"Unknown"); @@ -899,9 +978,10 @@ static int snapshot_status(struct dm_target *ti, status_type_t type, * to make private copies if the output is to * make sense. */ - snprintf(result, maxlen, "%s %s %c " SECTOR_FORMAT, + snprintf(result, maxlen, "%s %s %c %llu", snap->origin->name, snap->cow->name, - snap->type, snap->chunk_size); + snap->type, + (unsigned long long)snap->chunk_size); break; } @@ -911,40 +991,27 @@ static int snapshot_status(struct dm_target *ti, status_type_t type, /*----------------------------------------------------------------- * Origin methods *---------------------------------------------------------------*/ -static void list_merge(struct list_head *l1, struct list_head *l2) -{ - struct list_head *l1_n, *l2_p; - - l1_n = l1->next; - l2_p = l2->prev; - - l1->next = l2; - l2->prev = l1; - - l2_p->next = l1_n; - l1_n->prev = l2_p; -} - static int __origin_write(struct list_head *snapshots, struct bio *bio) { - int r = 1, first = 1; + int r = 1, first = 0; struct dm_snapshot *snap; struct exception *e; - struct pending_exception *pe, *last = NULL; + struct pending_exception *pe, *next_pe, *primary_pe = NULL; chunk_t chunk; + LIST_HEAD(pe_queue); /* Do all the snapshots on this origin */ list_for_each_entry (snap, snapshots, list) { + down_write(&snap->lock); + /* Only deal with valid and active snapshots */ if (!snap->valid || !snap->active) - continue; + goto next_snapshot; /* Nothing to do if writing beyond end of snapshot */ if (bio->bi_sector >= dm_table_get_size(snap->table)) - continue; - - down_write(&snap->lock); + goto next_snapshot; /* * Remember, different snapshots can have @@ -956,49 +1023,75 @@ static int __origin_write(struct list_head *snapshots, struct bio *bio) * Check exception table to see if block * is already remapped in this snapshot * and trigger an exception if not. + * + * sibling_count is initialised to 1 so pending_complete() + * won't destroy the primary_pe while we're inside this loop. */ e = lookup_exception(&snap->complete, chunk); - if (!e) { - pe = __find_pending_exception(snap, bio); - if (!pe) { - snap->store.drop_snapshot(&snap->store); - snap->valid = 0; - - } else { - if (last) - list_merge(&pe->siblings, - &last->siblings); - - last = pe; - r = 0; + if (e) + goto next_snapshot; + + pe = __find_pending_exception(snap, bio); + if (!pe) { + __invalidate_snapshot(snap, pe, ENOMEM); + goto next_snapshot; + } + + if (!primary_pe) { + /* + * Either every pe here has same + * primary_pe or none has one yet. + */ + if (pe->primary_pe) + primary_pe = pe->primary_pe; + else { + primary_pe = pe; + first = 1; } + + bio_list_add(&primary_pe->origin_bios, bio); + + r = 0; + } + + if (!pe->primary_pe) { + atomic_inc(&primary_pe->sibling_count); + pe->primary_pe = primary_pe; + } + + if (!pe->started) { + pe->started = 1; + list_add_tail(&pe->list, &pe_queue); } + next_snapshot: up_write(&snap->lock); } + if (!primary_pe) + goto out; + /* - * Now that we have a complete pe list we can start the copying. + * If this is the first time we're processing this chunk and + * sibling_count is now 1 it means all the pending exceptions + * got completed while we were in the loop above, so it falls to + * us here to remove the primary_pe and submit any origin_bios. 
*/ - if (last) { - pe = last; - do { - down_write(&pe->snap->lock); - if (first) - bio_list_add(&pe->origin_bios, bio); - if (!pe->started) { - pe->started = 1; - up_write(&pe->snap->lock); - start_copy(pe); - } else - up_write(&pe->snap->lock); - first = 0; - pe = list_entry(pe->siblings.next, - struct pending_exception, siblings); - - } while (pe != last); + + if (first && atomic_dec_and_test(&primary_pe->sibling_count)) { + flush_bios(bio_list_get(&primary_pe->origin_bios)); + free_pending_exception(primary_pe); + /* If we got here, pe_queue is necessarily empty. */ + goto out; } + /* + * Now that we have a complete pe list we can start the copying. + */ + list_for_each_entry_safe(pe, next_pe, &pe_queue, list) + start_copy(pe); + + out: return r; } @@ -1174,8 +1267,7 @@ static int __init dm_snapshot_init(void) goto bad4; } - pending_pool = mempool_create(128, mempool_alloc_slab, - mempool_free_slab, pending_cache); + pending_pool = mempool_create_slab_pool(128, pending_cache); if (!pending_pool) { DMERR("Couldn't create pending pool."); r = -ENOMEM; diff --git a/drivers/md/dm-stripe.c b/drivers/md/dm-stripe.c index 697aacafb02a..08328a8f5a3c 100644 --- a/drivers/md/dm-stripe.c +++ b/drivers/md/dm-stripe.c @@ -49,9 +49,9 @@ static inline struct stripe_c *alloc_context(unsigned int stripes) static int get_stripe(struct dm_target *ti, struct stripe_c *sc, unsigned int stripe, char **argv) { - sector_t start; + unsigned long long start; - if (sscanf(argv[1], SECTOR_FORMAT, &start) != 1) + if (sscanf(argv[1], "%llu", &start) != 1) return -EINVAL; if (dm_get_device(ti, argv[0], start, sc->stripe_width, @@ -103,7 +103,7 @@ static int stripe_ctr(struct dm_target *ti, unsigned int argc, char **argv) return -EINVAL; } - if (((uint32_t)ti->len) & (chunk_size - 1)) { + if (ti->len & (chunk_size - 1)) { ti->error = "dm-stripe: Target length not divisible by " "chunk size"; return -EINVAL; @@ -201,10 +201,11 @@ static int stripe_status(struct dm_target *ti, break; case STATUSTYPE_TABLE: - DMEMIT("%d " SECTOR_FORMAT, sc->stripes, sc->chunk_mask + 1); + DMEMIT("%d %llu", sc->stripes, + (unsigned long long)sc->chunk_mask + 1); for (i = 0; i < sc->stripes; i++) - DMEMIT(" %s " SECTOR_FORMAT, sc->stripe[i].dev->name, - sc->stripe[i].physical_start); + DMEMIT(" %s %llu", sc->stripe[i].dev->name, + (unsigned long long)sc->stripe[i].physical_start); break; } return 0; diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c index 9b1e2f5ca630..8f56a54cf0ce 100644 --- a/drivers/md/dm-table.c +++ b/drivers/md/dm-table.c @@ -14,6 +14,7 @@ #include <linux/ctype.h> #include <linux/slab.h> #include <linux/interrupt.h> +#include <linux/mutex.h> #include <asm/atomic.h> #define MAX_DEPTH 16 @@ -22,6 +23,7 @@ #define CHILDREN_PER_NODE (KEYS_PER_NODE + 1) struct dm_table { + struct mapped_device *md; atomic_t holders; /* btree table */ @@ -97,6 +99,8 @@ static void combine_restrictions_low(struct io_restrictions *lhs, lhs->seg_boundary_mask = min_not_zero(lhs->seg_boundary_mask, rhs->seg_boundary_mask); + + lhs->no_cluster |= rhs->no_cluster; } /* @@ -204,7 +208,8 @@ static int alloc_targets(struct dm_table *t, unsigned int num) return 0; } -int dm_table_create(struct dm_table **result, int mode, unsigned num_targets) +int dm_table_create(struct dm_table **result, int mode, + unsigned num_targets, struct mapped_device *md) { struct dm_table *t = kmalloc(sizeof(*t), GFP_KERNEL); @@ -227,6 +232,7 @@ int dm_table_create(struct dm_table **result, int mode, unsigned num_targets) } t->mode = mode; + t->md = md; *result = 
t; return 0; } @@ -345,20 +351,19 @@ static struct dm_dev *find_device(struct list_head *l, dev_t dev) /* * Open a device so we can use it as a map destination. */ -static int open_dev(struct dm_dev *d, dev_t dev) +static int open_dev(struct dm_dev *d, dev_t dev, struct mapped_device *md) { static char *_claim_ptr = "I belong to device-mapper"; struct block_device *bdev; int r; - if (d->bdev) - BUG(); + BUG_ON(d->bdev); bdev = open_by_devnum(dev, d->mode); if (IS_ERR(bdev)) return PTR_ERR(bdev); - r = bd_claim(bdev, _claim_ptr); + r = bd_claim_by_disk(bdev, _claim_ptr, dm_disk(md)); if (r) blkdev_put(bdev); else @@ -369,12 +374,12 @@ static int open_dev(struct dm_dev *d, dev_t dev) /* * Close a device that we've been using. */ -static void close_dev(struct dm_dev *d) +static void close_dev(struct dm_dev *d, struct mapped_device *md) { if (!d->bdev) return; - bd_release(d->bdev); + bd_release_from_disk(d->bdev, dm_disk(md)); blkdev_put(d->bdev); d->bdev = NULL; } @@ -395,7 +400,7 @@ static int check_device_area(struct dm_dev *dd, sector_t start, sector_t len) * careful to leave things as they were if we fail to reopen the * device. */ -static int upgrade_mode(struct dm_dev *dd, int new_mode) +static int upgrade_mode(struct dm_dev *dd, int new_mode, struct mapped_device *md) { int r; struct dm_dev dd_copy; @@ -405,9 +410,9 @@ static int upgrade_mode(struct dm_dev *dd, int new_mode) dd->mode |= new_mode; dd->bdev = NULL; - r = open_dev(dd, dev); + r = open_dev(dd, dev, md); if (!r) - close_dev(&dd_copy); + close_dev(&dd_copy, md); else *dd = dd_copy; @@ -427,8 +432,7 @@ static int __table_get_device(struct dm_table *t, struct dm_target *ti, struct dm_dev *dd; unsigned int major, minor; - if (!t) - BUG(); + BUG_ON(!t); if (sscanf(path, "%u:%u", &major, &minor) == 2) { /* Extract the major/minor numbers */ @@ -450,7 +454,7 @@ static int __table_get_device(struct dm_table *t, struct dm_target *ti, dd->mode = mode; dd->bdev = NULL; - if ((r = open_dev(dd, dev))) { + if ((r = open_dev(dd, dev, t->md))) { kfree(dd); return r; } @@ -461,7 +465,7 @@ static int __table_get_device(struct dm_table *t, struct dm_target *ti, list_add(&dd->list, &t->devices); } else if (dd->mode != (mode | dd->mode)) { - r = upgrade_mode(dd, mode); + r = upgrade_mode(dd, mode, t->md); if (r) return r; } @@ -525,6 +529,8 @@ int dm_get_device(struct dm_target *ti, const char *path, sector_t start, rs->seg_boundary_mask = min_not_zero(rs->seg_boundary_mask, q->seg_boundary_mask); + + rs->no_cluster |= !test_bit(QUEUE_FLAG_CLUSTER, &q->queue_flags); } return r; @@ -536,7 +542,7 @@ int dm_get_device(struct dm_target *ti, const char *path, sector_t start, void dm_put_device(struct dm_target *ti, struct dm_dev *dd) { if (atomic_dec_and_test(&dd->count)) { - close_dev(dd); + close_dev(dd, ti->table->md); list_del(&dd->list); kfree(dd); } @@ -765,14 +771,14 @@ int dm_table_complete(struct dm_table *t) return r; } -static DECLARE_MUTEX(_event_lock); +static DEFINE_MUTEX(_event_lock); void dm_table_event_callback(struct dm_table *t, void (*fn)(void *), void *context) { - down(&_event_lock); + mutex_lock(&_event_lock); t->event_fn = fn; t->event_context = context; - up(&_event_lock); + mutex_unlock(&_event_lock); } void dm_table_event(struct dm_table *t) @@ -783,10 +789,10 @@ void dm_table_event(struct dm_table *t) */ BUG_ON(in_interrupt()); - down(&_event_lock); + mutex_lock(&_event_lock); if (t->event_fn) t->event_fn(t->event_context); - up(&_event_lock); + mutex_unlock(&_event_lock); } sector_t dm_table_get_size(struct dm_table *t) 
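[Aside: several files above replace DECLARE_MUTEX()/down()/up() semaphore usage with the mutex API. A minimal sketch of the converted locking pattern, with made-up names (example_lock, example_work), might look like this:]

#include <linux/mutex.h>

static DEFINE_MUTEX(example_lock);

static void example_work(void)
{
	mutex_lock(&example_lock);	/* formerly down() */
	/* ... critical section ... */
	mutex_unlock(&example_lock);	/* formerly up() */
}

/* interruptible variant, as used for smu_part_access above */
static int example_work_interruptible(void)
{
	int rc = mutex_lock_interruptible(&example_lock);	/* formerly down_interruptible() */

	if (rc)
		return rc;	/* -EINTR if a signal arrived */
	/* ... critical section ... */
	mutex_unlock(&example_lock);
	return 0;
}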
@@ -834,6 +840,11 @@ void dm_table_set_restrictions(struct dm_table *t, struct request_queue *q) q->hardsect_size = t->limits.hardsect_size; q->max_segment_size = t->limits.max_segment_size; q->seg_boundary_mask = t->limits.seg_boundary_mask; + if (t->limits.no_cluster) + q->queue_flags &= ~(1 << QUEUE_FLAG_CLUSTER); + else + q->queue_flags |= (1 << QUEUE_FLAG_CLUSTER); + } unsigned int dm_table_get_num_targets(struct dm_table *t) @@ -945,12 +956,20 @@ int dm_table_flush_all(struct dm_table *t) return ret; } +struct mapped_device *dm_table_get_md(struct dm_table *t) +{ + dm_get(t->md); + + return t->md; +} + EXPORT_SYMBOL(dm_vcalloc); EXPORT_SYMBOL(dm_get_device); EXPORT_SYMBOL(dm_put_device); EXPORT_SYMBOL(dm_table_event); EXPORT_SYMBOL(dm_table_get_size); EXPORT_SYMBOL(dm_table_get_mode); +EXPORT_SYMBOL(dm_table_get_md); EXPORT_SYMBOL(dm_table_put); EXPORT_SYMBOL(dm_table_get); EXPORT_SYMBOL(dm_table_unplug_all); diff --git a/drivers/md/dm.c b/drivers/md/dm.c index 8c82373f7ff3..4d710b7a133b 100644 --- a/drivers/md/dm.c +++ b/drivers/md/dm.c @@ -10,6 +10,7 @@ #include <linux/init.h> #include <linux/module.h> +#include <linux/mutex.h> #include <linux/moduleparam.h> #include <linux/blkpg.h> #include <linux/bio.h> @@ -17,6 +18,7 @@ #include <linux/mempool.h> #include <linux/slab.h> #include <linux/idr.h> +#include <linux/hdreg.h> #include <linux/blktrace_api.h> static const char *_name = DM_NAME; @@ -69,6 +71,7 @@ struct mapped_device { request_queue_t *queue; struct gendisk *disk; + char name[16]; void *interface_ptr; @@ -101,6 +104,9 @@ struct mapped_device { */ struct super_block *frozen_sb; struct block_device *suspended_bdev; + + /* forced geometry settings */ + struct hd_geometry geometry; }; #define MIN_IOS 256 @@ -226,6 +232,13 @@ static int dm_blk_close(struct inode *inode, struct file *file) return 0; } +static int dm_blk_getgeo(struct block_device *bdev, struct hd_geometry *geo) +{ + struct mapped_device *md = bdev->bd_disk->private_data; + + return dm_get_geometry(md, geo); +} + static inline struct dm_io *alloc_io(struct mapped_device *md) { return mempool_alloc(md->io_pool, GFP_NOIO); @@ -312,6 +325,33 @@ struct dm_table *dm_get_table(struct mapped_device *md) return t; } +/* + * Get the geometry associated with a dm device + */ +int dm_get_geometry(struct mapped_device *md, struct hd_geometry *geo) +{ + *geo = md->geometry; + + return 0; +} + +/* + * Set the geometry of a device. + */ +int dm_set_geometry(struct mapped_device *md, struct hd_geometry *geo) +{ + sector_t sz = (sector_t)geo->cylinders * geo->heads * geo->sectors; + + if (geo->start > sz) { + DMWARN("Start sector is beyond the geometry limits."); + return -EINVAL; + } + + md->geometry = *geo; + + return 0; +} + /*----------------------------------------------------------------- * CRUD START: * A more elegant soln is in the works that uses the queue @@ -704,14 +744,14 @@ static int dm_any_congested(void *congested_data, int bdi_bits) /*----------------------------------------------------------------- * An IDR is used to keep track of allocated minor numbers. 
*---------------------------------------------------------------*/ -static DECLARE_MUTEX(_minor_lock); +static DEFINE_MUTEX(_minor_lock); static DEFINE_IDR(_minor_idr); static void free_minor(unsigned int minor) { - down(&_minor_lock); + mutex_lock(&_minor_lock); idr_remove(&_minor_idr, minor); - up(&_minor_lock); + mutex_unlock(&_minor_lock); } /* @@ -724,7 +764,7 @@ static int specific_minor(struct mapped_device *md, unsigned int minor) if (minor >= (1 << MINORBITS)) return -EINVAL; - down(&_minor_lock); + mutex_lock(&_minor_lock); if (idr_find(&_minor_idr, minor)) { r = -EBUSY; @@ -749,7 +789,7 @@ static int specific_minor(struct mapped_device *md, unsigned int minor) } out: - up(&_minor_lock); + mutex_unlock(&_minor_lock); return r; } @@ -758,7 +798,7 @@ static int next_free_minor(struct mapped_device *md, unsigned int *minor) int r; unsigned int m; - down(&_minor_lock); + mutex_lock(&_minor_lock); r = idr_pre_get(&_minor_idr, GFP_KERNEL); if (!r) { @@ -780,7 +820,7 @@ static int next_free_minor(struct mapped_device *md, unsigned int *minor) *minor = m; out: - up(&_minor_lock); + mutex_unlock(&_minor_lock); return r; } @@ -823,13 +863,11 @@ static struct mapped_device *alloc_dev(unsigned int minor, int persistent) md->queue->unplug_fn = dm_unplug_all; md->queue->issue_flush_fn = dm_flush_all; - md->io_pool = mempool_create(MIN_IOS, mempool_alloc_slab, - mempool_free_slab, _io_cache); + md->io_pool = mempool_create_slab_pool(MIN_IOS, _io_cache); if (!md->io_pool) goto bad2; - md->tio_pool = mempool_create(MIN_IOS, mempool_alloc_slab, - mempool_free_slab, _tio_cache); + md->tio_pool = mempool_create_slab_pool(MIN_IOS, _tio_cache); if (!md->tio_pool) goto bad3; @@ -844,6 +882,7 @@ static struct mapped_device *alloc_dev(unsigned int minor, int persistent) md->disk->private_data = md; sprintf(md->disk->disk_name, "dm-%d", minor); add_disk(md->disk); + format_dev_t(md->name, MKDEV(_major, minor)); atomic_set(&md->pending, 0); init_waitqueue_head(&md->wait); @@ -906,6 +945,13 @@ static int __bind(struct mapped_device *md, struct dm_table *t) sector_t size; size = dm_table_get_size(t); + + /* + * Wipe any geometry if the size of the table changed. 
+ */ + if (size != get_capacity(md->disk)) + memset(&md->geometry, 0, sizeof(md->geometry)); + __set_size(md, size); if (size == 0) return 0; @@ -969,13 +1015,13 @@ static struct mapped_device *dm_find_md(dev_t dev) if (MAJOR(dev) != _major || minor >= (1 << MINORBITS)) return NULL; - down(&_minor_lock); + mutex_lock(&_minor_lock); md = idr_find(&_minor_idr, minor); if (!md || (dm_disk(md)->first_minor != minor)) md = NULL; - up(&_minor_lock); + mutex_unlock(&_minor_lock); return md; } @@ -990,15 +1036,9 @@ struct mapped_device *dm_get_md(dev_t dev) return md; } -void *dm_get_mdptr(dev_t dev) +void *dm_get_mdptr(struct mapped_device *md) { - struct mapped_device *md; - void *mdptr = NULL; - - md = dm_find_md(dev); - if (md) - mdptr = md->interface_ptr; - return mdptr; + return md->interface_ptr; } void dm_set_mdptr(struct mapped_device *md, void *ptr) @@ -1013,18 +1053,18 @@ void dm_get(struct mapped_device *md) void dm_put(struct mapped_device *md) { - struct dm_table *map = dm_get_table(md); + struct dm_table *map; if (atomic_dec_and_test(&md->holders)) { + map = dm_get_table(md); if (!dm_suspended(md)) { dm_table_presuspend_targets(map); dm_table_postsuspend_targets(map); } __unbind(md); + dm_table_put(map); free_dev(md); } - - dm_table_put(map); } /* @@ -1109,6 +1149,7 @@ int dm_suspend(struct mapped_device *md, int do_lockfs) { struct dm_table *map = NULL; DECLARE_WAITQUEUE(wait, current); + struct bio *def; int r = -EINVAL; down(&md->suspend_lock); @@ -1168,9 +1209,11 @@ int dm_suspend(struct mapped_device *md, int do_lockfs) /* were we interrupted ? */ r = -EINTR; if (atomic_read(&md->pending)) { + clear_bit(DMF_BLOCK_IO, &md->flags); + def = bio_list_get(&md->deferred); + __flush_deferred_io(md, def); up_write(&md->io_lock); unlock_fs(md); - clear_bit(DMF_BLOCK_IO, &md->flags); goto out; } up_write(&md->io_lock); @@ -1264,6 +1307,7 @@ int dm_suspended(struct mapped_device *md) static struct block_device_operations dm_blk_dops = { .open = dm_blk_open, .release = dm_blk_close, + .getgeo = dm_blk_getgeo, .owner = THIS_MODULE }; diff --git a/drivers/md/dm.h b/drivers/md/dm.h index 4eaf075da217..fd90bc8f9e45 100644 --- a/drivers/md/dm.h +++ b/drivers/md/dm.h @@ -14,6 +14,7 @@ #include <linux/device-mapper.h> #include <linux/list.h> #include <linux/blkdev.h> +#include <linux/hdreg.h> #define DM_NAME "device-mapper" #define DMWARN(f, x...) printk(KERN_WARNING DM_NAME ": " f "\n" , ## x) @@ -23,16 +24,6 @@ #define DMEMIT(x...) sz += ((sz >= maxlen) ? \ 0 : scnprintf(result + sz, maxlen - sz, x)) -/* - * FIXME: I think this should be with the definition of sector_t - * in types.h. - */ -#ifdef CONFIG_LBD -#define SECTOR_FORMAT "%llu" -#else -#define SECTOR_FORMAT "%lu" -#endif - #define SECTOR_SHIFT 9 /* @@ -57,7 +48,7 @@ struct mapped_device; int dm_create(struct mapped_device **md); int dm_create_with_minor(unsigned int minor, struct mapped_device **md); void dm_set_mdptr(struct mapped_device *md, void *ptr); -void *dm_get_mdptr(dev_t dev); +void *dm_get_mdptr(struct mapped_device *md); struct mapped_device *dm_get_md(dev_t dev); /* @@ -95,11 +86,18 @@ int dm_wait_event(struct mapped_device *md, int event_nr); struct gendisk *dm_disk(struct mapped_device *md); int dm_suspended(struct mapped_device *md); +/* + * Geometry functions. + */ +int dm_get_geometry(struct mapped_device *md, struct hd_geometry *geo); +int dm_set_geometry(struct mapped_device *md, struct hd_geometry *geo); + /*----------------------------------------------------------------- * Functions for manipulating a table. 
Tables are also reference * counted. *---------------------------------------------------------------*/ -int dm_table_create(struct dm_table **result, int mode, unsigned num_targets); +int dm_table_create(struct dm_table **result, int mode, + unsigned num_targets, struct mapped_device *md); void dm_table_get(struct dm_table *t); void dm_table_put(struct dm_table *t); @@ -117,6 +115,7 @@ void dm_table_set_restrictions(struct dm_table *t, struct request_queue *q); unsigned int dm_table_get_num_targets(struct dm_table *t); struct list_head *dm_table_get_devices(struct dm_table *t); int dm_table_get_mode(struct dm_table *t); +struct mapped_device *dm_table_get_md(struct dm_table *t); void dm_table_presuspend_targets(struct dm_table *t); void dm_table_postsuspend_targets(struct dm_table *t); void dm_table_resume_targets(struct dm_table *t); diff --git a/drivers/md/kcopyd.c b/drivers/md/kcopyd.c index 8b3515f394a6..72480a48d88b 100644 --- a/drivers/md/kcopyd.c +++ b/drivers/md/kcopyd.c @@ -22,6 +22,7 @@ #include <linux/slab.h> #include <linux/vmalloc.h> #include <linux/workqueue.h> +#include <linux/mutex.h> #include "kcopyd.h" @@ -44,6 +45,9 @@ struct kcopyd_client { struct page_list *pages; unsigned int nr_pages; unsigned int nr_free_pages; + + wait_queue_head_t destroyq; + atomic_t nr_jobs; }; static struct page_list *alloc_pl(void) @@ -227,8 +231,7 @@ static int jobs_init(void) if (!_job_cache) return -ENOMEM; - _job_pool = mempool_create(MIN_JOBS, mempool_alloc_slab, - mempool_free_slab, _job_cache); + _job_pool = mempool_create_slab_pool(MIN_JOBS, _job_cache); if (!_job_pool) { kmem_cache_destroy(_job_cache); return -ENOMEM; @@ -293,10 +296,15 @@ static int run_complete_job(struct kcopyd_job *job) int read_err = job->read_err; unsigned int write_err = job->write_err; kcopyd_notify_fn fn = job->fn; + struct kcopyd_client *kc = job->kc; - kcopyd_put_pages(job->kc, job->pages); + kcopyd_put_pages(kc, job->pages); mempool_free(job, _job_pool); fn(read_err, write_err, context); + + if (atomic_dec_and_test(&kc->nr_jobs)) + wake_up(&kc->destroyq); + return 0; } @@ -431,6 +439,7 @@ static void do_work(void *ignored) */ static void dispatch_job(struct kcopyd_job *job) { + atomic_inc(&job->kc->nr_jobs); push(&_pages_jobs, job); wake(); } @@ -573,68 +582,68 @@ int kcopyd_cancel(struct kcopyd_job *job, int block) /*----------------------------------------------------------------- * Unit setup *---------------------------------------------------------------*/ -static DECLARE_MUTEX(_client_lock); +static DEFINE_MUTEX(_client_lock); static LIST_HEAD(_clients); static void client_add(struct kcopyd_client *kc) { - down(&_client_lock); + mutex_lock(&_client_lock); list_add(&kc->list, &_clients); - up(&_client_lock); + mutex_unlock(&_client_lock); } static void client_del(struct kcopyd_client *kc) { - down(&_client_lock); + mutex_lock(&_client_lock); list_del(&kc->list); - up(&_client_lock); + mutex_unlock(&_client_lock); } -static DECLARE_MUTEX(kcopyd_init_lock); +static DEFINE_MUTEX(kcopyd_init_lock); static int kcopyd_clients = 0; static int kcopyd_init(void) { int r; - down(&kcopyd_init_lock); + mutex_lock(&kcopyd_init_lock); if (kcopyd_clients) { /* Already initialized. 
*/ kcopyd_clients++; - up(&kcopyd_init_lock); + mutex_unlock(&kcopyd_init_lock); return 0; } r = jobs_init(); if (r) { - up(&kcopyd_init_lock); + mutex_unlock(&kcopyd_init_lock); return r; } _kcopyd_wq = create_singlethread_workqueue("kcopyd"); if (!_kcopyd_wq) { jobs_exit(); - up(&kcopyd_init_lock); + mutex_unlock(&kcopyd_init_lock); return -ENOMEM; } kcopyd_clients++; INIT_WORK(&_kcopyd_work, do_work, NULL); - up(&kcopyd_init_lock); + mutex_unlock(&kcopyd_init_lock); return 0; } static void kcopyd_exit(void) { - down(&kcopyd_init_lock); + mutex_lock(&kcopyd_init_lock); kcopyd_clients--; if (!kcopyd_clients) { jobs_exit(); destroy_workqueue(_kcopyd_wq); _kcopyd_wq = NULL; } - up(&kcopyd_init_lock); + mutex_unlock(&kcopyd_init_lock); } int kcopyd_client_create(unsigned int nr_pages, struct kcopyd_client **result) @@ -670,6 +679,9 @@ int kcopyd_client_create(unsigned int nr_pages, struct kcopyd_client **result) return r; } + init_waitqueue_head(&kc->destroyq); + atomic_set(&kc->nr_jobs, 0); + client_add(kc); *result = kc; return 0; @@ -677,6 +689,9 @@ int kcopyd_client_create(unsigned int nr_pages, struct kcopyd_client **result) void kcopyd_client_destroy(struct kcopyd_client *kc) { + /* Wait for completion of all jobs submitted by this client. */ + wait_event(kc->destroyq, !atomic_read(&kc->nr_jobs)); + dm_io_put(kc->nr_pages); client_free_pages(kc); client_del(kc); diff --git a/drivers/md/md.c b/drivers/md/md.c index 5ed2228745cb..039e071c1007 100644 --- a/drivers/md/md.c +++ b/drivers/md/md.c @@ -43,6 +43,7 @@ #include <linux/buffer_head.h> /* for invalidate_bdev */ #include <linux/suspend.h> #include <linux/poll.h> +#include <linux/mutex.h> #include <linux/init.h> @@ -158,11 +159,12 @@ static int start_readonly; */ static DECLARE_WAIT_QUEUE_HEAD(md_event_waiters); static atomic_t md_event_count; -static void md_new_event(mddev_t *mddev) +void md_new_event(mddev_t *mddev) { atomic_inc(&md_event_count); wake_up(&md_event_waiters); } +EXPORT_SYMBOL_GPL(md_new_event); /* * Enables to iterate over all existing md arrays @@ -253,7 +255,7 @@ static mddev_t * mddev_find(dev_t unit) else new->md_minor = MINOR(unit) >> MdpMinorShift; - init_MUTEX(&new->reconfig_sem); + mutex_init(&new->reconfig_mutex); INIT_LIST_HEAD(&new->disks); INIT_LIST_HEAD(&new->all_mddevs); init_timer(&new->safemode_timer); @@ -266,6 +268,7 @@ static mddev_t * mddev_find(dev_t unit) kfree(new); return NULL; } + set_bit(QUEUE_FLAG_CLUSTER, &new->queue->queue_flags); blk_queue_make_request(new->queue, md_fail_request); @@ -274,22 +277,22 @@ static mddev_t * mddev_find(dev_t unit) static inline int mddev_lock(mddev_t * mddev) { - return down_interruptible(&mddev->reconfig_sem); + return mutex_lock_interruptible(&mddev->reconfig_mutex); } static inline void mddev_lock_uninterruptible(mddev_t * mddev) { - down(&mddev->reconfig_sem); + mutex_lock(&mddev->reconfig_mutex); } static inline int mddev_trylock(mddev_t * mddev) { - return down_trylock(&mddev->reconfig_sem); + return mutex_trylock(&mddev->reconfig_mutex); } static inline void mddev_unlock(mddev_t * mddev) { - up(&mddev->reconfig_sem); + mutex_unlock(&mddev->reconfig_mutex); md_wakeup_thread(mddev->thread); } @@ -660,7 +663,8 @@ static int super_90_load(mdk_rdev_t *rdev, mdk_rdev_t *refdev, int minor_version } if (sb->major_version != 0 || - sb->minor_version != 90) { + sb->minor_version < 90 || + sb->minor_version > 91) { printk(KERN_WARNING "Bad version number %d.%d on %s\n", sb->major_version, sb->minor_version, b); @@ -745,6 +749,20 @@ static int 
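The kcopyd changes above add per-client job accounting so that kcopyd_client_destroy() can sleep until every submitted job has finished: dispatch_job() bumps nr_jobs, run_complete_job() drops it and wakes the destroyer, and destroy waits for the count to reach zero. A pthread sketch of that drain-before-teardown pattern, with a condition variable standing in for the kernel wait queue and a single-threaded main() just exercising the calls in order:

#include <pthread.h>
#include <stdio.h>

struct client {
        pthread_mutex_t lock;
        pthread_cond_t  idle;
        unsigned int    nr_jobs;
};

static void client_init(struct client *kc)
{
        pthread_mutex_init(&kc->lock, NULL);
        pthread_cond_init(&kc->idle, NULL);
        kc->nr_jobs = 0;
}

/* Mirrors dispatch_job(): account for the job before it is queued. */
static void job_dispatched(struct client *kc)
{
        pthread_mutex_lock(&kc->lock);
        kc->nr_jobs++;
        pthread_mutex_unlock(&kc->lock);
}

/* Mirrors run_complete_job(): drop the count, wake a waiting destroyer. */
static void job_completed(struct client *kc)
{
        pthread_mutex_lock(&kc->lock);
        if (--kc->nr_jobs == 0)
                pthread_cond_signal(&kc->idle);
        pthread_mutex_unlock(&kc->lock);
}

/* Mirrors kcopyd_client_destroy(): block until all submitted jobs are done. */
static void client_destroy(struct client *kc)
{
        pthread_mutex_lock(&kc->lock);
        while (kc->nr_jobs)
                pthread_cond_wait(&kc->idle, &kc->lock);
        pthread_mutex_unlock(&kc->lock);
        printf("all jobs drained, safe to free client state\n");
}

int main(void)
{
        struct client kc;

        client_init(&kc);
        job_dispatched(&kc);
        job_completed(&kc);
        client_destroy(&kc);
        return 0;
}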
super_90_validate(mddev_t *mddev, mdk_rdev_t *rdev) mddev->bitmap_offset = 0; mddev->default_bitmap_offset = MD_SB_BYTES >> 9; + if (mddev->minor_version >= 91) { + mddev->reshape_position = sb->reshape_position; + mddev->delta_disks = sb->delta_disks; + mddev->new_level = sb->new_level; + mddev->new_layout = sb->new_layout; + mddev->new_chunk = sb->new_chunk; + } else { + mddev->reshape_position = MaxSector; + mddev->delta_disks = 0; + mddev->new_level = mddev->level; + mddev->new_layout = mddev->layout; + mddev->new_chunk = mddev->chunk_size; + } + if (sb->state & (1<<MD_SB_CLEAN)) mddev->recovery_cp = MaxSector; else { @@ -764,7 +782,8 @@ static int super_90_validate(mddev_t *mddev, mdk_rdev_t *rdev) if (sb->state & (1<<MD_SB_BITMAP_PRESENT) && mddev->bitmap_file == NULL) { - if (mddev->level != 1 && mddev->level != 5 && mddev->level != 6 + if (mddev->level != 1 && mddev->level != 4 + && mddev->level != 5 && mddev->level != 6 && mddev->level != 10) { /* FIXME use a better test */ printk(KERN_WARNING "md: bitmaps not supported for this level.\n"); @@ -838,7 +857,6 @@ static void super_90_sync(mddev_t *mddev, mdk_rdev_t *rdev) sb->md_magic = MD_SB_MAGIC; sb->major_version = mddev->major_version; - sb->minor_version = mddev->minor_version; sb->patch_version = mddev->patch_version; sb->gvalid_words = 0; /* ignored */ memcpy(&sb->set_uuid0, mddev->uuid+0, 4); @@ -857,6 +875,17 @@ static void super_90_sync(mddev_t *mddev, mdk_rdev_t *rdev) sb->events_hi = (mddev->events>>32); sb->events_lo = (u32)mddev->events; + if (mddev->reshape_position == MaxSector) + sb->minor_version = 90; + else { + sb->minor_version = 91; + sb->reshape_position = mddev->reshape_position; + sb->new_level = mddev->new_level; + sb->delta_disks = mddev->delta_disks; + sb->new_layout = mddev->new_layout; + sb->new_chunk = mddev->new_chunk; + } + mddev->minor_version = sb->minor_version; if (mddev->in_sync) { sb->recovery_cp = mddev->recovery_cp; @@ -893,10 +922,9 @@ static void super_90_sync(mddev_t *mddev, mdk_rdev_t *rdev) d->raid_disk = rdev2->raid_disk; else d->raid_disk = rdev2->desc_nr; /* compatibility */ - if (test_bit(Faulty, &rdev2->flags)) { + if (test_bit(Faulty, &rdev2->flags)) d->state = (1<<MD_DISK_FAULTY); - failed++; - } else if (test_bit(In_sync, &rdev2->flags)) { + else if (test_bit(In_sync, &rdev2->flags)) { d->state = (1<<MD_DISK_ACTIVE); d->state |= (1<<MD_DISK_SYNC); active++; @@ -1102,6 +1130,20 @@ static int super_1_validate(mddev_t *mddev, mdk_rdev_t *rdev) } mddev->bitmap_offset = (__s32)le32_to_cpu(sb->bitmap_offset); } + if ((le32_to_cpu(sb->feature_map) & MD_FEATURE_RESHAPE_ACTIVE)) { + mddev->reshape_position = le64_to_cpu(sb->reshape_position); + mddev->delta_disks = le32_to_cpu(sb->delta_disks); + mddev->new_level = le32_to_cpu(sb->new_level); + mddev->new_layout = le32_to_cpu(sb->new_layout); + mddev->new_chunk = le32_to_cpu(sb->new_chunk)<<9; + } else { + mddev->reshape_position = MaxSector; + mddev->delta_disks = 0; + mddev->new_level = mddev->level; + mddev->new_layout = mddev->layout; + mddev->new_chunk = mddev->chunk_size; + } + } else if (mddev->pers == NULL) { /* Insist of good event counter while assembling */ __u64 ev1 = le64_to_cpu(sb->events); @@ -1173,6 +1215,14 @@ static void super_1_sync(mddev_t *mddev, mdk_rdev_t *rdev) sb->bitmap_offset = cpu_to_le32((__u32)mddev->bitmap_offset); sb->feature_map = cpu_to_le32(MD_FEATURE_BITMAP_OFFSET); } + if (mddev->reshape_position != MaxSector) { + sb->feature_map |= cpu_to_le32(MD_FEATURE_RESHAPE_ACTIVE); + sb->reshape_position = 
cpu_to_le64(mddev->reshape_position); + sb->new_layout = cpu_to_le32(mddev->new_layout); + sb->delta_disks = cpu_to_le32(mddev->delta_disks); + sb->new_level = cpu_to_le32(mddev->new_level); + sb->new_chunk = cpu_to_le32(mddev->new_chunk>>9); + } max_dev = 0; ITERATE_RDEV(mddev,rdev2,tmp) @@ -1301,6 +1351,7 @@ static int bind_rdev_to_array(mdk_rdev_t * rdev, mddev_t * mddev) else ko = &rdev->bdev->bd_disk->kobj; sysfs_create_link(&rdev->kobj, ko, "block"); + bd_claim_by_disk(rdev->bdev, rdev, mddev->gendisk); return 0; } @@ -1311,6 +1362,7 @@ static void unbind_rdev_from_array(mdk_rdev_t * rdev) MD_BUG(); return; } + bd_release_from_disk(rdev->bdev, rdev->mddev->gendisk); list_del_init(&rdev->same_set); printk(KERN_INFO "md: unbind<%s>\n", bdevname(rdev->bdev,b)); rdev->mddev = NULL; @@ -1493,7 +1545,7 @@ static void sync_sbs(mddev_t * mddev) } } -static void md_update_sb(mddev_t * mddev) +void md_update_sb(mddev_t * mddev) { int err; struct list_head *tmp; @@ -1570,6 +1622,7 @@ repeat: wake_up(&mddev->sb_wait); } +EXPORT_SYMBOL_GPL(md_update_sb); /* words written to sysfs files may, or my not, be \n terminated. * We want to accept with case. For this we use cmd_match. @@ -2162,7 +2215,9 @@ action_show(mddev_t *mddev, char *page) char *type = "idle"; if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) || test_bit(MD_RECOVERY_NEEDED, &mddev->recovery)) { - if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) { + if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery)) + type = "reshape"; + else if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) { if (!test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery)) type = "resync"; else if (test_bit(MD_RECOVERY_CHECK, &mddev->recovery)) @@ -2193,7 +2248,14 @@ action_store(mddev_t *mddev, const char *page, size_t len) return -EBUSY; else if (cmd_match(page, "resync") || cmd_match(page, "recover")) set_bit(MD_RECOVERY_NEEDED, &mddev->recovery); - else { + else if (cmd_match(page, "reshape")) { + int err; + if (mddev->pers->start_reshape == NULL) + return -EINVAL; + err = mddev->pers->start_reshape(mddev); + if (err) + return err; + } else { if (cmd_match(page, "check")) set_bit(MD_RECOVERY_CHECK, &mddev->recovery); else if (cmd_match(page, "repair")) @@ -2304,6 +2366,63 @@ sync_completed_show(mddev_t *mddev, char *page) static struct md_sysfs_entry md_sync_completed = __ATTR_RO(sync_completed); +static ssize_t +suspend_lo_show(mddev_t *mddev, char *page) +{ + return sprintf(page, "%llu\n", (unsigned long long)mddev->suspend_lo); +} + +static ssize_t +suspend_lo_store(mddev_t *mddev, const char *buf, size_t len) +{ + char *e; + unsigned long long new = simple_strtoull(buf, &e, 10); + + if (mddev->pers->quiesce == NULL) + return -EINVAL; + if (buf == e || (*e && *e != '\n')) + return -EINVAL; + if (new >= mddev->suspend_hi || + (new > mddev->suspend_lo && new < mddev->suspend_hi)) { + mddev->suspend_lo = new; + mddev->pers->quiesce(mddev, 2); + return len; + } else + return -EINVAL; +} +static struct md_sysfs_entry md_suspend_lo = +__ATTR(suspend_lo, S_IRUGO|S_IWUSR, suspend_lo_show, suspend_lo_store); + + +static ssize_t +suspend_hi_show(mddev_t *mddev, char *page) +{ + return sprintf(page, "%llu\n", (unsigned long long)mddev->suspend_hi); +} + +static ssize_t +suspend_hi_store(mddev_t *mddev, const char *buf, size_t len) +{ + char *e; + unsigned long long new = simple_strtoull(buf, &e, 10); + + if (mddev->pers->quiesce == NULL) + return -EINVAL; + if (buf == e || (*e && *e != '\n')) + return -EINVAL; + if ((new <= mddev->suspend_lo && mddev->suspend_lo >= 
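The superblock changes above persist in-progress reshape state only while a reshape is active - v0.90 bumps minor_version to 91, v1.x sets MD_FEATURE_RESHAPE_ACTIVE - and the load paths either restore those fields or fall back to "no reshape". A compact sketch of that save/restore shape, with made-up struct layouts and an illustrative feature bit (neither matches the real on-disk format):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define FEATURE_RESHAPE_ACTIVE (1u << 2)        /* illustrative bit only */
#define MAX_SECTOR UINT64_MAX                   /* stand-in for MaxSector */

struct sb {             /* tiny stand-in for the on-disk superblock */
        uint32_t feature_map;
        uint64_t reshape_position;
        int32_t  delta_disks;
        uint32_t new_level, new_layout, new_chunk;
};

struct array {          /* tiny stand-in for mddev */
        uint64_t reshape_position;
        int32_t  delta_disks;
        uint32_t level, layout, chunk;
        uint32_t new_level, new_layout, new_chunk;
};

/* Write side: only record reshape fields while a reshape is in progress. */
static void sync_sb(const struct array *md, struct sb *sb)
{
        memset(sb, 0, sizeof(*sb));
        if (md->reshape_position != MAX_SECTOR) {
                sb->feature_map |= FEATURE_RESHAPE_ACTIVE;
                sb->reshape_position = md->reshape_position;
                sb->delta_disks = md->delta_disks;
                sb->new_level = md->new_level;
                sb->new_layout = md->new_layout;
                sb->new_chunk = md->new_chunk;
        }
}

/* Read side: restore the saved state, or fall back to "no reshape". */
static void validate_sb(struct array *md, const struct sb *sb)
{
        if (sb->feature_map & FEATURE_RESHAPE_ACTIVE) {
                md->reshape_position = sb->reshape_position;
                md->delta_disks = sb->delta_disks;
                md->new_level = sb->new_level;
                md->new_layout = sb->new_layout;
                md->new_chunk = sb->new_chunk;
        } else {
                md->reshape_position = MAX_SECTOR;
                md->delta_disks = 0;
                md->new_level = md->level;
                md->new_layout = md->layout;
                md->new_chunk = md->chunk;
        }
}

int main(void)
{
        struct array a = { .reshape_position = 12345, .delta_disks = 1,
                           .level = 5, .layout = 2, .chunk = 65536,
                           .new_level = 5, .new_layout = 2, .new_chunk = 65536 };
        struct array b = { .level = 5, .layout = 2, .chunk = 65536 };
        struct sb s;

        sync_sb(&a, &s);
        validate_sb(&b, &s);
        printf("restored reshape_position=%llu delta_disks=%d\n",
               (unsigned long long)b.reshape_position, b.delta_disks);
        return 0;
}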
mddev->suspend_hi) || + (new > mddev->suspend_lo && new > mddev->suspend_hi)) { + mddev->suspend_hi = new; + mddev->pers->quiesce(mddev, 1); + mddev->pers->quiesce(mddev, 0); + return len; + } else + return -EINVAL; +} +static struct md_sysfs_entry md_suspend_hi = +__ATTR(suspend_hi, S_IRUGO|S_IWUSR, suspend_hi_show, suspend_hi_store); + + static struct attribute *md_default_attrs[] = { &md_level.attr, &md_raid_disks.attr, @@ -2321,6 +2440,8 @@ static struct attribute *md_redundancy_attrs[] = { &md_sync_max.attr, &md_sync_speed.attr, &md_sync_completed.attr, + &md_suspend_lo.attr, + &md_suspend_hi.attr, NULL, }; static struct attribute_group md_redundancy_group = { @@ -2380,7 +2501,7 @@ int mdp_major = 0; static struct kobject *md_probe(dev_t dev, int *part, void *data) { - static DECLARE_MUTEX(disks_sem); + static DEFINE_MUTEX(disks_mutex); mddev_t *mddev = mddev_find(dev); struct gendisk *disk; int partitioned = (MAJOR(dev) != MD_MAJOR); @@ -2390,15 +2511,15 @@ static struct kobject *md_probe(dev_t dev, int *part, void *data) if (!mddev) return NULL; - down(&disks_sem); + mutex_lock(&disks_mutex); if (mddev->gendisk) { - up(&disks_sem); + mutex_unlock(&disks_mutex); mddev_put(mddev); return NULL; } disk = alloc_disk(1 << shift); if (!disk) { - up(&disks_sem); + mutex_unlock(&disks_mutex); mddev_put(mddev); return NULL; } @@ -2416,7 +2537,7 @@ static struct kobject *md_probe(dev_t dev, int *part, void *data) disk->queue = mddev->queue; add_disk(disk); mddev->gendisk = disk; - up(&disks_sem); + mutex_unlock(&disks_mutex); mddev->kobj.parent = &disk->kobj; mddev->kobj.k_name = NULL; snprintf(mddev->kobj.name, KOBJ_NAME_LEN, "%s", "md"); @@ -2539,6 +2660,14 @@ static int do_md_run(mddev_t * mddev) mddev->level = pers->level; strlcpy(mddev->clevel, pers->name, sizeof(mddev->clevel)); + if (mddev->reshape_position != MaxSector && + pers->start_reshape == NULL) { + /* This personality cannot handle reshaping... 
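The new suspend_lo/suspend_hi attributes above parse their input with simple_strtoull() and reject anything that is not a plain decimal number optionally followed by a newline. The same validation in portable C, using strtoull():

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

/* Same test as suspend_{lo,hi}_store(): the buffer must contain at
 * least one digit and nothing after the number except a newline. */
static int parse_sector(const char *buf, unsigned long long *out)
{
        char *e;
        unsigned long long v = strtoull(buf, &e, 10);

        if (buf == e || (*e && *e != '\n'))
                return -EINVAL;
        *out = v;
        return 0;
}

int main(void)
{
        unsigned long long v;

        printf("%d\n", parse_sector("4096\n", &v));     /* 0 */
        printf("%d\n", parse_sector("4096", &v));       /* 0 */
        printf("%d\n", parse_sector("40x6", &v));       /* -EINVAL */
        printf("%d\n", parse_sector("", &v));           /* -EINVAL */
        return 0;
}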
*/ + mddev->pers = NULL; + module_put(pers->owner); + return -EINVAL; + } + mddev->recovery = 0; mddev->resync_max_sectors = mddev->size << 1; /* may be over-ridden by personality */ mddev->barriers_work = 1; @@ -2772,7 +2901,6 @@ static void autorun_array(mddev_t *mddev) */ static void autorun_devices(int part) { - struct list_head candidates; struct list_head *tmp; mdk_rdev_t *rdev0, *rdev; mddev_t *mddev; @@ -2781,6 +2909,7 @@ static void autorun_devices(int part) printk(KERN_INFO "md: autorun ...\n"); while (!list_empty(&pending_raid_disks)) { dev_t dev; + LIST_HEAD(candidates); rdev0 = list_entry(pending_raid_disks.next, mdk_rdev_t, same_set); @@ -3427,11 +3556,18 @@ static int set_array_info(mddev_t * mddev, mdu_array_info_t *info) mddev->default_bitmap_offset = MD_SB_BYTES >> 9; mddev->bitmap_offset = 0; + mddev->reshape_position = MaxSector; + /* * Generate a 128 bit UUID */ get_random_bytes(mddev->uuid, 16); + mddev->new_level = mddev->level; + mddev->new_chunk = mddev->chunk_size; + mddev->new_layout = mddev->layout; + mddev->delta_disks = 0; + return 0; } @@ -3440,6 +3576,7 @@ static int update_size(mddev_t *mddev, unsigned long size) mdk_rdev_t * rdev; int rv; struct list_head *tmp; + int fit = (size == 0); if (mddev->pers->resize == NULL) return -EINVAL; @@ -3457,7 +3594,6 @@ static int update_size(mddev_t *mddev, unsigned long size) return -EBUSY; ITERATE_RDEV(mddev,rdev,tmp) { sector_t avail; - int fit = (size == 0); if (rdev->sb_offset > rdev->data_offset) avail = (rdev->sb_offset*2) - rdev->data_offset; else @@ -3487,14 +3623,16 @@ static int update_raid_disks(mddev_t *mddev, int raid_disks) { int rv; /* change the number of raid disks */ - if (mddev->pers->reshape == NULL) + if (mddev->pers->check_reshape == NULL) return -EINVAL; if (raid_disks <= 0 || raid_disks >= mddev->max_disks) return -EINVAL; - if (mddev->sync_thread) + if (mddev->sync_thread || mddev->reshape_position != MaxSector) return -EBUSY; - rv = mddev->pers->reshape(mddev, raid_disks); + mddev->delta_disks = raid_disks - mddev->raid_disks; + + rv = mddev->pers->check_reshape(mddev); return rv; } @@ -4041,7 +4179,10 @@ static void status_unused(struct seq_file *seq) static void status_resync(struct seq_file *seq, mddev_t * mddev) { - unsigned long max_blocks, resync, res, dt, db, rt; + sector_t max_blocks, resync, res; + unsigned long dt, db, rt; + int scale; + unsigned int per_milli; resync = (mddev->curr_resync - atomic_read(&mddev->recovery_active))/2; @@ -4057,9 +4198,22 @@ static void status_resync(struct seq_file *seq, mddev_t * mddev) MD_BUG(); return; } - res = (resync/1024)*1000/(max_blocks/1024 + 1); + /* Pick 'scale' such that (resync>>scale)*1000 will fit + * in a sector_t, and (max_blocks>>scale) will fit in a + * u32, as those are the requirements for sector_div. + * Thus 'scale' must be at least 10 + */ + scale = 10; + if (sizeof(sector_t) > sizeof(unsigned long)) { + while ( max_blocks/2 > (1ULL<<(scale+32))) + scale++; + } + res = (resync>>scale)*1000; + sector_div(res, (u32)((max_blocks>>scale)+1)); + + per_milli = res; { - int i, x = res/50, y = 20-x; + int i, x = per_milli/50, y = 20-x; seq_printf(seq, "["); for (i = 0; i < x; i++) seq_printf(seq, "="); @@ -4068,10 +4222,14 @@ static void status_resync(struct seq_file *seq, mddev_t * mddev) seq_printf(seq, "."); seq_printf(seq, "] "); } - seq_printf(seq, " %s =%3lu.%lu%% (%lu/%lu)", + seq_printf(seq, " %s =%3u.%u%% (%llu/%llu)", + (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery)? 
+ "reshape" : (test_bit(MD_RECOVERY_SYNC, &mddev->recovery) ? - "resync" : "recovery"), - res/10, res % 10, resync, max_blocks); + "resync" : "recovery")), + per_milli/10, per_milli % 10, + (unsigned long long) resync, + (unsigned long long) max_blocks); /* * We do not want to overflow, so the order of operands and @@ -4085,7 +4243,7 @@ static void status_resync(struct seq_file *seq, mddev_t * mddev) dt = ((jiffies - mddev->resync_mark) / HZ); if (!dt) dt++; db = resync - (mddev->resync_mark_cnt/2); - rt = (dt * ((max_blocks-resync) / (db/100+1)))/100; + rt = (dt * ((unsigned long)(max_blocks-resync) / (db/100+1)))/100; seq_printf(seq, " finish=%lu.%lumin", rt / 60, (rt % 60)/6); @@ -4442,7 +4600,7 @@ static DECLARE_WAIT_QUEUE_HEAD(resync_wait); #define SYNC_MARKS 10 #define SYNC_MARK_STEP (3*HZ) -static void md_do_sync(mddev_t *mddev) +void md_do_sync(mddev_t *mddev) { mddev_t *mddev2; unsigned int currspeed = 0, @@ -4522,7 +4680,9 @@ static void md_do_sync(mddev_t *mddev) */ max_sectors = mddev->resync_max_sectors; mddev->resync_mismatches = 0; - } else + } else if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery)) + max_sectors = mddev->size << 1; + else /* recovery follows the physical size of devices */ max_sectors = mddev->size << 1; @@ -4658,6 +4818,8 @@ static void md_do_sync(mddev_t *mddev) mddev->pers->sync_request(mddev, max_sectors, &skipped, 1); if (!test_bit(MD_RECOVERY_ERR, &mddev->recovery) && + test_bit(MD_RECOVERY_SYNC, &mddev->recovery) && + !test_bit(MD_RECOVERY_CHECK, &mddev->recovery) && mddev->curr_resync > 2 && mddev->curr_resync >= mddev->recovery_cp) { if (test_bit(MD_RECOVERY_INTR, &mddev->recovery)) { @@ -4675,6 +4837,7 @@ static void md_do_sync(mddev_t *mddev) set_bit(MD_RECOVERY_DONE, &mddev->recovery); md_wakeup_thread(mddev->thread); } +EXPORT_SYMBOL_GPL(md_do_sync); /* @@ -4730,7 +4893,7 @@ void md_check_recovery(mddev_t *mddev) )) return; - if (mddev_trylock(mddev)==0) { + if (mddev_trylock(mddev)) { int spares =0; spin_lock_irq(&mddev->write_lock); @@ -4866,7 +5029,7 @@ static int md_notify_reboot(struct notifier_block *this, printk(KERN_INFO "md: stopping all md devices.\n"); ITERATE_MDDEV(mddev,tmp) - if (mddev_trylock(mddev)==0) + if (mddev_trylock(mddev)) do_md_stop (mddev, 1); /* * certain more exotic SCSI devices are known to be diff --git a/drivers/md/multipath.c b/drivers/md/multipath.c index 96f7af4ae400..1cc9de44ce86 100644 --- a/drivers/md/multipath.c +++ b/drivers/md/multipath.c @@ -35,18 +35,6 @@ #define NR_RESERVED_BUFS 32 -static void *mp_pool_alloc(gfp_t gfp_flags, void *data) -{ - struct multipath_bh *mpb; - mpb = kzalloc(sizeof(*mpb), gfp_flags); - return mpb; -} - -static void mp_pool_free(void *mpb, void *data) -{ - kfree(mpb); -} - static int multipath_map (multipath_conf_t *conf) { int i, disks = conf->raid_disks; @@ -494,9 +482,8 @@ static int multipath_run (mddev_t *mddev) } mddev->degraded = conf->raid_disks = conf->working_disks; - conf->pool = mempool_create(NR_RESERVED_BUFS, - mp_pool_alloc, mp_pool_free, - NULL); + conf->pool = mempool_create_kzalloc_pool(NR_RESERVED_BUFS, + sizeof(struct multipath_bh)); if (conf->pool == NULL) { printk(KERN_ERR "multipath: couldn't allocate memory for %s\n", diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c index 5d88329e3c7a..3cb0872a845d 100644 --- a/drivers/md/raid1.c +++ b/drivers/md/raid1.c @@ -1402,6 +1402,9 @@ static void raid1d(mddev_t *mddev) clear_bit(R1BIO_BarrierRetry, &r1_bio->state); clear_bit(R1BIO_Barrier, &r1_bio->state); for (i=0; i < conf->raid_disks; i++) + if 
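The status_resync() rework above avoids a 64-by-64-bit division (sector_div() only divides by a 32-bit value) by shifting both the completed and total block counts right by a common scale before computing parts-per-thousand. A standalone version of that arithmetic, with the sector_t-width test dropped for brevity:

#include <stdint.h>
#include <stdio.h>

/* Pick 'scale' so that (max_blocks >> scale) fits in 32 bits and
 * (resync >> scale) * 1000 cannot overflow 64 bits, then divide. */
static unsigned int per_milli(uint64_t resync, uint64_t max_blocks)
{
        int scale = 10;
        uint64_t res;

        while ((max_blocks / 2) > (1ULL << (scale + 32)))
                scale++;

        res = (resync >> scale) * 1000;
        res /= (uint32_t)((max_blocks >> scale) + 1);
        return (unsigned int)res;
}

int main(void)
{
        /* e.g. a 3 TB array counted in 1K blocks, one third resynced */
        uint64_t max = 3ULL * 1024 * 1024 * 1024;
        uint64_t done = max / 3;

        printf("%u.%u%%\n", per_milli(done, max) / 10, per_milli(done, max) % 10);
        return 0;
}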
(r1_bio->bios[i]) + atomic_inc(&r1_bio->remaining); + for (i=0; i < conf->raid_disks; i++) if (r1_bio->bios[i]) { struct bio_vec *bvec; int j; @@ -1789,6 +1792,11 @@ static int run(mddev_t *mddev) mdname(mddev), mddev->level); goto out; } + if (mddev->reshape_position != MaxSector) { + printk("raid1: %s: reshape_position set but not supported\n", + mdname(mddev)); + goto out; + } /* * copy the already verified devices into our private RAID1 * bookkeeping area. [whatever we allocate in run(), @@ -1971,7 +1979,7 @@ static int raid1_resize(mddev_t *mddev, sector_t sectors) return 0; } -static int raid1_reshape(mddev_t *mddev, int raid_disks) +static int raid1_reshape(mddev_t *mddev) { /* We need to: * 1/ resize the r1bio_pool @@ -1988,10 +1996,22 @@ static int raid1_reshape(mddev_t *mddev, int raid_disks) struct pool_info *newpoolinfo; mirror_info_t *newmirrors; conf_t *conf = mddev_to_conf(mddev); - int cnt; + int cnt, raid_disks; int d, d2; + /* Cannot change chunk_size, layout, or level */ + if (mddev->chunk_size != mddev->new_chunk || + mddev->layout != mddev->new_layout || + mddev->level != mddev->new_level) { + mddev->new_chunk = mddev->chunk_size; + mddev->new_layout = mddev->layout; + mddev->new_level = mddev->level; + return -EINVAL; + } + + raid_disks = mddev->raid_disks + mddev->delta_disks; + if (raid_disks < conf->raid_disks) { cnt=0; for (d= 0; d < conf->raid_disks; d++) @@ -2038,6 +2058,7 @@ static int raid1_reshape(mddev_t *mddev, int raid_disks) mddev->degraded += (raid_disks - conf->raid_disks); conf->raid_disks = mddev->raid_disks = raid_disks; + mddev->delta_disks = 0; conf->last_used = 0; /* just make sure it is in-range */ lower_barrier(conf); @@ -2079,7 +2100,7 @@ static struct mdk_personality raid1_personality = .spare_active = raid1_spare_active, .sync_request = sync_request, .resize = raid1_resize, - .reshape = raid1_reshape, + .check_reshape = raid1_reshape, .quiesce = raid1_quiesce, }; diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c index 2dba305daf3c..dae740adaf65 100644 --- a/drivers/md/raid5.c +++ b/drivers/md/raid5.c @@ -22,6 +22,7 @@ #include <linux/raid/raid5.h> #include <linux/highmem.h> #include <linux/bitops.h> +#include <linux/kthread.h> #include <asm/atomic.h> #include <linux/raid/bitmap.h> @@ -93,11 +94,11 @@ static void __release_stripe(raid5_conf_t *conf, struct stripe_head *sh) if (atomic_read(&conf->preread_active_stripes) < IO_THRESHOLD) md_wakeup_thread(conf->mddev->thread); } - list_add_tail(&sh->lru, &conf->inactive_list); atomic_dec(&conf->active_stripes); - if (!conf->inactive_blocked || - atomic_read(&conf->active_stripes) < (conf->max_nr_stripes*3/4)) + if (!test_bit(STRIPE_EXPANDING, &sh->state)) { + list_add_tail(&sh->lru, &conf->inactive_list); wake_up(&conf->wait_for_stripe); + } } } } @@ -178,10 +179,10 @@ static int grow_buffers(struct stripe_head *sh, int num) static void raid5_build_block (struct stripe_head *sh, int i); -static void init_stripe(struct stripe_head *sh, sector_t sector, int pd_idx) +static void init_stripe(struct stripe_head *sh, sector_t sector, int pd_idx, int disks) { raid5_conf_t *conf = sh->raid_conf; - int disks = conf->raid_disks, i; + int i; if (atomic_read(&sh->count) != 0) BUG(); @@ -198,7 +199,9 @@ static void init_stripe(struct stripe_head *sh, sector_t sector, int pd_idx) sh->pd_idx = pd_idx; sh->state = 0; - for (i=disks; i--; ) { + sh->disks = disks; + + for (i = sh->disks; i--; ) { struct r5dev *dev = &sh->dev[i]; if (dev->toread || dev->towrite || dev->written || @@ -215,7 +218,7 @@ static void 
init_stripe(struct stripe_head *sh, sector_t sector, int pd_idx) insert_hash(conf, sh); } -static struct stripe_head *__find_stripe(raid5_conf_t *conf, sector_t sector) +static struct stripe_head *__find_stripe(raid5_conf_t *conf, sector_t sector, int disks) { struct stripe_head *sh; struct hlist_node *hn; @@ -223,7 +226,7 @@ static struct stripe_head *__find_stripe(raid5_conf_t *conf, sector_t sector) CHECK_DEVLOCK(); PRINTK("__find_stripe, sector %llu\n", (unsigned long long)sector); hlist_for_each_entry(sh, hn, stripe_hash(conf, sector), hash) - if (sh->sector == sector) + if (sh->sector == sector && sh->disks == disks) return sh; PRINTK("__stripe %llu not in cache\n", (unsigned long long)sector); return NULL; @@ -232,8 +235,8 @@ static struct stripe_head *__find_stripe(raid5_conf_t *conf, sector_t sector) static void unplug_slaves(mddev_t *mddev); static void raid5_unplug_device(request_queue_t *q); -static struct stripe_head *get_active_stripe(raid5_conf_t *conf, sector_t sector, - int pd_idx, int noblock) +static struct stripe_head *get_active_stripe(raid5_conf_t *conf, sector_t sector, int disks, + int pd_idx, int noblock) { struct stripe_head *sh; @@ -245,7 +248,7 @@ static struct stripe_head *get_active_stripe(raid5_conf_t *conf, sector_t sector wait_event_lock_irq(conf->wait_for_stripe, conf->quiesce == 0, conf->device_lock, /* nothing */); - sh = __find_stripe(conf, sector); + sh = __find_stripe(conf, sector, disks); if (!sh) { if (!conf->inactive_blocked) sh = get_free_stripe(conf); @@ -259,11 +262,11 @@ static struct stripe_head *get_active_stripe(raid5_conf_t *conf, sector_t sector < (conf->max_nr_stripes *3/4) || !conf->inactive_blocked), conf->device_lock, - unplug_slaves(conf->mddev); + unplug_slaves(conf->mddev) ); conf->inactive_blocked = 0; } else - init_stripe(sh, sector, pd_idx); + init_stripe(sh, sector, pd_idx, disks); } else { if (atomic_read(&sh->count)) { if (!list_empty(&sh->lru)) @@ -271,9 +274,8 @@ static struct stripe_head *get_active_stripe(raid5_conf_t *conf, sector_t sector } else { if (!test_bit(STRIPE_HANDLE, &sh->state)) atomic_inc(&conf->active_stripes); - if (list_empty(&sh->lru)) - BUG(); - list_del_init(&sh->lru); + if (!list_empty(&sh->lru)) + list_del_init(&sh->lru); } } } while (sh == NULL); @@ -300,6 +302,7 @@ static int grow_one_stripe(raid5_conf_t *conf) kmem_cache_free(conf->slab_cache, sh); return 0; } + sh->disks = conf->raid_disks; /* we just created an active stripe so... */ atomic_set(&sh->count, 1); atomic_inc(&conf->active_stripes); @@ -313,14 +316,16 @@ static int grow_stripes(raid5_conf_t *conf, int num) kmem_cache_t *sc; int devs = conf->raid_disks; - sprintf(conf->cache_name, "raid5/%s", mdname(conf->mddev)); - - sc = kmem_cache_create(conf->cache_name, + sprintf(conf->cache_name[0], "raid5/%s", mdname(conf->mddev)); + sprintf(conf->cache_name[1], "raid5/%s-alt", mdname(conf->mddev)); + conf->active_name = 0; + sc = kmem_cache_create(conf->cache_name[conf->active_name], sizeof(struct stripe_head)+(devs-1)*sizeof(struct r5dev), 0, 0, NULL, NULL); if (!sc) return 1; conf->slab_cache = sc; + conf->pool_size = devs; while (num--) { if (!grow_one_stripe(conf)) return 1; @@ -328,6 +333,129 @@ static int grow_stripes(raid5_conf_t *conf, int num) return 0; } +#ifdef CONFIG_MD_RAID5_RESHAPE +static int resize_stripes(raid5_conf_t *conf, int newsize) +{ + /* Make all the stripes able to hold 'newsize' devices. + * New slots in each stripe get 'page' set to a new page. 
+ * + * This happens in stages: + * 1/ create a new kmem_cache and allocate the required number of + * stripe_heads. + * 2/ gather all the old stripe_heads and tranfer the pages across + * to the new stripe_heads. This will have the side effect of + * freezing the array as once all stripe_heads have been collected, + * no IO will be possible. Old stripe heads are freed once their + * pages have been transferred over, and the old kmem_cache is + * freed when all stripes are done. + * 3/ reallocate conf->disks to be suitable bigger. If this fails, + * we simple return a failre status - no need to clean anything up. + * 4/ allocate new pages for the new slots in the new stripe_heads. + * If this fails, we don't bother trying the shrink the + * stripe_heads down again, we just leave them as they are. + * As each stripe_head is processed the new one is released into + * active service. + * + * Once step2 is started, we cannot afford to wait for a write, + * so we use GFP_NOIO allocations. + */ + struct stripe_head *osh, *nsh; + LIST_HEAD(newstripes); + struct disk_info *ndisks; + int err = 0; + kmem_cache_t *sc; + int i; + + if (newsize <= conf->pool_size) + return 0; /* never bother to shrink */ + + /* Step 1 */ + sc = kmem_cache_create(conf->cache_name[1-conf->active_name], + sizeof(struct stripe_head)+(newsize-1)*sizeof(struct r5dev), + 0, 0, NULL, NULL); + if (!sc) + return -ENOMEM; + + for (i = conf->max_nr_stripes; i; i--) { + nsh = kmem_cache_alloc(sc, GFP_KERNEL); + if (!nsh) + break; + + memset(nsh, 0, sizeof(*nsh) + (newsize-1)*sizeof(struct r5dev)); + + nsh->raid_conf = conf; + spin_lock_init(&nsh->lock); + + list_add(&nsh->lru, &newstripes); + } + if (i) { + /* didn't get enough, give up */ + while (!list_empty(&newstripes)) { + nsh = list_entry(newstripes.next, struct stripe_head, lru); + list_del(&nsh->lru); + kmem_cache_free(sc, nsh); + } + kmem_cache_destroy(sc); + return -ENOMEM; + } + /* Step 2 - Must use GFP_NOIO now. + * OK, we have enough stripes, start collecting inactive + * stripes and copying them over + */ + list_for_each_entry(nsh, &newstripes, lru) { + spin_lock_irq(&conf->device_lock); + wait_event_lock_irq(conf->wait_for_stripe, + !list_empty(&conf->inactive_list), + conf->device_lock, + unplug_slaves(conf->mddev) + ); + osh = get_free_stripe(conf); + spin_unlock_irq(&conf->device_lock); + atomic_set(&nsh->count, 1); + for(i=0; i<conf->pool_size; i++) + nsh->dev[i].page = osh->dev[i].page; + for( ; i<newsize; i++) + nsh->dev[i].page = NULL; + kmem_cache_free(conf->slab_cache, osh); + } + kmem_cache_destroy(conf->slab_cache); + + /* Step 3. + * At this point, we are holding all the stripes so the array + * is completely stalled, so now is a good time to resize + * conf->disks. 
+ */ + ndisks = kzalloc(newsize * sizeof(struct disk_info), GFP_NOIO); + if (ndisks) { + for (i=0; i<conf->raid_disks; i++) + ndisks[i] = conf->disks[i]; + kfree(conf->disks); + conf->disks = ndisks; + } else + err = -ENOMEM; + + /* Step 4, return new stripes to service */ + while(!list_empty(&newstripes)) { + nsh = list_entry(newstripes.next, struct stripe_head, lru); + list_del_init(&nsh->lru); + for (i=conf->raid_disks; i < newsize; i++) + if (nsh->dev[i].page == NULL) { + struct page *p = alloc_page(GFP_NOIO); + nsh->dev[i].page = p; + if (!p) + err = -ENOMEM; + } + release_stripe(nsh); + } + /* critical section pass, GFP_NOIO no longer needed */ + + conf->slab_cache = sc; + conf->active_name = 1-conf->active_name; + conf->pool_size = newsize; + return err; +} +#endif + static int drop_one_stripe(raid5_conf_t *conf) { struct stripe_head *sh; @@ -339,7 +467,7 @@ static int drop_one_stripe(raid5_conf_t *conf) return 0; if (atomic_read(&sh->count)) BUG(); - shrink_buffers(sh, conf->raid_disks); + shrink_buffers(sh, conf->pool_size); kmem_cache_free(conf->slab_cache, sh); atomic_dec(&conf->active_stripes); return 1; @@ -360,7 +488,7 @@ static int raid5_end_read_request(struct bio * bi, unsigned int bytes_done, { struct stripe_head *sh = bi->bi_private; raid5_conf_t *conf = sh->raid_conf; - int disks = conf->raid_disks, i; + int disks = sh->disks, i; int uptodate = test_bit(BIO_UPTODATE, &bi->bi_flags); if (bi->bi_size) @@ -458,7 +586,7 @@ static int raid5_end_write_request (struct bio *bi, unsigned int bytes_done, { struct stripe_head *sh = bi->bi_private; raid5_conf_t *conf = sh->raid_conf; - int disks = conf->raid_disks, i; + int disks = sh->disks, i; unsigned long flags; int uptodate = test_bit(BIO_UPTODATE, &bi->bi_flags); @@ -612,7 +740,7 @@ static sector_t raid5_compute_sector(sector_t r_sector, unsigned int raid_disks, static sector_t compute_blocknr(struct stripe_head *sh, int i) { raid5_conf_t *conf = sh->raid_conf; - int raid_disks = conf->raid_disks, data_disks = raid_disks - 1; + int raid_disks = sh->disks, data_disks = raid_disks - 1; sector_t new_sector = sh->sector, check; int sectors_per_chunk = conf->chunk_size >> 9; sector_t stripe; @@ -713,8 +841,7 @@ static void copy_data(int frombio, struct bio *bio, static void compute_block(struct stripe_head *sh, int dd_idx) { - raid5_conf_t *conf = sh->raid_conf; - int i, count, disks = conf->raid_disks; + int i, count, disks = sh->disks; void *ptr[MAX_XOR_BLOCKS], *p; PRINTK("compute_block, stripe %llu, idx %d\n", @@ -744,7 +871,7 @@ static void compute_block(struct stripe_head *sh, int dd_idx) static void compute_parity(struct stripe_head *sh, int method) { raid5_conf_t *conf = sh->raid_conf; - int i, pd_idx = sh->pd_idx, disks = conf->raid_disks, count; + int i, pd_idx = sh->pd_idx, disks = sh->disks, count; void *ptr[MAX_XOR_BLOCKS]; struct bio *chosen; @@ -910,6 +1037,20 @@ static int add_stripe_bio(struct stripe_head *sh, struct bio *bi, int dd_idx, in return 0; } +static void end_reshape(raid5_conf_t *conf); + +static int stripe_to_pdidx(sector_t stripe, raid5_conf_t *conf, int disks) +{ + int sectors_per_chunk = conf->chunk_size >> 9; + sector_t x = stripe; + int pd_idx, dd_idx; + int chunk_offset = sector_div(x, sectors_per_chunk); + stripe = x; + raid5_compute_sector(stripe*(disks-1)*sectors_per_chunk + + chunk_offset, disks, disks-1, &dd_idx, &pd_idx, conf); + return pd_idx; +} + /* * handle_stripe - do things to a stripe. 
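The resize_stripes() comment above lays out the four steps: allocate larger stripe_heads from a second cache, transfer the existing pages across, grow conf->disks, then allocate pages only for the new slots. A heavily simplified userspace sketch of that buffer-migration shape; it ignores the quiescing, GFP_NOIO and kmem_cache swapping that the real code must handle, and trims error handling:

#include <stdio.h>
#include <stdlib.h>

/* A "stripe" owns one buffer per device, like stripe_head owns one
 * page per r5dev. */
struct stripe {
        int nr_devs;
        void **bufs;
};

/* Grow every stripe from old_devs to new_devs buffers, reusing the
 * buffers that already exist and allocating only the added slots. */
static int resize_stripes(struct stripe **pool, int nr, int old_devs, int new_devs)
{
        int i, j;

        if (new_devs <= old_devs)
                return 0;               /* never bother to shrink */

        for (i = 0; i < nr; i++) {
                struct stripe *osh = pool[i];
                struct stripe *nsh = malloc(sizeof(*nsh));

                if (!nsh)
                        return -1;
                nsh->nr_devs = new_devs;
                nsh->bufs = calloc(new_devs, sizeof(void *));
                if (!nsh->bufs) {
                        free(nsh);
                        return -1;
                }
                for (j = 0; j < old_devs; j++)          /* transfer old buffers */
                        nsh->bufs[j] = osh->bufs[j];
                for (; j < new_devs; j++)               /* allocate only new slots */
                        nsh->bufs[j] = calloc(1, 4096);
                free(osh->bufs);
                free(osh);
                pool[i] = nsh;
        }
        return 0;
}

int main(void)
{
        struct stripe *pool[2];
        int i, j;

        for (i = 0; i < 2; i++) {
                pool[i] = malloc(sizeof(struct stripe));
                pool[i]->nr_devs = 3;
                pool[i]->bufs = calloc(3, sizeof(void *));
                for (j = 0; j < 3; j++)
                        pool[i]->bufs[j] = calloc(1, 4096);
        }
        if (resize_stripes(pool, 2, 3, 4) == 0)
                printf("each stripe now has %d buffers\n", pool[0]->nr_devs);
        return 0;
}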
@@ -932,11 +1073,11 @@ static int add_stripe_bio(struct stripe_head *sh, struct bio *bi, int dd_idx, in static void handle_stripe(struct stripe_head *sh) { raid5_conf_t *conf = sh->raid_conf; - int disks = conf->raid_disks; + int disks = sh->disks; struct bio *return_bi= NULL; struct bio *bi; int i; - int syncing; + int syncing, expanding, expanded; int locked=0, uptodate=0, to_read=0, to_write=0, failed=0, written=0; int non_overwrite = 0; int failed_num=0; @@ -951,6 +1092,8 @@ static void handle_stripe(struct stripe_head *sh) clear_bit(STRIPE_DELAYED, &sh->state); syncing = test_bit(STRIPE_SYNCING, &sh->state); + expanding = test_bit(STRIPE_EXPAND_SOURCE, &sh->state); + expanded = test_bit(STRIPE_EXPAND_READY, &sh->state); /* Now to look around and see what can be done */ rcu_read_lock(); @@ -1143,13 +1286,14 @@ static void handle_stripe(struct stripe_head *sh) * parity, or to satisfy requests * or to load a block that is being partially written. */ - if (to_read || non_overwrite || (syncing && (uptodate < disks))) { + if (to_read || non_overwrite || (syncing && (uptodate < disks)) || expanding) { for (i=disks; i--;) { dev = &sh->dev[i]; if (!test_bit(R5_LOCKED, &dev->flags) && !test_bit(R5_UPTODATE, &dev->flags) && (dev->toread || (dev->towrite && !test_bit(R5_OVERWRITE, &dev->flags)) || syncing || + expanding || (failed && (sh->dev[failed_num].toread || (sh->dev[failed_num].towrite && !test_bit(R5_OVERWRITE, &sh->dev[failed_num].flags)))) ) @@ -1339,13 +1483,77 @@ static void handle_stripe(struct stripe_head *sh) set_bit(R5_Wantwrite, &dev->flags); set_bit(R5_ReWrite, &dev->flags); set_bit(R5_LOCKED, &dev->flags); + locked++; } else { /* let's read it back */ set_bit(R5_Wantread, &dev->flags); set_bit(R5_LOCKED, &dev->flags); + locked++; } } + if (expanded && test_bit(STRIPE_EXPANDING, &sh->state)) { + /* Need to write out all blocks after computing parity */ + sh->disks = conf->raid_disks; + sh->pd_idx = stripe_to_pdidx(sh->sector, conf, conf->raid_disks); + compute_parity(sh, RECONSTRUCT_WRITE); + for (i= conf->raid_disks; i--;) { + set_bit(R5_LOCKED, &sh->dev[i].flags); + locked++; + set_bit(R5_Wantwrite, &sh->dev[i].flags); + } + clear_bit(STRIPE_EXPANDING, &sh->state); + } else if (expanded) { + clear_bit(STRIPE_EXPAND_READY, &sh->state); + atomic_dec(&conf->reshape_stripes); + wake_up(&conf->wait_for_overlap); + md_done_sync(conf->mddev, STRIPE_SECTORS, 1); + } + + if (expanding && locked == 0) { + /* We have read all the blocks in this stripe and now we need to + * copy some of them into a target stripe for expand. + */ + clear_bit(STRIPE_EXPAND_SOURCE, &sh->state); + for (i=0; i< sh->disks; i++) + if (i != sh->pd_idx) { + int dd_idx, pd_idx, j; + struct stripe_head *sh2; + + sector_t bn = compute_blocknr(sh, i); + sector_t s = raid5_compute_sector(bn, conf->raid_disks, + conf->raid_disks-1, + &dd_idx, &pd_idx, conf); + sh2 = get_active_stripe(conf, s, conf->raid_disks, pd_idx, 1); + if (sh2 == NULL) + /* so far only the early blocks of this stripe + * have been requested. 
When later blocks + * get requested, we will try again + */ + continue; + if(!test_bit(STRIPE_EXPANDING, &sh2->state) || + test_bit(R5_Expanded, &sh2->dev[dd_idx].flags)) { + /* must have already done this block */ + release_stripe(sh2); + continue; + } + memcpy(page_address(sh2->dev[dd_idx].page), + page_address(sh->dev[i].page), + STRIPE_SIZE); + set_bit(R5_Expanded, &sh2->dev[dd_idx].flags); + set_bit(R5_UPTODATE, &sh2->dev[dd_idx].flags); + for (j=0; j<conf->raid_disks; j++) + if (j != sh2->pd_idx && + !test_bit(R5_Expanded, &sh2->dev[j].flags)) + break; + if (j == conf->raid_disks) { + set_bit(STRIPE_EXPAND_READY, &sh2->state); + set_bit(STRIPE_HANDLE, &sh2->state); + } + release_stripe(sh2); + } + } + spin_unlock(&sh->lock); while ((bi=return_bi)) { @@ -1384,7 +1592,7 @@ static void handle_stripe(struct stripe_head *sh) rcu_read_unlock(); if (rdev) { - if (syncing) + if (syncing || expanding || expanded) md_sync_acct(rdev->bdev, STRIPE_SECTORS); bi->bi_bdev = rdev->bdev; @@ -1526,17 +1734,16 @@ static inline void raid5_plug_device(raid5_conf_t *conf) spin_unlock_irq(&conf->device_lock); } -static int make_request (request_queue_t *q, struct bio * bi) +static int make_request(request_queue_t *q, struct bio * bi) { mddev_t *mddev = q->queuedata; raid5_conf_t *conf = mddev_to_conf(mddev); - const unsigned int raid_disks = conf->raid_disks; - const unsigned int data_disks = raid_disks - 1; unsigned int dd_idx, pd_idx; sector_t new_sector; sector_t logical_sector, last_sector; struct stripe_head *sh; const int rw = bio_data_dir(bi); + int remaining; if (unlikely(bio_barrier(bi))) { bio_endio(bi, bi->bi_size, -EOPNOTSUPP); @@ -1555,20 +1762,77 @@ static int make_request (request_queue_t *q, struct bio * bi) for (;logical_sector < last_sector; logical_sector += STRIPE_SECTORS) { DEFINE_WAIT(w); - - new_sector = raid5_compute_sector(logical_sector, - raid_disks, data_disks, &dd_idx, &pd_idx, conf); + int disks; + retry: + prepare_to_wait(&conf->wait_for_overlap, &w, TASK_UNINTERRUPTIBLE); + if (likely(conf->expand_progress == MaxSector)) + disks = conf->raid_disks; + else { + /* spinlock is needed as expand_progress may be + * 64bit on a 32bit platform, and so it might be + * possible to see a half-updated value + * Ofcourse expand_progress could change after + * the lock is dropped, so once we get a reference + * to the stripe that we think it is, we will have + * to check again. + */ + spin_lock_irq(&conf->device_lock); + disks = conf->raid_disks; + if (logical_sector >= conf->expand_progress) + disks = conf->previous_raid_disks; + else { + if (logical_sector >= conf->expand_lo) { + spin_unlock_irq(&conf->device_lock); + schedule(); + goto retry; + } + } + spin_unlock_irq(&conf->device_lock); + } + new_sector = raid5_compute_sector(logical_sector, disks, disks - 1, + &dd_idx, &pd_idx, conf); PRINTK("raid5: make_request, sector %llu logical %llu\n", (unsigned long long)new_sector, (unsigned long long)logical_sector); - retry: - prepare_to_wait(&conf->wait_for_overlap, &w, TASK_UNINTERRUPTIBLE); - sh = get_active_stripe(conf, new_sector, pd_idx, (bi->bi_rw&RWA_MASK)); + sh = get_active_stripe(conf, new_sector, disks, pd_idx, (bi->bi_rw&RWA_MASK)); if (sh) { - if (!add_stripe_bio(sh, bi, dd_idx, (bi->bi_rw&RW_MASK))) { - /* Add failed due to overlap. Flush everything + if (unlikely(conf->expand_progress != MaxSector)) { + /* expansion might have moved on while waiting for a + * stripe, so we must do the range check again. 
+ * Expansion could still move past after this + * test, but as we are holding a reference to + * 'sh', we know that if that happens, + * STRIPE_EXPANDING will get set and the expansion + * won't proceed until we finish with the stripe. + */ + int must_retry = 0; + spin_lock_irq(&conf->device_lock); + if (logical_sector < conf->expand_progress && + disks == conf->previous_raid_disks) + /* mismatch, need to try again */ + must_retry = 1; + spin_unlock_irq(&conf->device_lock); + if (must_retry) { + release_stripe(sh); + goto retry; + } + } + /* FIXME what if we get a false positive because these + * are being updated. + */ + if (logical_sector >= mddev->suspend_lo && + logical_sector < mddev->suspend_hi) { + release_stripe(sh); + schedule(); + goto retry; + } + + if (test_bit(STRIPE_EXPANDING, &sh->state) || + !add_stripe_bio(sh, bi, dd_idx, (bi->bi_rw&RW_MASK))) { + /* Stripe is busy expanding or + * add failed due to overlap. Flush everything * and wait a while */ raid5_unplug_device(mddev->queue); @@ -1580,7 +1844,6 @@ static int make_request (request_queue_t *q, struct bio * bi) raid5_plug_device(conf); handle_stripe(sh); release_stripe(sh); - } else { /* cannot get stripe for read-ahead, just give-up */ clear_bit(BIO_UPTODATE, &bi->bi_flags); @@ -1590,7 +1853,9 @@ static int make_request (request_queue_t *q, struct bio * bi) } spin_lock_irq(&conf->device_lock); - if (--bi->bi_phys_segments == 0) { + remaining = --bi->bi_phys_segments; + spin_unlock_irq(&conf->device_lock); + if (remaining == 0) { int bytes = bi->bi_size; if ( bio_data_dir(bi) == WRITE ) @@ -1598,7 +1863,6 @@ static int make_request (request_queue_t *q, struct bio * bi) bi->bi_size = 0; bi->bi_end_io(bi, bytes, 0); } - spin_unlock_irq(&conf->device_lock); return 0; } @@ -1607,12 +1871,8 @@ static sector_t sync_request(mddev_t *mddev, sector_t sector_nr, int *skipped, i { raid5_conf_t *conf = (raid5_conf_t *) mddev->private; struct stripe_head *sh; - int sectors_per_chunk = conf->chunk_size >> 9; - sector_t x; - unsigned long stripe; - int chunk_offset; - int dd_idx, pd_idx; - sector_t first_sector; + int pd_idx; + sector_t first_sector, last_sector; int raid_disks = conf->raid_disks; int data_disks = raid_disks-1; sector_t max_sector = mddev->size << 1; @@ -1621,6 +1881,10 @@ static sector_t sync_request(mddev_t *mddev, sector_t sector_nr, int *skipped, i if (sector_nr >= max_sector) { /* just being told to finish up .. nothing much to do */ unplug_slaves(mddev); + if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery)) { + end_reshape(conf); + return 0; + } if (mddev->curr_resync < max_sector) /* aborted */ bitmap_end_sync(mddev->bitmap, mddev->curr_resync, @@ -1631,6 +1895,123 @@ static sector_t sync_request(mddev_t *mddev, sector_t sector_nr, int *skipped, i return 0; } + + if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery)) { + /* reshaping is quite different to recovery/resync so it is + * handled quite separately ... here. + * + * On each call to sync_request, we gather one chunk worth of + * destination stripes and flag them as expanding. + * Then we find all the source stripes and request reads. + * As the reads complete, handle_stripe will copy the data + * into the destination stripe and release that stripe. 
+ */ + int i; + int dd_idx; + sector_t writepos, safepos, gap; + + if (sector_nr == 0 && + conf->expand_progress != 0) { + /* restarting in the middle, skip the initial sectors */ + sector_nr = conf->expand_progress; + sector_div(sector_nr, conf->raid_disks-1); + *skipped = 1; + return sector_nr; + } + + /* we update the metadata when there is more than 3Meg + * in the block range (that is rather arbitrary, should + * probably be time based) or when the data about to be + * copied would over-write the source of the data at + * the front of the range. + * i.e. one new_stripe forward from expand_progress new_maps + * to after where expand_lo old_maps to + */ + writepos = conf->expand_progress + + conf->chunk_size/512*(conf->raid_disks-1); + sector_div(writepos, conf->raid_disks-1); + safepos = conf->expand_lo; + sector_div(safepos, conf->previous_raid_disks-1); + gap = conf->expand_progress - conf->expand_lo; + + if (writepos >= safepos || + gap > (conf->raid_disks-1)*3000*2 /*3Meg*/) { + /* Cannot proceed until we've updated the superblock... */ + wait_event(conf->wait_for_overlap, + atomic_read(&conf->reshape_stripes)==0); + mddev->reshape_position = conf->expand_progress; + mddev->sb_dirty = 1; + md_wakeup_thread(mddev->thread); + wait_event(mddev->sb_wait, mddev->sb_dirty == 0 || + kthread_should_stop()); + spin_lock_irq(&conf->device_lock); + conf->expand_lo = mddev->reshape_position; + spin_unlock_irq(&conf->device_lock); + wake_up(&conf->wait_for_overlap); + } + + for (i=0; i < conf->chunk_size/512; i+= STRIPE_SECTORS) { + int j; + int skipped = 0; + pd_idx = stripe_to_pdidx(sector_nr+i, conf, conf->raid_disks); + sh = get_active_stripe(conf, sector_nr+i, + conf->raid_disks, pd_idx, 0); + set_bit(STRIPE_EXPANDING, &sh->state); + atomic_inc(&conf->reshape_stripes); + /* If any of this stripe is beyond the end of the old + * array, then we need to zero those blocks + */ + for (j=sh->disks; j--;) { + sector_t s; + if (j == sh->pd_idx) + continue; + s = compute_blocknr(sh, j); + if (s < (mddev->array_size<<1)) { + skipped = 1; + continue; + } + memset(page_address(sh->dev[j].page), 0, STRIPE_SIZE); + set_bit(R5_Expanded, &sh->dev[j].flags); + set_bit(R5_UPTODATE, &sh->dev[j].flags); + } + if (!skipped) { + set_bit(STRIPE_EXPAND_READY, &sh->state); + set_bit(STRIPE_HANDLE, &sh->state); + } + release_stripe(sh); + } + spin_lock_irq(&conf->device_lock); + conf->expand_progress = (sector_nr + i)*(conf->raid_disks-1); + spin_unlock_irq(&conf->device_lock); + /* Ok, those stripe are ready. We can start scheduling + * reads on the source stripes. + * The source stripes are determined by mapping the first and last + * block on the destination stripes. 
+ */ + raid_disks = conf->previous_raid_disks; + data_disks = raid_disks - 1; + first_sector = + raid5_compute_sector(sector_nr*(conf->raid_disks-1), + raid_disks, data_disks, + &dd_idx, &pd_idx, conf); + last_sector = + raid5_compute_sector((sector_nr+conf->chunk_size/512) + *(conf->raid_disks-1) -1, + raid_disks, data_disks, + &dd_idx, &pd_idx, conf); + if (last_sector >= (mddev->size<<1)) + last_sector = (mddev->size<<1)-1; + while (first_sector <= last_sector) { + pd_idx = stripe_to_pdidx(first_sector, conf, conf->previous_raid_disks); + sh = get_active_stripe(conf, first_sector, + conf->previous_raid_disks, pd_idx, 0); + set_bit(STRIPE_EXPAND_SOURCE, &sh->state); + set_bit(STRIPE_HANDLE, &sh->state); + release_stripe(sh); + first_sector += STRIPE_SECTORS; + } + return conf->chunk_size>>9; + } /* if there is 1 or more failed drives and we are trying * to resync, then assert that we are finished, because there is * nothing we can do. @@ -1649,16 +2030,10 @@ static sector_t sync_request(mddev_t *mddev, sector_t sector_nr, int *skipped, i return sync_blocks * STRIPE_SECTORS; /* keep things rounded to whole stripes */ } - x = sector_nr; - chunk_offset = sector_div(x, sectors_per_chunk); - stripe = x; - BUG_ON(x != stripe); - - first_sector = raid5_compute_sector((sector_t)stripe*data_disks*sectors_per_chunk - + chunk_offset, raid_disks, data_disks, &dd_idx, &pd_idx, conf); - sh = get_active_stripe(conf, sector_nr, pd_idx, 1); + pd_idx = stripe_to_pdidx(sector_nr, conf, raid_disks); + sh = get_active_stripe(conf, sector_nr, raid_disks, pd_idx, 1); if (sh == NULL) { - sh = get_active_stripe(conf, sector_nr, pd_idx, 0); + sh = get_active_stripe(conf, sector_nr, raid_disks, pd_idx, 0); /* make sure we don't swamp the stripe cache if someone else * is trying to get access */ @@ -1822,11 +2197,64 @@ static int run(mddev_t *mddev) return -EIO; } - mddev->private = kzalloc(sizeof (raid5_conf_t) - + mddev->raid_disks * sizeof(struct disk_info), - GFP_KERNEL); + if (mddev->reshape_position != MaxSector) { + /* Check that we can continue the reshape. + * Currently only disks can change, it must + * increase, and we must be past the point where + * a stripe over-writes itself + */ + sector_t here_new, here_old; + int old_disks; + + if (mddev->new_level != mddev->level || + mddev->new_layout != mddev->layout || + mddev->new_chunk != mddev->chunk_size) { + printk(KERN_ERR "raid5: %s: unsupported reshape required - aborting.\n", + mdname(mddev)); + return -EINVAL; + } + if (mddev->delta_disks <= 0) { + printk(KERN_ERR "raid5: %s: unsupported reshape (reduce disks) required - aborting.\n", + mdname(mddev)); + return -EINVAL; + } + old_disks = mddev->raid_disks - mddev->delta_disks; + /* reshape_position must be on a new-stripe boundary, and one + * further up in new geometry must map after here in old geometry. 
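The reshape path in sync_request() above decides when it must checkpoint mddev->reshape_position in the superblock: either the next destination write would reach source data whose copy has not yet been recorded, or the copy has run roughly 3MB of per-device progress past the last checkpoint. A standalone restatement of that test; the helper name and example numbers are mine:

#include <stdint.h>
#include <stdio.h>

static int need_checkpoint(uint64_t expand_progress, uint64_t expand_lo,
                           unsigned chunk_sectors, int new_disks, int old_disks)
{
        uint64_t writepos, safepos, gap;

        /* where the next destination chunk lands, per device, new layout */
        writepos = expand_progress + (uint64_t)chunk_sectors * (new_disks - 1);
        writepos /= (new_disks - 1);

        /* how far the checkpointed safe point reaches, per device, old layout */
        safepos = expand_lo / (old_disks - 1);

        gap = expand_progress - expand_lo;

        return writepos >= safepos ||
               gap > (uint64_t)(new_disks - 1) * 3000 * 2;
}

int main(void)
{
        /* 64K chunks (128 sectors), growing a 4-disk RAID5 to 5 disks */
        printf("%d\n", need_checkpoint(120000, 110000, 128, 5, 4));     /* 0: recently checkpointed */
        printf("%d\n", need_checkpoint(120000,  80000, 128, 5, 4));     /* 1: must update the superblock */
        return 0;
}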
+ */ + here_new = mddev->reshape_position; + if (sector_div(here_new, (mddev->chunk_size>>9)*(mddev->raid_disks-1))) { + printk(KERN_ERR "raid5: reshape_position not on a stripe boundary\n"); + return -EINVAL; + } + /* here_new is the stripe we will write to */ + here_old = mddev->reshape_position; + sector_div(here_old, (mddev->chunk_size>>9)*(old_disks-1)); + /* here_old is the first stripe that we might need to read from */ + if (here_new >= here_old) { + /* Reading from the same stripe as writing to - bad */ + printk(KERN_ERR "raid5: reshape_position too early for auto-recovery - aborting.\n"); + return -EINVAL; + } + printk(KERN_INFO "raid5: reshape will continue\n"); + /* OK, we should be able to continue; */ + } + + + mddev->private = kzalloc(sizeof (raid5_conf_t), GFP_KERNEL); if ((conf = mddev->private) == NULL) goto abort; + if (mddev->reshape_position == MaxSector) { + conf->previous_raid_disks = conf->raid_disks = mddev->raid_disks; + } else { + conf->raid_disks = mddev->raid_disks; + conf->previous_raid_disks = mddev->raid_disks - mddev->delta_disks; + } + + conf->disks = kzalloc(conf->raid_disks * sizeof(struct disk_info), + GFP_KERNEL); + if (!conf->disks) + goto abort; conf->mddev = mddev; @@ -1847,7 +2275,7 @@ static int run(mddev_t *mddev) ITERATE_RDEV(mddev,rdev,tmp) { raid_disk = rdev->raid_disk; - if (raid_disk >= mddev->raid_disks + if (raid_disk >= conf->raid_disks || raid_disk < 0) continue; disk = conf->disks + raid_disk; @@ -1863,7 +2291,6 @@ static int run(mddev_t *mddev) } } - conf->raid_disks = mddev->raid_disks; /* * 0 for a fully functional array, 1 for a degraded array. */ @@ -1873,6 +2300,7 @@ static int run(mddev_t *mddev) conf->level = mddev->level; conf->algorithm = mddev->layout; conf->max_nr_stripes = NR_STRIPES; + conf->expand_progress = mddev->reshape_position; /* device size must be a multiple of chunk size */ mddev->size &= ~(mddev->chunk_size/1024 -1); @@ -1945,6 +2373,21 @@ static int run(mddev_t *mddev) print_raid5_conf(conf); + if (conf->expand_progress != MaxSector) { + printk("...ok start reshape thread\n"); + conf->expand_lo = conf->expand_progress; + atomic_set(&conf->reshape_stripes, 0); + clear_bit(MD_RECOVERY_SYNC, &mddev->recovery); + clear_bit(MD_RECOVERY_CHECK, &mddev->recovery); + set_bit(MD_RECOVERY_RESHAPE, &mddev->recovery); + set_bit(MD_RECOVERY_RUNNING, &mddev->recovery); + mddev->sync_thread = md_register_thread(md_do_sync, mddev, + "%s_reshape"); + /* FIXME if md_register_thread fails?? 
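run() above refuses to resume an interrupted reshape unless reshape_position falls on a new-geometry stripe boundary and that stripe still lies strictly before the first old-geometry stripe it would need to read. The same check as a small standalone function, with worked numbers in main():

#include <stdint.h>
#include <stdio.h>

static int reshape_restart_ok(uint64_t reshape_position,
                              unsigned chunk_sectors, int new_disks, int old_disks)
{
        uint64_t new_stripe_sectors = (uint64_t)chunk_sectors * (new_disks - 1);
        uint64_t old_stripe_sectors = (uint64_t)chunk_sectors * (old_disks - 1);
        uint64_t here_new, here_old;

        if (reshape_position % new_stripe_sectors) {
                fprintf(stderr, "reshape_position not on a stripe boundary\n");
                return 0;
        }
        here_new = reshape_position / new_stripe_sectors;       /* stripe we will write */
        here_old = reshape_position / old_stripe_sectors;       /* first stripe we may read */

        if (here_new >= here_old) {
                fprintf(stderr, "reshape_position too early for auto-recovery\n");
                return 0;
        }
        return 1;
}

int main(void)
{
        /* 64K chunks (128 sectors), 4 -> 5 disks: a new stripe holds
         * 512 data sectors, an old stripe 384. */
        printf("%d\n", reshape_restart_ok(512,  128, 5, 4));    /* 0: still overlaps */
        printf("%d\n", reshape_restart_ok(2048, 128, 5, 4));    /* 1: safely past the overlap */
        return 0;
}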
*/ + md_wakeup_thread(mddev->sync_thread); + + } + /* read-ahead size must cover two whole stripes, which is * 2 * (n-1) * chunksize where 'n' is the number of raid devices */ @@ -1960,12 +2403,13 @@ static int run(mddev_t *mddev) mddev->queue->unplug_fn = raid5_unplug_device; mddev->queue->issue_flush_fn = raid5_issue_flush; + mddev->array_size = mddev->size * (conf->previous_raid_disks - 1); - mddev->array_size = mddev->size * (mddev->raid_disks - 1); return 0; abort: if (conf) { print_raid5_conf(conf); + kfree(conf->disks); kfree(conf->stripe_hashtbl); kfree(conf); } @@ -1986,6 +2430,7 @@ static int stop(mddev_t *mddev) kfree(conf->stripe_hashtbl); blk_sync_queue(mddev->queue); /* the unplug fn references 'conf'*/ sysfs_remove_group(&mddev->kobj, &raid5_attrs_group); + kfree(conf->disks); kfree(conf); mddev->private = NULL; return 0; @@ -2001,7 +2446,7 @@ static void print_sh (struct stripe_head *sh) printk("sh %llu, count %d.\n", (unsigned long long)sh->sector, atomic_read(&sh->count)); printk("sh %llu, ", (unsigned long long)sh->sector); - for (i = 0; i < sh->raid_conf->raid_disks; i++) { + for (i = 0; i < sh->disks; i++) { printk("(cache%d: %p %ld) ", i, sh->dev[i].page, sh->dev[i].flags); } @@ -2132,7 +2577,7 @@ static int raid5_add_disk(mddev_t *mddev, mdk_rdev_t *rdev) /* * find the disk ... */ - for (disk=0; disk < mddev->raid_disks; disk++) + for (disk=0; disk < conf->raid_disks; disk++) if ((p=conf->disks + disk)->rdev == NULL) { clear_bit(In_sync, &rdev->flags); rdev->raid_disk = disk; @@ -2168,11 +2613,146 @@ static int raid5_resize(mddev_t *mddev, sector_t sectors) return 0; } +#ifdef CONFIG_MD_RAID5_RESHAPE +static int raid5_check_reshape(mddev_t *mddev) +{ + raid5_conf_t *conf = mddev_to_conf(mddev); + int err; + + if (mddev->delta_disks < 0 || + mddev->new_level != mddev->level) + return -EINVAL; /* Cannot shrink array or change level yet */ + if (mddev->delta_disks == 0) + return 0; /* nothing to do */ + + /* Can only proceed if there are plenty of stripe_heads. + * We need a minimum of one full stripe,, and for sensible progress + * it is best to have about 4 times that. + * If we require 4 times, then the default 256 4K stripe_heads will + * allow for chunk sizes up to 256K, which is probably OK. + * If the chunk size is greater, user-space should request more + * stripe_heads first. + */ + if ((mddev->chunk_size / STRIPE_SIZE) * 4 > conf->max_nr_stripes || + (mddev->new_chunk / STRIPE_SIZE) * 4 > conf->max_nr_stripes) { + printk(KERN_WARNING "raid5: reshape: not enough stripes. 
Needed %lu\n", + (mddev->chunk_size / STRIPE_SIZE)*4); + return -ENOSPC; + } + + err = resize_stripes(conf, conf->raid_disks + mddev->delta_disks); + if (err) + return err; + + /* looks like we might be able to manage this */ + return 0; +} + +static int raid5_start_reshape(mddev_t *mddev) +{ + raid5_conf_t *conf = mddev_to_conf(mddev); + mdk_rdev_t *rdev; + struct list_head *rtmp; + int spares = 0; + int added_devices = 0; + + if (mddev->degraded || + test_bit(MD_RECOVERY_RUNNING, &mddev->recovery)) + return -EBUSY; + + ITERATE_RDEV(mddev, rdev, rtmp) + if (rdev->raid_disk < 0 && + !test_bit(Faulty, &rdev->flags)) + spares++; + + if (spares < mddev->delta_disks-1) + /* Not enough devices even to make a degraded array + * of that size + */ + return -EINVAL; + + atomic_set(&conf->reshape_stripes, 0); + spin_lock_irq(&conf->device_lock); + conf->previous_raid_disks = conf->raid_disks; + conf->raid_disks += mddev->delta_disks; + conf->expand_progress = 0; + conf->expand_lo = 0; + spin_unlock_irq(&conf->device_lock); + + /* Add some new drives, as many as will fit. + * We know there are enough to make the newly sized array work. + */ + ITERATE_RDEV(mddev, rdev, rtmp) + if (rdev->raid_disk < 0 && + !test_bit(Faulty, &rdev->flags)) { + if (raid5_add_disk(mddev, rdev)) { + char nm[20]; + set_bit(In_sync, &rdev->flags); + conf->working_disks++; + added_devices++; + sprintf(nm, "rd%d", rdev->raid_disk); + sysfs_create_link(&mddev->kobj, &rdev->kobj, nm); + } else + break; + } + + mddev->degraded = (conf->raid_disks - conf->previous_raid_disks) - added_devices; + mddev->raid_disks = conf->raid_disks; + mddev->reshape_position = 0; + mddev->sb_dirty = 1; + + clear_bit(MD_RECOVERY_SYNC, &mddev->recovery); + clear_bit(MD_RECOVERY_CHECK, &mddev->recovery); + set_bit(MD_RECOVERY_RESHAPE, &mddev->recovery); + set_bit(MD_RECOVERY_RUNNING, &mddev->recovery); + mddev->sync_thread = md_register_thread(md_do_sync, mddev, + "%s_reshape"); + if (!mddev->sync_thread) { + mddev->recovery = 0; + spin_lock_irq(&conf->device_lock); + mddev->raid_disks = conf->raid_disks = conf->previous_raid_disks; + conf->expand_progress = MaxSector; + spin_unlock_irq(&conf->device_lock); + return -EAGAIN; + } + md_wakeup_thread(mddev->sync_thread); + md_new_event(mddev); + return 0; +} +#endif + +static void end_reshape(raid5_conf_t *conf) +{ + struct block_device *bdev; + + if (!test_bit(MD_RECOVERY_INTR, &conf->mddev->recovery)) { + conf->mddev->array_size = conf->mddev->size * (conf->raid_disks-1); + set_capacity(conf->mddev->gendisk, conf->mddev->array_size << 1); + conf->mddev->changed = 1; + + bdev = bdget_disk(conf->mddev->gendisk, 0); + if (bdev) { + mutex_lock(&bdev->bd_inode->i_mutex); + i_size_write(bdev->bd_inode, conf->mddev->array_size << 10); + mutex_unlock(&bdev->bd_inode->i_mutex); + bdput(bdev); + } + spin_lock_irq(&conf->device_lock); + conf->expand_progress = MaxSector; + spin_unlock_irq(&conf->device_lock); + conf->mddev->reshape_position = MaxSector; + } +} + static void raid5_quiesce(mddev_t *mddev, int state) { raid5_conf_t *conf = mddev_to_conf(mddev); switch(state) { + case 2: /* resume for a suspend */ + wake_up(&conf->wait_for_overlap); + break; + case 1: /* stop all writes */ spin_lock_irq(&conf->device_lock); conf->quiesce = 1; @@ -2186,6 +2766,7 @@ static void raid5_quiesce(mddev_t *mddev, int state) spin_lock_irq(&conf->device_lock); conf->quiesce = 0; wake_up(&conf->wait_for_stripe); + wake_up(&conf->wait_for_overlap); spin_unlock_irq(&conf->device_lock); break; } @@ -2206,6 +2787,10 @@ static 
struct mdk_personality raid5_personality = .spare_active = raid5_spare_active, .sync_request = sync_request, .resize = raid5_resize, +#ifdef CONFIG_MD_RAID5_RESHAPE + .check_reshape = raid5_check_reshape, + .start_reshape = raid5_start_reshape, +#endif .quiesce = raid5_quiesce, }; diff --git a/drivers/md/raid6main.c b/drivers/md/raid6main.c index cd477ebf2ee4..6df4930fddec 100644 --- a/drivers/md/raid6main.c +++ b/drivers/md/raid6main.c @@ -331,9 +331,9 @@ static int grow_stripes(raid6_conf_t *conf, int num) kmem_cache_t *sc; int devs = conf->raid_disks; - sprintf(conf->cache_name, "raid6/%s", mdname(conf->mddev)); + sprintf(conf->cache_name[0], "raid6/%s", mdname(conf->mddev)); - sc = kmem_cache_create(conf->cache_name, + sc = kmem_cache_create(conf->cache_name[0], sizeof(struct stripe_head)+(devs-1)*sizeof(struct r5dev), 0, 0, NULL, NULL); if (!sc) @@ -2006,11 +2006,14 @@ static int run(mddev_t *mddev) return -EIO; } - mddev->private = kzalloc(sizeof (raid6_conf_t) - + mddev->raid_disks * sizeof(struct disk_info), - GFP_KERNEL); + mddev->private = kzalloc(sizeof (raid6_conf_t), GFP_KERNEL); if ((conf = mddev->private) == NULL) goto abort; + conf->disks = kzalloc(mddev->raid_disks * sizeof(struct disk_info), + GFP_KERNEL); + if (!conf->disks) + goto abort; + conf->mddev = mddev; if ((conf->stripe_hashtbl = kzalloc(PAGE_SIZE, GFP_KERNEL)) == NULL) @@ -2158,6 +2161,7 @@ abort: print_raid6_conf(conf); safe_put_page(conf->spare_page); kfree(conf->stripe_hashtbl); + kfree(conf->disks); kfree(conf); } mddev->private = NULL; diff --git a/drivers/media/dvb/dvb-core/dvbdev.c b/drivers/media/dvb/dvb-core/dvbdev.c index 54f8b95717b0..96fe0ecae250 100644 --- a/drivers/media/dvb/dvb-core/dvbdev.c +++ b/drivers/media/dvb/dvb-core/dvbdev.c @@ -86,7 +86,7 @@ static int dvb_device_open(struct inode *inode, struct file *file) if (dvbdev && dvbdev->fops) { int err = 0; - struct file_operations *old_fops; + const struct file_operations *old_fops; file->private_data = dvbdev; old_fops = file->f_op; diff --git a/drivers/media/video/videodev.c b/drivers/media/video/videodev.c index 75e3d41382f2..5f87dd5f1d0b 100644 --- a/drivers/media/video/videodev.c +++ b/drivers/media/video/videodev.c @@ -97,7 +97,7 @@ static int video_open(struct inode *inode, struct file *file) unsigned int minor = iminor(inode); int err = 0; struct video_device *vfl; - struct file_operations *old_fops; + const struct file_operations *old_fops; if(minor>=VIDEO_NUM_DEVICES) return -ENODEV; diff --git a/drivers/message/i2o/i2o_block.c b/drivers/message/i2o/i2o_block.c index b09fb6307153..7d4c5497785b 100644 --- a/drivers/message/i2o/i2o_block.c +++ b/drivers/message/i2o/i2o_block.c @@ -1179,10 +1179,9 @@ static int __init i2o_block_init(void) goto exit; } - i2o_blk_req_pool.pool = mempool_create(I2O_BLOCK_REQ_MEMPOOL_SIZE, - mempool_alloc_slab, - mempool_free_slab, - i2o_blk_req_pool.slab); + i2o_blk_req_pool.pool = + mempool_create_slab_pool(I2O_BLOCK_REQ_MEMPOOL_SIZE, + i2o_blk_req_pool.slab); if (!i2o_blk_req_pool.pool) { osm_err("can't init request mempool\n"); rc = -ENOMEM; diff --git a/drivers/message/i2o/i2o_proc.c b/drivers/message/i2o/i2o_proc.c index 2a0c42b8cda5..3d2e76eea93e 100644 --- a/drivers/message/i2o/i2o_proc.c +++ b/drivers/message/i2o/i2o_proc.c @@ -56,7 +56,7 @@ typedef struct _i2o_proc_entry_t { char *name; /* entry name */ mode_t mode; /* mode */ - struct file_operations *fops; /* open function */ + const struct file_operations *fops; /* open function */ } i2o_proc_entry; /* global I2O /proc/i2o entry */ diff --git 
a/drivers/misc/ibmasm/heartbeat.c b/drivers/misc/ibmasm/heartbeat.c index f295401fac21..7fd7a43e38de 100644 --- a/drivers/misc/ibmasm/heartbeat.c +++ b/drivers/misc/ibmasm/heartbeat.c @@ -52,12 +52,13 @@ static struct notifier_block panic_notifier = { panic_happened, NULL, 1 }; void ibmasm_register_panic_notifier(void) { - notifier_chain_register(&panic_notifier_list, &panic_notifier); + atomic_notifier_chain_register(&panic_notifier_list, &panic_notifier); } void ibmasm_unregister_panic_notifier(void) { - notifier_chain_unregister(&panic_notifier_list, &panic_notifier); + atomic_notifier_chain_unregister(&panic_notifier_list, + &panic_notifier); } diff --git a/drivers/misc/ibmasm/ibmasmfs.c b/drivers/misc/ibmasm/ibmasmfs.c index 5c550fcac2c4..26a230b6ff80 100644 --- a/drivers/misc/ibmasm/ibmasmfs.c +++ b/drivers/misc/ibmasm/ibmasmfs.c @@ -101,7 +101,7 @@ static struct super_operations ibmasmfs_s_ops = { .drop_inode = generic_delete_inode, }; -static struct file_operations *ibmasmfs_dir_ops = &simple_dir_operations; +static const struct file_operations *ibmasmfs_dir_ops = &simple_dir_operations; static struct file_system_type ibmasmfs_type = { .owner = THIS_MODULE, diff --git a/drivers/mtd/chips/Kconfig b/drivers/mtd/chips/Kconfig index 205bb7083335..0f6bb2e625d8 100644 --- a/drivers/mtd/chips/Kconfig +++ b/drivers/mtd/chips/Kconfig @@ -25,9 +25,8 @@ config MTD_JEDECPROBE compatible with the Common Flash Interface, but will use the common CFI-targetted flash drivers for any chips which are identified which are in fact compatible in all but the probe method. This actually - covers most AMD/Fujitsu-compatible chips, and will shortly cover also - non-CFI Intel chips (that code is in MTD CVS and should shortly be sent - for inclusion in Linus' tree) + covers most AMD/Fujitsu-compatible chips and also non-CFI + Intel chips. 
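The ibmasm heartbeat hunk above, like the parisc led/power hunks later in this series, moves panic callbacks from the old notifier_chain_* helpers onto the atomic notifier API. A minimal sketch of the resulting pattern follows; the my_* names are invented for illustration and are not part of any patch here:

#include <linux/kernel.h>
#include <linux/notifier.h>

/* hypothetical callback: panic notifiers can be invoked in atomic context */
static int my_panic_event(struct notifier_block *nb,
			  unsigned long event, void *ptr)
{
	/* must not sleep here */
	return NOTIFY_DONE;
}

static struct notifier_block my_panic_block = {
	.notifier_call	= my_panic_event,
};

void my_register_panic_notifier(void)
{
	atomic_notifier_chain_register(&panic_notifier_list, &my_panic_block);
}

void my_unregister_panic_notifier(void)
{
	atomic_notifier_chain_unregister(&panic_notifier_list, &my_panic_block);
}

The register/unregister calls are the only lines a typical caller has to touch, which is why the conversions in this series are mostly one-line substitutions.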
config MTD_GEN_PROBE tristate diff --git a/drivers/mtd/maps/dilnetpc.c b/drivers/mtd/maps/dilnetpc.c index b51c757817d8..efb221692641 100644 --- a/drivers/mtd/maps/dilnetpc.c +++ b/drivers/mtd/maps/dilnetpc.c @@ -218,8 +218,8 @@ static void dnp_set_vpp(struct map_info *not_used, int on) { if(--vpp_counter == 0) setcsc(CSC_RBWR, getcsc(CSC_RBWR) | 0x4); - else if(vpp_counter < 0) - BUG(); + else + BUG_ON(vpp_counter < 0); } spin_unlock_irq(&dnpc_spin); } @@ -240,8 +240,8 @@ static void adnp_set_vpp(struct map_info *not_used, int on) { if(--vpp_counter == 0) setcsc(CSC_RBWR, getcsc(CSC_RBWR) | 0x8); - else if(vpp_counter < 0) - BUG(); + else + BUG_ON(vpp_counter < 0); } spin_unlock_irq(&dnpc_spin); } diff --git a/drivers/mtd/mtd_blkdevs.c b/drivers/mtd/mtd_blkdevs.c index 7f3ff500b68e..840dd66ce2dc 100644 --- a/drivers/mtd/mtd_blkdevs.c +++ b/drivers/mtd/mtd_blkdevs.c @@ -450,8 +450,7 @@ int deregister_mtd_blktrans(struct mtd_blktrans_ops *tr) kfree(tr->blkcore_priv); - if (!list_empty(&tr->devs)) - BUG(); + BUG_ON(!list_empty(&tr->devs)); return 0; } diff --git a/drivers/mtd/mtdconcat.c b/drivers/mtd/mtdconcat.c index b1bf8c411de7..9af840364a74 100644 --- a/drivers/mtd/mtdconcat.c +++ b/drivers/mtd/mtdconcat.c @@ -477,8 +477,7 @@ static int concat_erase(struct mtd_info *mtd, struct erase_info *instr) } /* must never happen since size limit has been verified above */ - if (i >= concat->num_subdev) - BUG(); + BUG_ON(i >= concat->num_subdev); /* now do the erase: */ err = 0; @@ -500,8 +499,7 @@ static int concat_erase(struct mtd_info *mtd, struct erase_info *instr) if ((err = concat_dev_erase(subdev, erase))) { /* sanity check: should never happen since * block alignment has been checked above */ - if (err == -EINVAL) - BUG(); + BUG_ON(err == -EINVAL); if (erase->fail_addr != 0xffffffff) instr->fail_addr = erase->fail_addr + offset; break; diff --git a/drivers/net/3c59x.c b/drivers/net/3c59x.c index d339308539fa..70f63891b19c 100644 --- a/drivers/net/3c59x.c +++ b/drivers/net/3c59x.c @@ -196,8 +196,6 @@ #define DRV_NAME "3c59x" -#define DRV_VERSION "LK1.1.19" -#define DRV_RELDATE "10 Nov 2002" @@ -275,10 +273,8 @@ static char version[] __devinitdata = DRV_NAME ": Donald Becker and others. www.scyld.com/network/vortex.html\n"; MODULE_AUTHOR("Donald Becker <becker@scyld.com>"); -MODULE_DESCRIPTION("3Com 3c59x/3c9xx ethernet driver " - DRV_VERSION " " DRV_RELDATE); +MODULE_DESCRIPTION("3Com 3c59x/3c9xx ethernet driver "); MODULE_LICENSE("GPL"); -MODULE_VERSION(DRV_VERSION); /* Operational parameter that usually are not changed. */ @@ -904,7 +900,6 @@ static void acpi_set_WOL(struct net_device *dev); static struct ethtool_ops vortex_ethtool_ops; static void set_8021q_mode(struct net_device *dev, int enable); - /* This driver uses 'options' to pass the media type, full-duplex flag, etc. */ /* Option count limit only -- unlimited interfaces are supported. */ #define MAX_UNITS 8 @@ -919,8 +914,6 @@ static int global_full_duplex = -1; static int global_enable_wol = -1; static int global_use_mmio = -1; -/* #define dev_alloc_skb dev_alloc_skb_debug */ - /* Variables to work-around the Compaq PCI BIOS32 problem. 
*/ static int compaq_ioaddr, compaq_irq, compaq_device_id = 0x5900; static struct net_device *compaq_net_device; @@ -976,7 +969,7 @@ static void poll_vortex(struct net_device *dev) #ifdef CONFIG_PM -static int vortex_suspend (struct pci_dev *pdev, pm_message_t state) +static int vortex_suspend(struct pci_dev *pdev, pm_message_t state) { struct net_device *dev = pci_get_drvdata(pdev); @@ -994,7 +987,7 @@ static int vortex_suspend (struct pci_dev *pdev, pm_message_t state) return 0; } -static int vortex_resume (struct pci_dev *pdev) +static int vortex_resume(struct pci_dev *pdev) { struct net_device *dev = pci_get_drvdata(pdev); struct vortex_private *vp = netdev_priv(dev); @@ -1027,8 +1020,8 @@ static struct eisa_device_id vortex_eisa_ids[] = { { "" } }; -static int vortex_eisa_probe (struct device *device); -static int vortex_eisa_remove (struct device *device); +static int vortex_eisa_probe(struct device *device); +static int vortex_eisa_remove(struct device *device); static struct eisa_driver vortex_eisa_driver = { .id_table = vortex_eisa_ids, @@ -1039,12 +1032,12 @@ static struct eisa_driver vortex_eisa_driver = { } }; -static int vortex_eisa_probe (struct device *device) +static int vortex_eisa_probe(struct device *device) { void __iomem *ioaddr; struct eisa_device *edev; - edev = to_eisa_device (device); + edev = to_eisa_device(device); if (!request_region(edev->base_addr, VORTEX_TOTAL_SIZE, DRV_NAME)) return -EBUSY; @@ -1053,7 +1046,7 @@ static int vortex_eisa_probe (struct device *device) if (vortex_probe1(device, ioaddr, ioread16(ioaddr + 0xC88) >> 12, edev->id.driver_data, vortex_cards_found)) { - release_region (edev->base_addr, VORTEX_TOTAL_SIZE); + release_region(edev->base_addr, VORTEX_TOTAL_SIZE); return -ENODEV; } @@ -1062,15 +1055,15 @@ static int vortex_eisa_probe (struct device *device) return 0; } -static int vortex_eisa_remove (struct device *device) +static int vortex_eisa_remove(struct device *device) { struct eisa_device *edev; struct net_device *dev; struct vortex_private *vp; void __iomem *ioaddr; - edev = to_eisa_device (device); - dev = eisa_get_drvdata (edev); + edev = to_eisa_device(device); + dev = eisa_get_drvdata(edev); if (!dev) { printk("vortex_eisa_remove called for Compaq device!\n"); @@ -1080,17 +1073,17 @@ static int vortex_eisa_remove (struct device *device) vp = netdev_priv(dev); ioaddr = vp->ioaddr; - unregister_netdev (dev); - iowrite16 (TotalReset|0x14, ioaddr + EL3_CMD); - release_region (dev->base_addr, VORTEX_TOTAL_SIZE); + unregister_netdev(dev); + iowrite16(TotalReset|0x14, ioaddr + EL3_CMD); + release_region(dev->base_addr, VORTEX_TOTAL_SIZE); - free_netdev (dev); + free_netdev(dev); return 0; } #endif /* returns count found (>= 0), or negative on error */ -static int __init vortex_eisa_init (void) +static int __init vortex_eisa_init(void) { int eisa_found = 0; int orig_cards_found = vortex_cards_found; @@ -1121,7 +1114,7 @@ static int __init vortex_eisa_init (void) } /* returns count (>= 0), or negative on error */ -static int __devinit vortex_init_one (struct pci_dev *pdev, +static int __devinit vortex_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) { int rc, unit, pci_bar; @@ -1129,7 +1122,7 @@ static int __devinit vortex_init_one (struct pci_dev *pdev, void __iomem *ioaddr; /* wake up and enable device */ - rc = pci_enable_device (pdev); + rc = pci_enable_device(pdev); if (rc < 0) goto out; @@ -1151,7 +1144,7 @@ static int __devinit vortex_init_one (struct pci_dev *pdev, rc = vortex_probe1(&pdev->dev, ioaddr, pdev->irq, 
ent->driver_data, unit); if (rc < 0) { - pci_disable_device (pdev); + pci_disable_device(pdev); goto out; } @@ -1236,7 +1229,7 @@ static int __devinit vortex_probe1(struct device *gendev, if (print_info) printk (KERN_INFO "See Documentation/networking/vortex.txt\n"); - printk(KERN_INFO "%s: 3Com %s %s at %p. Vers " DRV_VERSION "\n", + printk(KERN_INFO "%s: 3Com %s %s at %p.\n", print_name, pdev ? "PCI" : "EISA", vci->name, @@ -1266,7 +1259,7 @@ static int __devinit vortex_probe1(struct device *gendev, /* enable bus-mastering if necessary */ if (vci->flags & PCI_USES_MASTER) - pci_set_master (pdev); + pci_set_master(pdev); if (vci->drv_flags & IS_VORTEX) { u8 pci_latency; @@ -1310,7 +1303,7 @@ static int __devinit vortex_probe1(struct device *gendev, if (pdev) pci_set_drvdata(pdev, dev); if (edev) - eisa_set_drvdata (edev, dev); + eisa_set_drvdata(edev, dev); vp->media_override = 7; if (option >= 0) { @@ -1335,7 +1328,7 @@ static int __devinit vortex_probe1(struct device *gendev, vp->enable_wol = 1; } - vp->force_fd = vp->full_duplex; + vp->mii.force_media = vp->full_duplex; vp->options = option; /* Read the station address from the EEPROM. */ EL3WINDOW(0); @@ -1625,6 +1618,46 @@ issue_and_wait(struct net_device *dev, int cmd) } static void +vortex_set_duplex(struct net_device *dev) +{ + struct vortex_private *vp = netdev_priv(dev); + void __iomem *ioaddr = vp->ioaddr; + + printk(KERN_INFO "%s: setting %s-duplex.\n", + dev->name, (vp->full_duplex) ? "full" : "half"); + + EL3WINDOW(3); + /* Set the full-duplex bit. */ + iowrite16(((vp->info1 & 0x8000) || vp->full_duplex ? 0x20 : 0) | + (vp->large_frames ? 0x40 : 0) | + ((vp->full_duplex && vp->flow_ctrl && vp->partner_flow_ctrl) ? + 0x100 : 0), + ioaddr + Wn3_MAC_Ctrl); + + issue_and_wait(dev, TxReset); + /* + * Don't reset the PHY - that upsets autonegotiation during DHCP operations. + */ + issue_and_wait(dev, RxReset|0x04); +} + +static void vortex_check_media(struct net_device *dev, unsigned int init) +{ + struct vortex_private *vp = netdev_priv(dev); + unsigned int ok_to_print = 0; + + if (vortex_debug > 3) + ok_to_print = 1; + + if (mii_check_media(&vp->mii, ok_to_print, init)) { + vp->full_duplex = vp->mii.full_duplex; + vortex_set_duplex(dev); + } else if (init) { + vortex_set_duplex(dev); + } +} + +static void vortex_up(struct net_device *dev) { struct vortex_private *vp = netdev_priv(dev); @@ -1684,53 +1717,20 @@ vortex_up(struct net_device *dev) printk(KERN_DEBUG "%s: Initial media type %s.\n", dev->name, media_tbl[dev->if_port].name); - vp->full_duplex = vp->force_fd; + vp->full_duplex = vp->mii.force_media; config = BFINS(config, dev->if_port, 20, 4); if (vortex_debug > 6) printk(KERN_DEBUG "vortex_up(): writing 0x%x to InternalConfig\n", config); iowrite32(config, ioaddr + Wn3_Config); + netif_carrier_off(dev); if (dev->if_port == XCVR_MII || dev->if_port == XCVR_NWAY) { - int mii_reg1, mii_reg5; EL3WINDOW(4); - /* Read BMSR (reg1) only to clear old status. 
*/ - mii_reg1 = mdio_read(dev, vp->phys[0], MII_BMSR); - mii_reg5 = mdio_read(dev, vp->phys[0], MII_LPA); - if (mii_reg5 == 0xffff || mii_reg5 == 0x0000) { - netif_carrier_off(dev); /* No MII device or no link partner report */ - } else { - mii_reg5 &= vp->advertising; - if ((mii_reg5 & 0x0100) != 0 /* 100baseTx-FD */ - || (mii_reg5 & 0x00C0) == 0x0040) /* 10T-FD, but not 100-HD */ - vp->full_duplex = 1; - netif_carrier_on(dev); - } - vp->partner_flow_ctrl = ((mii_reg5 & 0x0400) != 0); - if (vortex_debug > 1) - printk(KERN_INFO "%s: MII #%d status %4.4x, link partner capability %4.4x," - " info1 %04x, setting %s-duplex.\n", - dev->name, vp->phys[0], - mii_reg1, mii_reg5, - vp->info1, ((vp->info1 & 0x8000) || vp->full_duplex) ? "full" : "half"); - EL3WINDOW(3); - } - - /* Set the full-duplex bit. */ - iowrite16( ((vp->info1 & 0x8000) || vp->full_duplex ? 0x20 : 0) | - (vp->large_frames ? 0x40 : 0) | - ((vp->full_duplex && vp->flow_ctrl && vp->partner_flow_ctrl) ? 0x100 : 0), - ioaddr + Wn3_MAC_Ctrl); - - if (vortex_debug > 1) { - printk(KERN_DEBUG "%s: vortex_up() InternalConfig %8.8x.\n", - dev->name, config); + vortex_check_media(dev, 1); } + else + vortex_set_duplex(dev); - issue_and_wait(dev, TxReset); - /* - * Don't reset the PHY - that upsets autonegotiation during DHCP operations. - */ - issue_and_wait(dev, RxReset|0x04); iowrite16(SetStatusEnb | 0x00, ioaddr + EL3_CMD); @@ -1805,7 +1805,6 @@ vortex_up(struct net_device *dev) set_8021q_mode(dev, 1); iowrite16(StatsEnable, ioaddr + EL3_CMD); /* Turn on statistics. */ -// issue_and_wait(dev, SetTxStart|0x07ff); iowrite16(RxEnable, ioaddr + EL3_CMD); /* Enable the receiver. */ iowrite16(TxEnable, ioaddr + EL3_CMD); /* Enable transmitter. */ /* Allow status bits to be seen. */ @@ -1892,7 +1891,7 @@ vortex_timer(unsigned long data) void __iomem *ioaddr = vp->ioaddr; int next_tick = 60*HZ; int ok = 0; - int media_status, mii_status, old_window; + int media_status, old_window; if (vortex_debug > 2) { printk(KERN_DEBUG "%s: Media selection timer tick happened, %s.\n", @@ -1900,8 +1899,6 @@ vortex_timer(unsigned long data) printk(KERN_DEBUG "dev->watchdog_timeo=%d\n", dev->watchdog_timeo); } - if (vp->medialock) - goto leave_media_alone; disable_irq(dev->irq); old_window = ioread16(ioaddr + EL3_CMD) >> 13; EL3WINDOW(4); @@ -1924,44 +1921,9 @@ vortex_timer(unsigned long data) break; case XCVR_MII: case XCVR_NWAY: { - spin_lock_bh(&vp->lock); - mii_status = mdio_read(dev, vp->phys[0], MII_BMSR); - if (!(mii_status & BMSR_LSTATUS)) { - /* Re-read to get actual link status */ - mii_status = mdio_read(dev, vp->phys[0], MII_BMSR); - } ok = 1; - if (vortex_debug > 2) - printk(KERN_DEBUG "%s: MII transceiver has status %4.4x.\n", - dev->name, mii_status); - if (mii_status & BMSR_LSTATUS) { - int mii_reg5 = mdio_read(dev, vp->phys[0], MII_LPA); - if (! vp->force_fd && mii_reg5 != 0xffff) { - int duplex; - - mii_reg5 &= vp->advertising; - duplex = (mii_reg5&0x0100) || (mii_reg5 & 0x01C0) == 0x0040; - if (vp->full_duplex != duplex) { - vp->full_duplex = duplex; - printk(KERN_INFO "%s: Setting %s-duplex based on MII " - "#%d link partner capability of %4.4x.\n", - dev->name, vp->full_duplex ? "full" : "half", - vp->phys[0], mii_reg5); - /* Set the full-duplex bit. */ - EL3WINDOW(3); - iowrite16( (vp->full_duplex ? 0x20 : 0) | - (vp->large_frames ? 0x40 : 0) | - ((vp->full_duplex && vp->flow_ctrl && vp->partner_flow_ctrl) ? 
0x100 : 0), - ioaddr + Wn3_MAC_Ctrl); - if (vortex_debug > 1) - printk(KERN_DEBUG "Setting duplex in Wn3_MAC_Ctrl\n"); - /* AKPM: bug: should reset Tx and Rx after setting Duplex. Page 180 */ - } - } - netif_carrier_on(dev); - } else { - netif_carrier_off(dev); - } + spin_lock_bh(&vp->lock); + vortex_check_media(dev, 0); spin_unlock_bh(&vp->lock); } break; @@ -1971,7 +1933,14 @@ vortex_timer(unsigned long data) dev->name, media_tbl[dev->if_port].name, media_status); ok = 1; } - if ( ! ok) { + + if (!netif_carrier_ok(dev)) + next_tick = 5*HZ; + + if (vp->medialock) + goto leave_media_alone; + + if (!ok) { unsigned int config; do { @@ -2004,14 +1973,14 @@ vortex_timer(unsigned long data) printk(KERN_DEBUG "wrote 0x%08x to Wn3_Config\n", config); /* AKPM: FIXME: Should reset Rx & Tx here. P60 of 3c90xc.pdf */ } - EL3WINDOW(old_window); - enable_irq(dev->irq); leave_media_alone: if (vortex_debug > 2) printk(KERN_DEBUG "%s: Media selection timer finished, %s.\n", dev->name, media_tbl[dev->if_port].name); + EL3WINDOW(old_window); + enable_irq(dev->irq); mod_timer(&vp->timer, RUN_AT(next_tick)); if (vp->deferred) iowrite16(FakeIntr, ioaddr + EL3_CMD); @@ -2206,7 +2175,7 @@ vortex_start_xmit(struct sk_buff *skb, struct net_device *dev) if (vp->bus_master) { /* Set the bus-master controller to transfer the packet. */ int len = (skb->len + 3) & ~3; - iowrite32( vp->tx_skb_dma = pci_map_single(VORTEX_PCI(vp), skb->data, len, PCI_DMA_TODEVICE), + iowrite32(vp->tx_skb_dma = pci_map_single(VORTEX_PCI(vp), skb->data, len, PCI_DMA_TODEVICE), ioaddr + Wn7_MasterAddr); iowrite16(len, ioaddr + Wn7_MasterLen); vp->tx_skb = skb; @@ -2983,20 +2952,6 @@ static int vortex_nway_reset(struct net_device *dev) return rc; } -static u32 vortex_get_link(struct net_device *dev) -{ - struct vortex_private *vp = netdev_priv(dev); - void __iomem *ioaddr = vp->ioaddr; - unsigned long flags; - int rc; - - spin_lock_irqsave(&vp->lock, flags); - EL3WINDOW(4); - rc = mii_link_ok(&vp->mii); - spin_unlock_irqrestore(&vp->lock, flags); - return rc; -} - static int vortex_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) { struct vortex_private *vp = netdev_priv(dev); @@ -3077,7 +3032,6 @@ static void vortex_get_drvinfo(struct net_device *dev, struct vortex_private *vp = netdev_priv(dev); strcpy(info->driver, DRV_NAME); - strcpy(info->version, DRV_VERSION); if (VORTEX_PCI(vp)) { strcpy(info->bus_info, pci_name(VORTEX_PCI(vp))); } else { @@ -3098,9 +3052,9 @@ static struct ethtool_ops vortex_ethtool_ops = { .get_stats_count = vortex_get_stats_count, .get_settings = vortex_get_settings, .set_settings = vortex_set_settings, - .get_link = vortex_get_link, + .get_link = ethtool_op_get_link, .nway_reset = vortex_nway_reset, - .get_perm_addr = ethtool_op_get_perm_addr, + .get_perm_addr = ethtool_op_get_perm_addr, }; #ifdef CONFIG_PCI @@ -3301,7 +3255,7 @@ static void mdio_write(struct net_device *dev, int phy_id, int location, int val } return; } - + /* ACPI: Advanced Configuration and Power Interface. */ /* Set Wake-On-LAN mode and put the board into D3 (power-down) state. 
*/ static void acpi_set_WOL(struct net_device *dev) @@ -3325,7 +3279,7 @@ static void acpi_set_WOL(struct net_device *dev) } -static void __devexit vortex_remove_one (struct pci_dev *pdev) +static void __devexit vortex_remove_one(struct pci_dev *pdev) { struct net_device *dev = pci_get_drvdata(pdev); struct vortex_private *vp; @@ -3381,7 +3335,7 @@ static int vortex_have_pci; static int vortex_have_eisa; -static int __init vortex_init (void) +static int __init vortex_init(void) { int pci_rc, eisa_rc; @@ -3397,14 +3351,14 @@ static int __init vortex_init (void) } -static void __exit vortex_eisa_cleanup (void) +static void __exit vortex_eisa_cleanup(void) { struct vortex_private *vp; void __iomem *ioaddr; #ifdef CONFIG_EISA /* Take care of the EISA devices */ - eisa_driver_unregister (&vortex_eisa_driver); + eisa_driver_unregister(&vortex_eisa_driver); #endif if (compaq_net_device) { @@ -3412,33 +3366,24 @@ static void __exit vortex_eisa_cleanup (void) ioaddr = ioport_map(compaq_net_device->base_addr, VORTEX_TOTAL_SIZE); - unregister_netdev (compaq_net_device); - iowrite16 (TotalReset, ioaddr + EL3_CMD); + unregister_netdev(compaq_net_device); + iowrite16(TotalReset, ioaddr + EL3_CMD); release_region(compaq_net_device->base_addr, VORTEX_TOTAL_SIZE); - free_netdev (compaq_net_device); + free_netdev(compaq_net_device); } } -static void __exit vortex_cleanup (void) +static void __exit vortex_cleanup(void) { if (vortex_have_pci) - pci_unregister_driver (&vortex_driver); + pci_unregister_driver(&vortex_driver); if (vortex_have_eisa) - vortex_eisa_cleanup (); + vortex_eisa_cleanup(); } module_init(vortex_init); module_exit(vortex_cleanup); - - -/* - * Local variables: - * c-indent-level: 4 - * c-basic-offset: 4 - * tab-width: 4 - * End: - */ diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c index 2d0ac169a86c..f13a539dc169 100644 --- a/drivers/net/bonding/bond_main.c +++ b/drivers/net/bonding/bond_main.c @@ -3159,7 +3159,7 @@ static int bond_slave_netdev_event(unsigned long event, struct net_device *slave * bond_netdev_event: handle netdev notifier chain events. * * This function receives events for the netdev chain. The caller (an - * ioctl handler calling notifier_call_chain) holds the necessary + * ioctl handler calling blocking_notifier_call_chain) holds the necessary * locks for us to safely manipulate the slave devices (RTNL lock, * dev_probe_lock). 
*/ diff --git a/drivers/net/forcedeth.c b/drivers/net/forcedeth.c index e7fc28b07e5a..7627a75f4f7c 100644 --- a/drivers/net/forcedeth.c +++ b/drivers/net/forcedeth.c @@ -134,6 +134,7 @@ #include <linux/random.h> #include <linux/init.h> #include <linux/if_vlan.h> +#include <linux/dma-mapping.h> #include <asm/irq.h> #include <asm/io.h> @@ -2932,7 +2933,7 @@ static int __devinit nv_probe(struct pci_dev *pci_dev, const struct pci_device_i if (id->driver_data & DEV_HAS_HIGH_DMA) { /* packet format 3: supports 40-bit addressing */ np->desc_ver = DESC_VER_3; - if (pci_set_dma_mask(pci_dev, 0x0000007fffffffffULL)) { + if (pci_set_dma_mask(pci_dev, DMA_39BIT_MASK)) { printk(KERN_INFO "forcedeth: 64-bit DMA failed, using 32-bit addressing for device %s.\n", pci_name(pci_dev)); } else { diff --git a/drivers/net/ioc3-eth.c b/drivers/net/ioc3-eth.c index 9b8295ee06ef..ae71ed57c12d 100644 --- a/drivers/net/ioc3-eth.c +++ b/drivers/net/ioc3-eth.c @@ -44,6 +44,7 @@ #include <linux/ip.h> #include <linux/tcp.h> #include <linux/udp.h> +#include <linux/dma-mapping.h> #ifdef CONFIG_SERIAL_8250 #include <linux/serial_core.h> @@ -1195,17 +1196,17 @@ static int ioc3_probe(struct pci_dev *pdev, const struct pci_device_id *ent) int err, pci_using_dac; /* Configure DMA attributes. */ - err = pci_set_dma_mask(pdev, 0xffffffffffffffffULL); + err = pci_set_dma_mask(pdev, DMA_64BIT_MASK); if (!err) { pci_using_dac = 1; - err = pci_set_consistent_dma_mask(pdev, 0xffffffffffffffffULL); + err = pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK); if (err < 0) { printk(KERN_ERR "%s: Unable to obtain 64 bit DMA " "for consistent allocations\n", pci_name(pdev)); goto out; } } else { - err = pci_set_dma_mask(pdev, 0xffffffffULL); + err = pci_set_dma_mask(pdev, DMA_32BIT_MASK); if (err) { printk(KERN_ERR "%s: No usable DMA configuration, " "aborting.\n", pci_name(pdev)); diff --git a/drivers/net/irda/nsc-ircc.c b/drivers/net/irda/nsc-ircc.c index 83141a3ff546..cc7ff8f00e42 100644 --- a/drivers/net/irda/nsc-ircc.c +++ b/drivers/net/irda/nsc-ircc.c @@ -207,7 +207,7 @@ static int __init nsc_ircc_init(void) /* Register with PnP subsystem to detect disable ports */ ret = pnp_register_driver(&nsc_ircc_pnp_driver); - if (ret >= 0) + if (!ret) pnp_registered = 1; ret = -ENODEV; @@ -812,7 +812,7 @@ static int nsc_ircc_init_39x(nsc_chip_t *chip, chipio_t *info) int cfg_base = info->cfg_base; int enabled; - /* User is shure about his config... accept it. */ + /* User is sure about his config... accept it. */ IRDA_DEBUG(2, "%s(): nsc_ircc_init_39x (user settings): " "io=0x%04x, irq=%d, dma=%d\n", __FUNCTION__, info->fir_base, info->irq, info->dma); diff --git a/drivers/net/loopback.c b/drivers/net/loopback.c index 0c13795dca38..b79d6e8d3045 100644 --- a/drivers/net/loopback.c +++ b/drivers/net/loopback.c @@ -172,7 +172,7 @@ static struct net_device_stats *get_stats(struct net_device *dev) memset(stats, 0, sizeof(struct net_device_stats)); - for_each_cpu(i) { + for_each_possible_cpu(i) { struct net_device_stats *lb_stats; lb_stats = &per_cpu(loopback_stats, i); diff --git a/drivers/net/ns83820.c b/drivers/net/ns83820.c index 0fede50abd3e..8e9b1a537dee 100644 --- a/drivers/net/ns83820.c +++ b/drivers/net/ns83820.c @@ -1828,10 +1828,10 @@ static int __devinit ns83820_init_one(struct pci_dev *pci_dev, const struct pci_ int using_dac = 0; /* See if we can set the dma mask early on; failure is fatal. 
*/ - if (sizeof(dma_addr_t) == 8 && - !pci_set_dma_mask(pci_dev, 0xffffffffffffffffULL)) { + if (sizeof(dma_addr_t) == 8 && + !pci_set_dma_mask(pci_dev, DMA_64BIT_MASK)) { using_dac = 1; - } else if (!pci_set_dma_mask(pci_dev, 0xffffffff)) { + } else if (!pci_set_dma_mask(pci_dev, DMA_32BIT_MASK)) { using_dac = 0; } else { printk(KERN_WARNING "ns83820.c: pci_set_dma_mask failed!\n"); diff --git a/drivers/net/sis900.c b/drivers/net/sis900.c index 253440a98022..b82191d2bee1 100644 --- a/drivers/net/sis900.c +++ b/drivers/net/sis900.c @@ -1693,7 +1693,7 @@ static irqreturn_t sis900_interrupt(int irq, void *dev_instance, struct pt_regs * * Process receive interrupt events, * put buffer to higher layer and refill buffer pool - * Note: This fucntion is called by interrupt handler, + * Note: This function is called by interrupt handler, * don't do "too much" work here */ @@ -1840,7 +1840,7 @@ static int sis900_rx(struct net_device *net_dev) * * Check for error condition and free socket buffer etc * schedule for more transmission as needed - * Note: This fucntion is called by interrupt handler, + * Note: This function is called by interrupt handler, * don't do "too much" work here */ @@ -2283,7 +2283,7 @@ static void set_rx_mode(struct net_device *net_dev) int i, table_entries; u32 rx_mode; - /* 635 Hash Table entires = 256(2^16) */ + /* 635 Hash Table entries = 256(2^16) */ if((sis_priv->chipset_rev >= SIS635A_900_REV) || (sis_priv->chipset_rev == SIS900B_900_REV)) table_entries = 16; diff --git a/drivers/net/tulip/de4x5.c b/drivers/net/tulip/de4x5.c index ee48bfd67349..d1a86a080a65 100644 --- a/drivers/net/tulip/de4x5.c +++ b/drivers/net/tulip/de4x5.c @@ -513,7 +513,7 @@ struct mii_phy { u_char *rst; /* Start of reset sequence in SROM */ u_int mc; /* Media Capabilities */ u_int ana; /* NWay Advertisement */ - u_int fdx; /* Full DupleX capabilites for each media */ + u_int fdx; /* Full DupleX capabilities for each media */ u_int ttm; /* Transmit Threshold Mode for each media */ u_int mci; /* 21142 MII Connector Interrupt info */ }; diff --git a/drivers/net/tulip/pnic2.c b/drivers/net/tulip/pnic2.c index 55f4a9a631bc..ab985023fcca 100644 --- a/drivers/net/tulip/pnic2.c +++ b/drivers/net/tulip/pnic2.c @@ -199,7 +199,7 @@ void pnic2_lnk_change(struct net_device *dev, int csr5) /* negotiation ended successfully */ /* get the link partners reply and mask out all but - * bits 24-21 which show the partners capabilites + * bits 24-21 which show the partners capabilities * and match those to what we advertised * * then begin to interpret the results of the negotiation. 
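The ns83820 and ioc3-eth hunks above, like the forcedeth, wanxl and prism54 changes elsewhere in this series, drop open-coded DMA masks in favour of the DMA_*BIT_MASK constants from <linux/dma-mapping.h>. A rough sketch of the usual 64-bit-with-32-bit-fallback probe idiom, using an invented mydev_ prefix:

#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>

/* returns 1 if 64-bit DMA is usable, 0 if only 32-bit, negative on error */
static int mydev_setup_dma(struct pci_dev *pdev)
{
	if (!pci_set_dma_mask(pdev, DMA_64BIT_MASK)) {
		if (pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK)) {
			printk(KERN_ERR "%s: no 64-bit consistent DMA\n",
			       pci_name(pdev));
			return -EIO;
		}
		return 1;
	}
	if (pci_set_dma_mask(pdev, DMA_32BIT_MASK)) {
		printk(KERN_ERR "%s: no usable DMA configuration\n",
		       pci_name(pdev));
		return -EIO;
	}
	return 0;
}

Using the symbolic constants keeps the intended address width readable and avoids the easy-to-miss digit-count mistakes that raw hex masks invite.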
diff --git a/drivers/net/typhoon.c b/drivers/net/typhoon.c index cde35dd87906..c1ce87a5f8d3 100644 --- a/drivers/net/typhoon.c +++ b/drivers/net/typhoon.c @@ -208,7 +208,7 @@ static const struct typhoon_card_info typhoon_card_info[] __devinitdata = { }; /* Notes on the new subsystem numbering scheme: - * bits 0-1 indicate crypto capabilites: (0) variable, (1) DES, or (2) 3DES + * bits 0-1 indicate crypto capabilities: (0) variable, (1) DES, or (2) 3DES * bit 4 indicates if this card has secured firmware (we don't support it) * bit 8 indicates if this is a (0) copper or (1) fiber card * bits 12-16 indicate card type: (0) client and (1) server @@ -788,7 +788,7 @@ typhoon_start_tx(struct sk_buff *skb, struct net_device *dev) /* we have two rings to choose from, but we only use txLo for now * If we start using the Hi ring as well, we'll need to update * typhoon_stop_runtime(), typhoon_interrupt(), typhoon_num_free_tx(), - * and TXHI_ENTIRES to match, as well as update the TSO code below + * and TXHI_ENTRIES to match, as well as update the TSO code below * to get the right DMA address */ txRing = &tp->txLoRing; diff --git a/drivers/net/wan/dscc4.c b/drivers/net/wan/dscc4.c index 1ff5de076d21..4505540e3c59 100644 --- a/drivers/net/wan/dscc4.c +++ b/drivers/net/wan/dscc4.c @@ -105,6 +105,7 @@ #include <linux/delay.h> #include <net/syncppp.h> #include <linux/hdlc.h> +#include <linux/mutex.h> /* Version */ static const char version[] = "$Id: dscc4.c,v 1.173 2003/09/20 23:55:34 romieu Exp $ for Linux\n"; @@ -112,7 +113,7 @@ static int debug; static int quartz; #ifdef CONFIG_DSCC4_PCI_RST -static DECLARE_MUTEX(dscc4_sem); +static DEFINE_MUTEX(dscc4_mutex); static u32 dscc4_pci_config_store[16]; #endif @@ -1018,7 +1019,7 @@ static void dscc4_pci_reset(struct pci_dev *pdev, void __iomem *ioaddr) { int i; - down(&dscc4_sem); + mutex_lock(&dscc4_mutex); for (i = 0; i < 16; i++) pci_read_config_dword(pdev, i << 2, dscc4_pci_config_store + i); @@ -1039,7 +1040,7 @@ static void dscc4_pci_reset(struct pci_dev *pdev, void __iomem *ioaddr) for (i = 0; i < 16; i++) pci_write_config_dword(pdev, i << 2, dscc4_pci_config_store[i]); - up(&dscc4_sem); + mutex_unlock(&dscc4_mutex); } #else #define dscc4_pci_reset(pdev,ioaddr) do {} while (0) diff --git a/drivers/net/wan/wanxl.c b/drivers/net/wan/wanxl.c index 9d3b51c3ef54..29a756dd979b 100644 --- a/drivers/net/wan/wanxl.c +++ b/drivers/net/wan/wanxl.c @@ -577,8 +577,8 @@ static int __devinit wanxl_pci_init_one(struct pci_dev *pdev, We set both dma_mask and consistent_dma_mask to 28 bits and pray pci_alloc_consistent() will use this info. It should work on most platforms */ - if (pci_set_consistent_dma_mask(pdev, 0x0FFFFFFF) || - pci_set_dma_mask(pdev, 0x0FFFFFFF)) { + if (pci_set_consistent_dma_mask(pdev, DMA_28BIT_MASK) || + pci_set_dma_mask(pdev, DMA_28BIT_MASK)) { printk(KERN_ERR "wanXL: No usable DMA configuration\n"); return -EIO; } diff --git a/drivers/net/wireless/orinoco.c b/drivers/net/wireless/orinoco.c index 6fd0bf736830..8dfdfbd5966c 100644 --- a/drivers/net/wireless/orinoco.c +++ b/drivers/net/wireless/orinoco.c @@ -3858,7 +3858,7 @@ static int orinoco_ioctl_setscan(struct net_device *dev, unsigned long flags; /* Note : you may have realised that, as this is a SET operation, - * this is priviledged and therefore a normal user can't + * this is privileged and therefore a normal user can't * perform scanning. * This is not an error, while the device perform scanning, * traffic doesn't flow, so it's a perfect DoS... 
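The dscc4 hunk above, like the parport, rpadlpar_core, sgi_hotplug and isapnp hunks further on, replaces a semaphore that only ever provided mutual exclusion with the mutex API. The conversion follows this shape (the mydrv_ names are hypothetical):

#include <linux/mutex.h>
#include <linux/errno.h>

static DEFINE_MUTEX(mydrv_mutex);	/* was: static DECLARE_MUTEX(mydrv_sem); */

static void mydrv_reconfigure(void)
{
	mutex_lock(&mydrv_mutex);	/* was: down(&mydrv_sem); */
	/* ... touch shared driver state ... */
	mutex_unlock(&mydrv_mutex);	/* was: up(&mydrv_sem); */
}

static int mydrv_reconfigure_interruptible(void)
{
	if (mutex_lock_interruptible(&mydrv_mutex))	/* was: down_interruptible() */
		return -ERESTARTSYS;
	/* ... */
	mutex_unlock(&mydrv_mutex);
	return 0;
}

A mutex gives the same exclusion semantics as a binary semaphore while enabling stricter debugging checks, which is the motivation behind these otherwise mechanical substitutions.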
diff --git a/drivers/net/wireless/prism54/isl_ioctl.c b/drivers/net/wireless/prism54/isl_ioctl.c index e5bb9f5ae429..989599ad33ef 100644 --- a/drivers/net/wireless/prism54/isl_ioctl.c +++ b/drivers/net/wireless/prism54/isl_ioctl.c @@ -747,7 +747,7 @@ prism54_get_essid(struct net_device *ndev, struct iw_request_info *info, if (essid->length) { dwrq->flags = 1; /* set ESSID to ON for Wireless Extensions */ - /* if it is to big, trunk it */ + /* if it is too big, trunk it */ dwrq->length = min((u8)IW_ESSID_MAX_SIZE, essid->length); } else { dwrq->flags = 0; diff --git a/drivers/net/wireless/prism54/islpci_hotplug.c b/drivers/net/wireless/prism54/islpci_hotplug.c index b41d666fea3c..bfa0cc319a09 100644 --- a/drivers/net/wireless/prism54/islpci_hotplug.c +++ b/drivers/net/wireless/prism54/islpci_hotplug.c @@ -22,6 +22,7 @@ #include <linux/pci.h> #include <linux/delay.h> #include <linux/init.h> /* For __init, __exit */ +#include <linux/dma-mapping.h> #include "prismcompat.h" #include "islpci_dev.h" @@ -124,7 +125,7 @@ prism54_probe(struct pci_dev *pdev, const struct pci_device_id *id) } /* enable PCI DMA */ - if (pci_set_dma_mask(pdev, 0xffffffff)) { + if (pci_set_dma_mask(pdev, DMA_32BIT_MASK)) { printk(KERN_ERR "%s: 32-bit PCI DMA not supported", DRV_NAME); goto do_pci_disable_device; } diff --git a/drivers/oprofile/cpu_buffer.c b/drivers/oprofile/cpu_buffer.c index 330d3869b41e..fc4bc9b94c74 100644 --- a/drivers/oprofile/cpu_buffer.c +++ b/drivers/oprofile/cpu_buffer.c @@ -217,11 +217,10 @@ static void oprofile_end_trace(struct oprofile_cpu_buffer * cpu_buf) cpu_buf->tracing = 0; } -void oprofile_add_sample(struct pt_regs * const regs, unsigned long event) +void oprofile_add_ext_sample(unsigned long pc, struct pt_regs * const regs, + unsigned long event, int is_kernel) { struct oprofile_cpu_buffer * cpu_buf = &cpu_buffer[smp_processor_id()]; - unsigned long pc = profile_pc(regs); - int is_kernel = !user_mode(regs); if (!backtrace_depth) { log_sample(cpu_buf, pc, is_kernel, event); @@ -238,6 +237,14 @@ void oprofile_add_sample(struct pt_regs * const regs, unsigned long event) oprofile_end_trace(cpu_buf); } +void oprofile_add_sample(struct pt_regs * const regs, unsigned long event) +{ + int is_kernel = !user_mode(regs); + unsigned long pc = profile_pc(regs); + + oprofile_add_ext_sample(pc, regs, event, is_kernel); +} + void oprofile_add_pc(unsigned long pc, int is_kernel, unsigned long event) { struct oprofile_cpu_buffer * cpu_buf = &cpu_buffer[smp_processor_id()]; diff --git a/drivers/oprofile/oprofile_stats.c b/drivers/oprofile/oprofile_stats.c index e94b1e4a2a84..f0acb661c253 100644 --- a/drivers/oprofile/oprofile_stats.c +++ b/drivers/oprofile/oprofile_stats.c @@ -22,7 +22,7 @@ void oprofile_reset_stats(void) struct oprofile_cpu_buffer * cpu_buf; int i; - for_each_cpu(i) { + for_each_possible_cpu(i) { cpu_buf = &cpu_buffer[i]; cpu_buf->sample_received = 0; cpu_buf->sample_lost_overflow = 0; @@ -46,7 +46,7 @@ void oprofile_create_stats_files(struct super_block * sb, struct dentry * root) if (!dir) return; - for_each_cpu(i) { + for_each_possible_cpu(i) { cpu_buf = &cpu_buffer[i]; snprintf(buf, 10, "cpu%d", i); cpudir = oprofilefs_mkdir(sb, dir, buf); diff --git a/drivers/oprofile/oprofilefs.c b/drivers/oprofile/oprofilefs.c index d6bae699749a..b62da9b0cbf0 100644 --- a/drivers/oprofile/oprofilefs.c +++ b/drivers/oprofile/oprofilefs.c @@ -130,7 +130,7 @@ static struct file_operations ulong_ro_fops = { static struct dentry * __oprofilefs_create_file(struct super_block * sb, - struct dentry * 
root, char const * name, struct file_operations * fops, + struct dentry * root, char const * name, const struct file_operations * fops, int perm) { struct dentry * dentry; @@ -203,7 +203,7 @@ int oprofilefs_create_ro_atomic(struct super_block * sb, struct dentry * root, int oprofilefs_create_file(struct super_block * sb, struct dentry * root, - char const * name, struct file_operations * fops) + char const * name, const struct file_operations * fops) { if (!__oprofilefs_create_file(sb, root, name, fops, 0644)) return -EFAULT; @@ -212,7 +212,7 @@ int oprofilefs_create_file(struct super_block * sb, struct dentry * root, int oprofilefs_create_file_perm(struct super_block * sb, struct dentry * root, - char const * name, struct file_operations * fops, int perm) + char const * name, const struct file_operations * fops, int perm) { if (!__oprofilefs_create_file(sb, root, name, fops, perm)) return -EFAULT; diff --git a/drivers/parisc/led.c b/drivers/parisc/led.c index 3627a2d7f79f..298f2ddb2c17 100644 --- a/drivers/parisc/led.c +++ b/drivers/parisc/led.c @@ -499,11 +499,16 @@ static int led_halt(struct notifier_block *, unsigned long, void *); static struct notifier_block led_notifier = { .notifier_call = led_halt, }; +static int notifier_disabled = 0; static int led_halt(struct notifier_block *nb, unsigned long event, void *buf) { char *txt; - + + if (notifier_disabled) + return NOTIFY_OK; + + notifier_disabled = 1; switch (event) { case SYS_RESTART: txt = "SYSTEM RESTART"; break; @@ -527,7 +532,6 @@ static int led_halt(struct notifier_block *nb, unsigned long event, void *buf) if (led_func_ptr) led_func_ptr(0xff); /* turn all LEDs ON */ - unregister_reboot_notifier(&led_notifier); return NOTIFY_OK; } @@ -758,6 +762,12 @@ not_found: return 1; } +static void __exit led_exit(void) +{ + unregister_reboot_notifier(&led_notifier); + return; +} + #ifdef CONFIG_PROC_FS module_init(led_create_procfs) #endif diff --git a/drivers/parisc/power.c b/drivers/parisc/power.c index 54b2b7f20b96..0bcab83b4080 100644 --- a/drivers/parisc/power.c +++ b/drivers/parisc/power.c @@ -251,7 +251,8 @@ static int __init power_init(void) } /* Register a call for panic conditions. 
*/ - notifier_chain_register(&panic_notifier_list, &parisc_panic_block); + atomic_notifier_chain_register(&panic_notifier_list, + &parisc_panic_block); tasklet_enable(&power_tasklet); @@ -264,7 +265,8 @@ static void __exit power_exit(void) return; tasklet_disable(&power_tasklet); - notifier_chain_unregister(&panic_notifier_list, &parisc_panic_block); + atomic_notifier_chain_unregister(&panic_notifier_list, + &parisc_panic_block); power_tasklet.func = NULL; pdc_soft_power_button(0); } diff --git a/drivers/parport/parport_pc.c b/drivers/parport/parport_pc.c index 9302b8fd7461..d5890027f8af 100644 --- a/drivers/parport/parport_pc.c +++ b/drivers/parport/parport_pc.c @@ -3126,9 +3126,9 @@ parport_pc_find_isa_ports (int autoirq, int autodma) * autoirq is PARPORT_IRQ_NONE, PARPORT_IRQ_AUTO, or PARPORT_IRQ_PROBEONLY * autodma is PARPORT_DMA_NONE or PARPORT_DMA_AUTO */ -static int __init parport_pc_find_ports (int autoirq, int autodma) +static void __init parport_pc_find_ports (int autoirq, int autodma) { - int count = 0, r; + int count = 0, err; #ifdef CONFIG_PARPORT_PC_SUPERIO detect_and_report_winbond (); @@ -3140,23 +3140,17 @@ static int __init parport_pc_find_ports (int autoirq, int autodma) /* PnP ports, skip detection if SuperIO already found them */ if (!count) { - r = pnp_register_driver (&parport_pc_pnp_driver); - if (r >= 0) { + err = pnp_register_driver (&parport_pc_pnp_driver); + if (!err) pnp_registered_parport = 1; - count += r; - } } /* ISA ports and whatever (see asm/parport.h). */ - count += parport_pc_find_nonpci_ports (autoirq, autodma); - - r = pci_register_driver (&parport_pc_pci_driver); - if (r) - return r; - pci_registered_parport = 1; - count += 1; + parport_pc_find_nonpci_ports (autoirq, autodma); - return count; + err = pci_register_driver (&parport_pc_pci_driver); + if (!err) + pci_registered_parport = 1; } /* @@ -3381,8 +3375,6 @@ __setup("parport_init_mode=",parport_init_mode_setup); static int __init parport_pc_init(void) { - int count = 0; - if (parse_parport_params()) return -EINVAL; @@ -3395,12 +3387,11 @@ static int __init parport_pc_init(void) break; if ((io_hi[i]) == PARPORT_IOHI_AUTO) io_hi[i] = 0x400 + io[i]; - if (parport_pc_probe_port(io[i], io_hi[i], - irqval[i], dmaval[i], NULL)) - count++; + parport_pc_probe_port(io[i], io_hi[i], + irqval[i], dmaval[i], NULL); } } else - count += parport_pc_find_ports (irqval[0], dmaval[0]); + parport_pc_find_ports (irqval[0], dmaval[0]); return 0; } diff --git a/drivers/parport/share.c b/drivers/parport/share.c index ea62bed6bc83..bbbfd79adbaf 100644 --- a/drivers/parport/share.c +++ b/drivers/parport/share.c @@ -32,6 +32,7 @@ #include <linux/kmod.h> #include <linux/spinlock.h> +#include <linux/mutex.h> #include <asm/irq.h> #undef PARPORT_PARANOID @@ -50,7 +51,7 @@ static DEFINE_SPINLOCK(full_list_lock); static LIST_HEAD(drivers); -static DECLARE_MUTEX(registration_lock); +static DEFINE_MUTEX(registration_lock); /* What you can do to a port that's gone away.. 
*/ static void dead_write_lines (struct parport *p, unsigned char b){} @@ -158,11 +159,11 @@ int parport_register_driver (struct parport_driver *drv) if (list_empty(&portlist)) get_lowlevel_driver (); - down(®istration_lock); + mutex_lock(®istration_lock); list_for_each_entry(port, &portlist, list) drv->attach(port); list_add(&drv->list, &drivers); - up(®istration_lock); + mutex_unlock(®istration_lock); return 0; } @@ -188,11 +189,11 @@ void parport_unregister_driver (struct parport_driver *drv) { struct parport *port; - down(®istration_lock); + mutex_lock(®istration_lock); list_del_init(&drv->list); list_for_each_entry(port, &portlist, list) drv->detach(port); - up(®istration_lock); + mutex_unlock(®istration_lock); } static void free_port (struct parport *port) @@ -366,7 +367,7 @@ void parport_announce_port (struct parport *port) #endif parport_proc_register(port); - down(®istration_lock); + mutex_lock(®istration_lock); spin_lock_irq(&parportlist_lock); list_add_tail(&port->list, &portlist); for (i = 1; i < 3; i++) { @@ -383,7 +384,7 @@ void parport_announce_port (struct parport *port) if (slave) attach_driver_chain(slave); } - up(®istration_lock); + mutex_unlock(®istration_lock); } /** @@ -409,7 +410,7 @@ void parport_remove_port(struct parport *port) { int i; - down(®istration_lock); + mutex_lock(®istration_lock); /* Spread the word. */ detach_driver_chain (port); @@ -436,7 +437,7 @@ void parport_remove_port(struct parport *port) } spin_unlock(&parportlist_lock); - up(®istration_lock); + mutex_unlock(®istration_lock); parport_proc_unregister(port); diff --git a/drivers/pci/hotplug/rpadlpar_core.c b/drivers/pci/hotplug/rpadlpar_core.c index 3eefe2cec72d..46825fee3ae4 100644 --- a/drivers/pci/hotplug/rpadlpar_core.c +++ b/drivers/pci/hotplug/rpadlpar_core.c @@ -19,7 +19,7 @@ #include <linux/string.h> #include <asm/pci-bridge.h> -#include <asm/semaphore.h> +#include <linux/mutex.h> #include <asm/rtas.h> #include <asm/vio.h> @@ -27,7 +27,7 @@ #include "rpaphp.h" #include "rpadlpar.h" -static DECLARE_MUTEX(rpadlpar_sem); +static DEFINE_MUTEX(rpadlpar_mutex); #define DLPAR_MODULE_NAME "rpadlpar_io" @@ -300,7 +300,7 @@ int dlpar_add_slot(char *drc_name) int node_type; int rc = -EIO; - if (down_interruptible(&rpadlpar_sem)) + if (mutex_lock_interruptible(&rpadlpar_mutex)) return -ERESTARTSYS; /* Find newly added node */ @@ -324,7 +324,7 @@ int dlpar_add_slot(char *drc_name) printk(KERN_INFO "%s: slot %s added\n", DLPAR_MODULE_NAME, drc_name); exit: - up(&rpadlpar_sem); + mutex_unlock(&rpadlpar_mutex); return rc; } @@ -417,7 +417,7 @@ int dlpar_remove_slot(char *drc_name) int node_type; int rc = 0; - if (down_interruptible(&rpadlpar_sem)) + if (mutex_lock_interruptible(&rpadlpar_mutex)) return -ERESTARTSYS; dn = find_dlpar_node(drc_name, &node_type); @@ -439,7 +439,7 @@ int dlpar_remove_slot(char *drc_name) } printk(KERN_INFO "%s: slot %s removed\n", DLPAR_MODULE_NAME, drc_name); exit: - up(&rpadlpar_sem); + mutex_unlock(&rpadlpar_mutex); return rc; } diff --git a/drivers/pci/hotplug/sgi_hotplug.c b/drivers/pci/hotplug/sgi_hotplug.c index c402da8e78ae..8cb9abde736b 100644 --- a/drivers/pci/hotplug/sgi_hotplug.c +++ b/drivers/pci/hotplug/sgi_hotplug.c @@ -15,6 +15,7 @@ #include <linux/pci.h> #include <linux/proc_fs.h> #include <linux/types.h> +#include <linux/mutex.h> #include <asm/sn/addrs.h> #include <asm/sn/l1.h> @@ -81,7 +82,7 @@ static struct hotplug_slot_ops sn_hotplug_slot_ops = { .get_power_status = get_power_status, }; -static DECLARE_MUTEX(sn_hotplug_sem); +static 
DEFINE_MUTEX(sn_hotplug_mutex); static ssize_t path_show (struct hotplug_slot *bss_hotplug_slot, char *buf) @@ -346,7 +347,7 @@ static int enable_slot(struct hotplug_slot *bss_hotplug_slot) int rc; /* Serialize the Linux PCI infrastructure */ - down(&sn_hotplug_sem); + mutex_lock(&sn_hotplug_mutex); /* * Power-on and initialize the slot in the SN @@ -354,7 +355,7 @@ static int enable_slot(struct hotplug_slot *bss_hotplug_slot) */ rc = sn_slot_enable(bss_hotplug_slot, slot->device_num); if (rc) { - up(&sn_hotplug_sem); + mutex_unlock(&sn_hotplug_mutex); return rc; } @@ -362,7 +363,7 @@ static int enable_slot(struct hotplug_slot *bss_hotplug_slot) PCI_DEVFN(slot->device_num + 1, 0)); if (!num_funcs) { dev_dbg(slot->pci_bus->self, "no device in slot\n"); - up(&sn_hotplug_sem); + mutex_unlock(&sn_hotplug_mutex); return -ENODEV; } @@ -402,7 +403,7 @@ static int enable_slot(struct hotplug_slot *bss_hotplug_slot) if (new_ppb) pci_bus_add_devices(new_bus); - up(&sn_hotplug_sem); + mutex_unlock(&sn_hotplug_mutex); if (rc == 0) dev_dbg(slot->pci_bus->self, @@ -422,7 +423,7 @@ static int disable_slot(struct hotplug_slot *bss_hotplug_slot) int rc; /* Acquire update access to the bus */ - down(&sn_hotplug_sem); + mutex_lock(&sn_hotplug_mutex); /* is it okay to bring this slot down? */ rc = sn_slot_disable(bss_hotplug_slot, slot->device_num, @@ -450,7 +451,7 @@ static int disable_slot(struct hotplug_slot *bss_hotplug_slot) PCI_REQ_SLOT_DISABLE); leaving: /* Release the bus lock */ - up(&sn_hotplug_sem); + mutex_unlock(&sn_hotplug_mutex); return rc; } @@ -462,9 +463,9 @@ static inline int get_power_status(struct hotplug_slot *bss_hotplug_slot, struct pcibus_info *pcibus_info; pcibus_info = SN_PCIBUS_BUSSOFT_INFO(slot->pci_bus); - down(&sn_hotplug_sem); + mutex_lock(&sn_hotplug_mutex); *value = pcibus_info->pbi_enabled_devices & (1 << slot->device_num); - up(&sn_hotplug_sem); + mutex_unlock(&sn_hotplug_mutex); return 0; } diff --git a/drivers/pnp/card.c b/drivers/pnp/card.c index b68eef251614..bb19c64073c6 100644 --- a/drivers/pnp/card.c +++ b/drivers/pnp/card.c @@ -47,7 +47,7 @@ static void card_remove(struct pnp_dev * dev) { dev->card_link = NULL; } - + static void card_remove_first(struct pnp_dev * dev) { struct pnp_card_driver * drv = to_pnp_card_driver(dev->driver); @@ -361,7 +361,7 @@ static int card_resume(struct pnp_dev *dev) int pnp_register_card_driver(struct pnp_card_driver * drv) { - int count; + int error; struct list_head *pos, *temp; drv->link.name = drv->name; @@ -372,21 +372,19 @@ int pnp_register_card_driver(struct pnp_card_driver * drv) drv->link.suspend = drv->suspend ? card_suspend : NULL; drv->link.resume = drv->resume ? 
card_resume : NULL; - count = pnp_register_driver(&drv->link); - if (count < 0) - return count; + error = pnp_register_driver(&drv->link); + if (error < 0) + return error; spin_lock(&pnp_lock); list_add_tail(&drv->global_list, &pnp_card_drivers); spin_unlock(&pnp_lock); - count = 0; - list_for_each_safe(pos,temp,&pnp_cards){ struct pnp_card *card = list_entry(pos, struct pnp_card, global_list); - count += card_probe(card,drv); + card_probe(card,drv); } - return count; + return 0; } /** diff --git a/drivers/pnp/driver.c b/drivers/pnp/driver.c index 7cafacdd12b0..e54c15383193 100644 --- a/drivers/pnp/driver.c +++ b/drivers/pnp/driver.c @@ -201,31 +201,14 @@ struct bus_type pnp_bus_type = { .resume = pnp_bus_resume, }; - -static int count_devices(struct device * dev, void * c) -{ - int * count = c; - (*count)++; - return 0; -} - int pnp_register_driver(struct pnp_driver *drv) { - int count; - pnp_dbg("the driver '%s' has been registered", drv->name); drv->driver.name = drv->name; drv->driver.bus = &pnp_bus_type; - count = driver_register(&drv->driver); - - /* get the number of initial matches */ - if (count >= 0){ - count = 0; - driver_for_each_device(&drv->driver, NULL, &count, count_devices); - } - return count; + return driver_register(&drv->driver); } void pnp_unregister_driver(struct pnp_driver *drv) diff --git a/drivers/pnp/isapnp/core.c b/drivers/pnp/isapnp/core.c index b1b4b683cbdd..ac7c2bb6c69e 100644 --- a/drivers/pnp/isapnp/core.c +++ b/drivers/pnp/isapnp/core.c @@ -42,6 +42,7 @@ #include <linux/delay.h> #include <linux/init.h> #include <linux/isapnp.h> +#include <linux/mutex.h> #include <asm/io.h> #if 0 @@ -92,7 +93,7 @@ MODULE_LICENSE("GPL"); #define _LTAG_FIXEDMEM32RANGE 0x86 static unsigned char isapnp_checksum_value; -static DECLARE_MUTEX(isapnp_cfg_mutex); +static DEFINE_MUTEX(isapnp_cfg_mutex); static int isapnp_detected; static int isapnp_csn_count; @@ -903,7 +904,7 @@ int isapnp_cfg_begin(int csn, int logdev) { if (csn < 1 || csn > isapnp_csn_count || logdev > 10) return -EINVAL; - down(&isapnp_cfg_mutex); + mutex_lock(&isapnp_cfg_mutex); isapnp_wait(); isapnp_key(); isapnp_wake(csn); @@ -929,7 +930,7 @@ int isapnp_cfg_begin(int csn, int logdev) int isapnp_cfg_end(void) { isapnp_wait(); - up(&isapnp_cfg_mutex); + mutex_unlock(&isapnp_cfg_mutex); return 0; } diff --git a/drivers/rtc/Kconfig b/drivers/rtc/Kconfig new file mode 100644 index 000000000000..929dd8090578 --- /dev/null +++ b/drivers/rtc/Kconfig @@ -0,0 +1,165 @@ +\# +# RTC class/drivers configuration +# + +menu "Real Time Clock" + +config RTC_LIB + tristate + +config RTC_CLASS + tristate "RTC class" + depends on EXPERIMENTAL + default n + select RTC_LIB + help + Generic RTC class support. If you say yes here, you will + be allowed to plug one or more RTCs to your system. You will + probably want to enable one of more of the interfaces below. + + This driver can also be built as a module. If so, the module + will be called rtc-class. + +config RTC_HCTOSYS + bool "Set system time from RTC on startup" + depends on RTC_CLASS = y + default y + help + If you say yes here, the system time will be set using + the value read from the specified RTC device. This is useful + in order to avoid unnecessary fschk runs. + +config RTC_HCTOSYS_DEVICE + string "The RTC to read the time from" + depends on RTC_HCTOSYS = y + default "rtc0" + help + The RTC device that will be used as the source for + the system time, usually rtc0. 
+ +comment "RTC interfaces" + depends on RTC_CLASS + +config RTC_INTF_SYSFS + tristate "sysfs" + depends on RTC_CLASS && SYSFS + default RTC_CLASS + help + Say yes here if you want to use your RTC using the sysfs + interface, /sys/class/rtc/rtcX . + + This driver can also be built as a module. If so, the module + will be called rtc-sysfs. + +config RTC_INTF_PROC + tristate "proc" + depends on RTC_CLASS && PROC_FS + default RTC_CLASS + help + Say yes here if you want to use your RTC using the proc + interface, /proc/driver/rtc . + + This driver can also be built as a module. If so, the module + will be called rtc-proc. + +config RTC_INTF_DEV + tristate "dev" + depends on RTC_CLASS + default RTC_CLASS + help + Say yes here if you want to use your RTC using the dev + interface, /dev/rtc . + + This driver can also be built as a module. If so, the module + will be called rtc-dev. + +comment "RTC drivers" + depends on RTC_CLASS + +config RTC_DRV_X1205 + tristate "Xicor/Intersil X1205" + depends on RTC_CLASS && I2C + help + If you say yes here you get support for the + Xicor/Intersil X1205 RTC chip. + + This driver can also be built as a module. If so, the module + will be called rtc-x1205. + +config RTC_DRV_DS1672 + tristate "Dallas/Maxim DS1672" + depends on RTC_CLASS && I2C + help + If you say yes here you get support for the + Dallas/Maxim DS1672 timekeeping chip. + + This driver can also be built as a module. If so, the module + will be called rtc-ds1672. + +config RTC_DRV_PCF8563 + tristate "Philips PCF8563/Epson RTC8564" + depends on RTC_CLASS && I2C + help + If you say yes here you get support for the + Philips PCF8563 RTC chip. The Epson RTC8564 + should work as well. + + This driver can also be built as a module. If so, the module + will be called rtc-pcf8563. + +config RTC_DRV_RS5C372 + tristate "Ricoh RS5C372A/B" + depends on RTC_CLASS && I2C + help + If you say yes here you get support for the + Ricoh RS5C372A and RS5C372B RTC chips. + + This driver can also be built as a module. If so, the module + will be called rtc-rs5c372. + +config RTC_DRV_M48T86 + tristate "ST M48T86/Dallas DS12887" + depends on RTC_CLASS + help + If you say Y here you will get support for the + ST M48T86 and Dallas DS12887 RTC chips. + + This driver can also be built as a module. If so, the module + will be called rtc-m48t86. + +config RTC_DRV_EP93XX + tristate "Cirrus Logic EP93XX" + depends on RTC_CLASS && ARCH_EP93XX + help + If you say yes here you get support for the + RTC embedded in the Cirrus Logic EP93XX processors. + + This driver can also be built as a module. If so, the module + will be called rtc-ep93xx. + +config RTC_DRV_SA1100 + tristate "SA11x0/PXA2xx" + depends on RTC_CLASS && (ARCH_SA1100 || ARCH_PXA) + help + If you say Y here you will get access to the real time clock + built into your SA11x0 or PXA2xx CPU. + + To compile this driver as a module, choose M here: the + module will be called rtc-sa1100. + +config RTC_DRV_TEST + tristate "Test driver/device" + depends on RTC_CLASS + help + If you say yes here you get support for the + RTC test driver. It's a software RTC which can be + used to test the RTC subsystem APIs. It gets + the time from the system clock. + You want this driver only if you are doing development + on the RTC subsystem. Please read the source code + for further details. + + This driver can also be built as a module. If so, the module + will be called rtc-test. 
+ +endmenu diff --git a/drivers/rtc/Makefile b/drivers/rtc/Makefile new file mode 100644 index 000000000000..8d4c7fe88d58 --- /dev/null +++ b/drivers/rtc/Makefile @@ -0,0 +1,21 @@ +# +# Makefile for RTC class/drivers. +# + +obj-$(CONFIG_RTC_LIB) += rtc-lib.o +obj-$(CONFIG_RTC_HCTOSYS) += hctosys.o +obj-$(CONFIG_RTC_CLASS) += rtc-core.o +rtc-core-y := class.o interface.o + +obj-$(CONFIG_RTC_INTF_SYSFS) += rtc-sysfs.o +obj-$(CONFIG_RTC_INTF_PROC) += rtc-proc.o +obj-$(CONFIG_RTC_INTF_DEV) += rtc-dev.o + +obj-$(CONFIG_RTC_DRV_X1205) += rtc-x1205.o +obj-$(CONFIG_RTC_DRV_TEST) += rtc-test.o +obj-$(CONFIG_RTC_DRV_DS1672) += rtc-ds1672.o +obj-$(CONFIG_RTC_DRV_PCF8563) += rtc-pcf8563.o +obj-$(CONFIG_RTC_DRV_RS5C372) += rtc-rs5c372.o +obj-$(CONFIG_RTC_DRV_M48T86) += rtc-m48t86.o +obj-$(CONFIG_RTC_DRV_EP93XX) += rtc-ep93xx.o +obj-$(CONFIG_RTC_DRV_SA1100) += rtc-sa1100.o diff --git a/drivers/rtc/class.c b/drivers/rtc/class.c new file mode 100644 index 000000000000..8533936d50d8 --- /dev/null +++ b/drivers/rtc/class.c @@ -0,0 +1,145 @@ +/* + * RTC subsystem, base class + * + * Copyright (C) 2005 Tower Technologies + * Author: Alessandro Zummo <a.zummo@towertech.it> + * + * class skeleton from drivers/hwmon/hwmon.c + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. +*/ + +#include <linux/module.h> +#include <linux/rtc.h> +#include <linux/kdev_t.h> +#include <linux/idr.h> + +static DEFINE_IDR(rtc_idr); +static DEFINE_MUTEX(idr_lock); +struct class *rtc_class; + +static void rtc_device_release(struct class_device *class_dev) +{ + struct rtc_device *rtc = to_rtc_device(class_dev); + mutex_lock(&idr_lock); + idr_remove(&rtc_idr, rtc->id); + mutex_unlock(&idr_lock); + kfree(rtc); +} + +/** + * rtc_device_register - register w/ RTC class + * @dev: the device to register + * + * rtc_device_unregister() must be called when the class device is no + * longer needed. + * + * Returns the pointer to the new struct class device. 
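+ * On failure the return value is an ERR_PTR()-encoded errno, so callers
+ * should test it with IS_ERR() rather than comparing against NULL.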
+ */ +struct rtc_device *rtc_device_register(const char *name, struct device *dev, + struct rtc_class_ops *ops, + struct module *owner) +{ + struct rtc_device *rtc; + int id, err; + + if (idr_pre_get(&rtc_idr, GFP_KERNEL) == 0) { + err = -ENOMEM; + goto exit; + } + + + mutex_lock(&idr_lock); + err = idr_get_new(&rtc_idr, NULL, &id); + mutex_unlock(&idr_lock); + + if (err < 0) + goto exit; + + id = id & MAX_ID_MASK; + + rtc = kzalloc(sizeof(struct rtc_device), GFP_KERNEL); + if (rtc == NULL) { + err = -ENOMEM; + goto exit_idr; + } + + rtc->id = id; + rtc->ops = ops; + rtc->owner = owner; + rtc->class_dev.dev = dev; + rtc->class_dev.class = rtc_class; + rtc->class_dev.release = rtc_device_release; + + mutex_init(&rtc->ops_lock); + spin_lock_init(&rtc->irq_lock); + spin_lock_init(&rtc->irq_task_lock); + + strlcpy(rtc->name, name, RTC_DEVICE_NAME_SIZE); + snprintf(rtc->class_dev.class_id, BUS_ID_SIZE, "rtc%d", id); + + err = class_device_register(&rtc->class_dev); + if (err) + goto exit_kfree; + + dev_info(dev, "rtc core: registered %s as %s\n", + rtc->name, rtc->class_dev.class_id); + + return rtc; + +exit_kfree: + kfree(rtc); + +exit_idr: + idr_remove(&rtc_idr, id); + +exit: + return ERR_PTR(err); +} +EXPORT_SYMBOL_GPL(rtc_device_register); + + +/** + * rtc_device_unregister - removes the previously registered RTC class device + * + * @rtc: the RTC class device to destroy + */ +void rtc_device_unregister(struct rtc_device *rtc) +{ + mutex_lock(&rtc->ops_lock); + rtc->ops = NULL; + mutex_unlock(&rtc->ops_lock); + class_device_unregister(&rtc->class_dev); +} +EXPORT_SYMBOL_GPL(rtc_device_unregister); + +int rtc_interface_register(struct class_interface *intf) +{ + intf->class = rtc_class; + return class_interface_register(intf); +} +EXPORT_SYMBOL_GPL(rtc_interface_register); + +static int __init rtc_init(void) +{ + rtc_class = class_create(THIS_MODULE, "rtc"); + if (IS_ERR(rtc_class)) { + printk(KERN_ERR "%s: couldn't create class\n", __FILE__); + return PTR_ERR(rtc_class); + } + return 0; +} + +static void __exit rtc_exit(void) +{ + class_destroy(rtc_class); +} + +module_init(rtc_init); +module_exit(rtc_exit); + +MODULE_AUTHOR("Alessandro Zummo <a.zummo@towerteh.it>"); +MODULE_DESCRIPTION("RTC class support"); +MODULE_LICENSE("GPL"); diff --git a/drivers/rtc/hctosys.c b/drivers/rtc/hctosys.c new file mode 100644 index 000000000000..d02fe9a0001f --- /dev/null +++ b/drivers/rtc/hctosys.c @@ -0,0 +1,69 @@ +/* + * RTC subsystem, initialize system time on startup + * + * Copyright (C) 2005 Tower Technologies + * Author: Alessandro Zummo <a.zummo@towertech.it> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. +*/ + +#include <linux/rtc.h> + +/* IMPORTANT: the RTC only stores whole seconds. It is arbitrary + * whether it stores the most close value or the value with partial + * seconds truncated. However, it is important that we use it to store + * the truncated value. This is because otherwise it is necessary, + * in an rtc sync function, to read both xtime.tv_sec and + * xtime.tv_nsec. On some processors (i.e. ARM), an atomic read + * of >32bits is not possible. So storing the most close value would + * slow down the sync API. So here we have the truncated value and + * the best guess is to add 0.5s. 
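+ * That is why rtc_hctosys() below sets tv_nsec to NSEC_PER_SEC/2
+ * before handing the value to do_settimeofday().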
+ */ + +static int __init rtc_hctosys(void) +{ + int err; + struct rtc_time tm; + struct class_device *class_dev = rtc_class_open(CONFIG_RTC_HCTOSYS_DEVICE); + + if (class_dev == NULL) { + printk("%s: unable to open rtc device (%s)\n", + __FILE__, CONFIG_RTC_HCTOSYS_DEVICE); + return -ENODEV; + } + + err = rtc_read_time(class_dev, &tm); + if (err == 0) { + err = rtc_valid_tm(&tm); + if (err == 0) { + struct timespec tv; + + tv.tv_nsec = NSEC_PER_SEC >> 1; + + rtc_tm_to_time(&tm, &tv.tv_sec); + + do_settimeofday(&tv); + + dev_info(class_dev->dev, + "setting the system clock to " + "%d-%02d-%02d %02d:%02d:%02d (%u)\n", + tm.tm_year + 1900, tm.tm_mon + 1, tm.tm_mday, + tm.tm_hour, tm.tm_min, tm.tm_sec, + (unsigned int) tv.tv_sec); + } + else + dev_err(class_dev->dev, + "hctosys: invalid date/time\n"); + } + else + dev_err(class_dev->dev, + "hctosys: unable to read the hardware clock\n"); + + rtc_class_close(class_dev); + + return 0; +} + +late_initcall(rtc_hctosys); diff --git a/drivers/rtc/interface.c b/drivers/rtc/interface.c new file mode 100644 index 000000000000..56e490709b87 --- /dev/null +++ b/drivers/rtc/interface.c @@ -0,0 +1,277 @@ +/* + * RTC subsystem, interface functions + * + * Copyright (C) 2005 Tower Technologies + * Author: Alessandro Zummo <a.zummo@towertech.it> + * + * based on arch/arm/common/rtctime.c + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. +*/ + +#include <linux/rtc.h> + +int rtc_read_time(struct class_device *class_dev, struct rtc_time *tm) +{ + int err; + struct rtc_device *rtc = to_rtc_device(class_dev); + + err = mutex_lock_interruptible(&rtc->ops_lock); + if (err) + return -EBUSY; + + if (!rtc->ops) + err = -ENODEV; + else if (!rtc->ops->read_time) + err = -EINVAL; + else { + memset(tm, 0, sizeof(struct rtc_time)); + err = rtc->ops->read_time(class_dev->dev, tm); + } + + mutex_unlock(&rtc->ops_lock); + return err; +} +EXPORT_SYMBOL_GPL(rtc_read_time); + +int rtc_set_time(struct class_device *class_dev, struct rtc_time *tm) +{ + int err; + struct rtc_device *rtc = to_rtc_device(class_dev); + + err = rtc_valid_tm(tm); + if (err != 0) + return err; + + err = mutex_lock_interruptible(&rtc->ops_lock); + if (err) + return -EBUSY; + + if (!rtc->ops) + err = -ENODEV; + else if (!rtc->ops->set_time) + err = -EINVAL; + else + err = rtc->ops->set_time(class_dev->dev, tm); + + mutex_unlock(&rtc->ops_lock); + return err; +} +EXPORT_SYMBOL_GPL(rtc_set_time); + +int rtc_set_mmss(struct class_device *class_dev, unsigned long secs) +{ + int err; + struct rtc_device *rtc = to_rtc_device(class_dev); + + err = mutex_lock_interruptible(&rtc->ops_lock); + if (err) + return -EBUSY; + + if (!rtc->ops) + err = -ENODEV; + else if (rtc->ops->set_mmss) + err = rtc->ops->set_mmss(class_dev->dev, secs); + else if (rtc->ops->read_time && rtc->ops->set_time) { + struct rtc_time new, old; + + err = rtc->ops->read_time(class_dev->dev, &old); + if (err == 0) { + rtc_time_to_tm(secs, &new); + + /* + * avoid writing when we're going to change the day of + * the month. We will retry in the next minute. This + * basically means that if the RTC must not drift + * by more than 1 minute in 11 minutes. 
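+ * (set_mmss is normally invoked by the kernel's periodic RTC sync,
+ * which runs roughly every 11 minutes, hence the figure above.)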
+ */ + if (!((old.tm_hour == 23 && old.tm_min == 59) || + (new.tm_hour == 23 && new.tm_min == 59))) + err = rtc->ops->set_time(class_dev->dev, &new); + } + } + else + err = -EINVAL; + + mutex_unlock(&rtc->ops_lock); + + return err; +} +EXPORT_SYMBOL_GPL(rtc_set_mmss); + +int rtc_read_alarm(struct class_device *class_dev, struct rtc_wkalrm *alarm) +{ + int err; + struct rtc_device *rtc = to_rtc_device(class_dev); + + err = mutex_lock_interruptible(&rtc->ops_lock); + if (err) + return -EBUSY; + + if (rtc->ops == NULL) + err = -ENODEV; + else if (!rtc->ops->read_alarm) + err = -EINVAL; + else { + memset(alarm, 0, sizeof(struct rtc_wkalrm)); + err = rtc->ops->read_alarm(class_dev->dev, alarm); + } + + mutex_unlock(&rtc->ops_lock); + return err; +} +EXPORT_SYMBOL_GPL(rtc_read_alarm); + +int rtc_set_alarm(struct class_device *class_dev, struct rtc_wkalrm *alarm) +{ + int err; + struct rtc_device *rtc = to_rtc_device(class_dev); + + err = mutex_lock_interruptible(&rtc->ops_lock); + if (err) + return -EBUSY; + + if (!rtc->ops) + err = -ENODEV; + else if (!rtc->ops->set_alarm) + err = -EINVAL; + else + err = rtc->ops->set_alarm(class_dev->dev, alarm); + + mutex_unlock(&rtc->ops_lock); + return err; +} +EXPORT_SYMBOL_GPL(rtc_set_alarm); + +void rtc_update_irq(struct class_device *class_dev, + unsigned long num, unsigned long events) +{ + struct rtc_device *rtc = to_rtc_device(class_dev); + + spin_lock(&rtc->irq_lock); + rtc->irq_data = (rtc->irq_data + (num << 8)) | events; + spin_unlock(&rtc->irq_lock); + + spin_lock(&rtc->irq_task_lock); + if (rtc->irq_task) + rtc->irq_task->func(rtc->irq_task->private_data); + spin_unlock(&rtc->irq_task_lock); + + wake_up_interruptible(&rtc->irq_queue); + kill_fasync(&rtc->async_queue, SIGIO, POLL_IN); +} +EXPORT_SYMBOL_GPL(rtc_update_irq); + +struct class_device *rtc_class_open(char *name) +{ + struct class_device *class_dev = NULL, + *class_dev_tmp; + + down(&rtc_class->sem); + list_for_each_entry(class_dev_tmp, &rtc_class->children, node) { + if (strncmp(class_dev_tmp->class_id, name, BUS_ID_SIZE) == 0) { + class_dev = class_dev_tmp; + break; + } + } + + if (class_dev) { + if (!try_module_get(to_rtc_device(class_dev)->owner)) + class_dev = NULL; + } + up(&rtc_class->sem); + + return class_dev; +} +EXPORT_SYMBOL_GPL(rtc_class_open); + +void rtc_class_close(struct class_device *class_dev) +{ + module_put(to_rtc_device(class_dev)->owner); +} +EXPORT_SYMBOL_GPL(rtc_class_close); + +int rtc_irq_register(struct class_device *class_dev, struct rtc_task *task) +{ + int retval = -EBUSY; + struct rtc_device *rtc = to_rtc_device(class_dev); + + if (task == NULL || task->func == NULL) + return -EINVAL; + + spin_lock(&rtc->irq_task_lock); + if (rtc->irq_task == NULL) { + rtc->irq_task = task; + retval = 0; + } + spin_unlock(&rtc->irq_task_lock); + + return retval; +} +EXPORT_SYMBOL_GPL(rtc_irq_register); + +void rtc_irq_unregister(struct class_device *class_dev, struct rtc_task *task) +{ + struct rtc_device *rtc = to_rtc_device(class_dev); + + spin_lock(&rtc->irq_task_lock); + if (rtc->irq_task == task) + rtc->irq_task = NULL; + spin_unlock(&rtc->irq_task_lock); +} +EXPORT_SYMBOL_GPL(rtc_irq_unregister); + +int rtc_irq_set_state(struct class_device *class_dev, struct rtc_task *task, int enabled) +{ + int err = 0; + unsigned long flags; + struct rtc_device *rtc = to_rtc_device(class_dev); + + spin_lock_irqsave(&rtc->irq_task_lock, flags); + if (rtc->irq_task != task) + err = -ENXIO; + spin_unlock_irqrestore(&rtc->irq_task_lock, flags); + + if (err == 0) + err = 
rtc->ops->irq_set_state(class_dev->dev, enabled); + + return err; +} +EXPORT_SYMBOL_GPL(rtc_irq_set_state); + +int rtc_irq_set_freq(struct class_device *class_dev, struct rtc_task *task, int freq) +{ + int err = 0, tmp = 0; + unsigned long flags; + struct rtc_device *rtc = to_rtc_device(class_dev); + + /* allowed range is 2-8192 */ + if (freq < 2 || freq > 8192) + return -EINVAL; +/* + FIXME: this does not belong here, will move where appropriate + at a later stage. It cannot hurt right now, trust me :) + if ((freq > rtc_max_user_freq) && (!capable(CAP_SYS_RESOURCE))) + return -EACCES; +*/ + /* check if freq is a power of 2 */ + while (freq > (1 << tmp)) + tmp++; + + if (freq != (1 << tmp)) + return -EINVAL; + + spin_lock_irqsave(&rtc->irq_task_lock, flags); + if (rtc->irq_task != task) + err = -ENXIO; + spin_unlock_irqrestore(&rtc->irq_task_lock, flags); + + if (err == 0) { + err = rtc->ops->irq_set_freq(class_dev->dev, freq); + if (err == 0) + rtc->irq_freq = freq; + } + return err; +} diff --git a/drivers/rtc/rtc-dev.c b/drivers/rtc/rtc-dev.c new file mode 100644 index 000000000000..b1e3e6179e56 --- /dev/null +++ b/drivers/rtc/rtc-dev.c @@ -0,0 +1,382 @@ +/* + * RTC subsystem, dev interface + * + * Copyright (C) 2005 Tower Technologies + * Author: Alessandro Zummo <a.zummo@towertech.it> + * + * based on arch/arm/common/rtctime.c + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. +*/ + +#include <linux/module.h> +#include <linux/rtc.h> + +static struct class *rtc_dev_class; +static dev_t rtc_devt; + +#define RTC_DEV_MAX 16 /* 16 RTCs should be enough for everyone... */ + +static int rtc_dev_open(struct inode *inode, struct file *file) +{ + int err; + struct rtc_device *rtc = container_of(inode->i_cdev, + struct rtc_device, char_dev); + struct rtc_class_ops *ops = rtc->ops; + + /* We keep the lock as long as the device is in use + * and return immediately if busy + */ + if (!(mutex_trylock(&rtc->char_lock))) + return -EBUSY; + + file->private_data = &rtc->class_dev; + + err = ops->open ? 
ops->open(rtc->class_dev.dev) : 0; + if (err == 0) { + spin_lock_irq(&rtc->irq_lock); + rtc->irq_data = 0; + spin_unlock_irq(&rtc->irq_lock); + + return 0; + } + + /* something has gone wrong, release the lock */ + mutex_unlock(&rtc->char_lock); + return err; +} + + +static ssize_t +rtc_dev_read(struct file *file, char __user *buf, size_t count, loff_t *ppos) +{ + struct rtc_device *rtc = to_rtc_device(file->private_data); + + DECLARE_WAITQUEUE(wait, current); + unsigned long data; + ssize_t ret; + + if (count < sizeof(unsigned long)) + return -EINVAL; + + add_wait_queue(&rtc->irq_queue, &wait); + do { + __set_current_state(TASK_INTERRUPTIBLE); + + spin_lock_irq(&rtc->irq_lock); + data = rtc->irq_data; + rtc->irq_data = 0; + spin_unlock_irq(&rtc->irq_lock); + + if (data != 0) { + ret = 0; + break; + } + if (file->f_flags & O_NONBLOCK) { + ret = -EAGAIN; + break; + } + if (signal_pending(current)) { + ret = -ERESTARTSYS; + break; + } + schedule(); + } while (1); + set_current_state(TASK_RUNNING); + remove_wait_queue(&rtc->irq_queue, &wait); + + if (ret == 0) { + /* Check for any data updates */ + if (rtc->ops->read_callback) + data = rtc->ops->read_callback(rtc->class_dev.dev, data); + + ret = put_user(data, (unsigned long __user *)buf); + if (ret == 0) + ret = sizeof(unsigned long); + } + return ret; +} + +static unsigned int rtc_dev_poll(struct file *file, poll_table *wait) +{ + struct rtc_device *rtc = to_rtc_device(file->private_data); + unsigned long data; + + poll_wait(file, &rtc->irq_queue, wait); + + data = rtc->irq_data; + + return (data != 0) ? (POLLIN | POLLRDNORM) : 0; +} + +static int rtc_dev_ioctl(struct inode *inode, struct file *file, + unsigned int cmd, unsigned long arg) +{ + int err = 0; + struct class_device *class_dev = file->private_data; + struct rtc_device *rtc = to_rtc_device(class_dev); + struct rtc_class_ops *ops = rtc->ops; + struct rtc_time tm; + struct rtc_wkalrm alarm; + void __user *uarg = (void __user *) arg; + + /* avoid conflicting IRQ users */ + if (cmd == RTC_PIE_ON || cmd == RTC_PIE_OFF || cmd == RTC_IRQP_SET) { + spin_lock(&rtc->irq_task_lock); + if (rtc->irq_task) + err = -EBUSY; + spin_unlock(&rtc->irq_task_lock); + + if (err < 0) + return err; + } + + /* try the driver's ioctl interface */ + if (ops->ioctl) { + err = ops->ioctl(class_dev->dev, cmd, arg); + if (err != -EINVAL) + return err; + } + + /* if the driver does not provide the ioctl interface + * or if that particular ioctl was not implemented + * (-EINVAL), we will try to emulate here. + */ + + switch (cmd) { + case RTC_ALM_READ: + err = rtc_read_alarm(class_dev, &alarm); + if (err < 0) + return err; + + if (copy_to_user(uarg, &alarm.time, sizeof(tm))) + return -EFAULT; + break; + + case RTC_ALM_SET: + if (copy_from_user(&alarm.time, uarg, sizeof(tm))) + return -EFAULT; + + alarm.enabled = 0; + alarm.pending = 0; + alarm.time.tm_mday = -1; + alarm.time.tm_mon = -1; + alarm.time.tm_year = -1; + alarm.time.tm_wday = -1; + alarm.time.tm_yday = -1; + alarm.time.tm_isdst = -1; + err = rtc_set_alarm(class_dev, &alarm); + break; + + case RTC_RD_TIME: + err = rtc_read_time(class_dev, &tm); + if (err < 0) + return err; + + if (copy_to_user(uarg, &tm, sizeof(tm))) + return -EFAULT; + break; + + case RTC_SET_TIME: + if (!capable(CAP_SYS_TIME)) + return -EACCES; + + if (copy_from_user(&tm, uarg, sizeof(tm))) + return -EFAULT; + + err = rtc_set_time(class_dev, &tm); + break; +#if 0 + case RTC_EPOCH_SET: +#ifndef rtc_epoch + /* + * There were no RTC clocks before 1900. 
+ */ + if (arg < 1900) { + err = -EINVAL; + break; + } + if (!capable(CAP_SYS_TIME)) { + err = -EACCES; + break; + } + rtc_epoch = arg; + err = 0; +#endif + break; + + case RTC_EPOCH_READ: + err = put_user(rtc_epoch, (unsigned long __user *)uarg); + break; +#endif + case RTC_WKALM_SET: + if (copy_from_user(&alarm, uarg, sizeof(alarm))) + return -EFAULT; + + err = rtc_set_alarm(class_dev, &alarm); + break; + + case RTC_WKALM_RD: + err = rtc_read_alarm(class_dev, &alarm); + if (err < 0) + return err; + + if (copy_to_user(uarg, &alarm, sizeof(alarm))) + return -EFAULT; + break; + + default: + err = -EINVAL; + break; + } + + return err; +} + +static int rtc_dev_release(struct inode *inode, struct file *file) +{ + struct rtc_device *rtc = to_rtc_device(file->private_data); + + if (rtc->ops->release) + rtc->ops->release(rtc->class_dev.dev); + + mutex_unlock(&rtc->char_lock); + return 0; +} + +static int rtc_dev_fasync(int fd, struct file *file, int on) +{ + struct rtc_device *rtc = to_rtc_device(file->private_data); + return fasync_helper(fd, file, on, &rtc->async_queue); +} + +static struct file_operations rtc_dev_fops = { + .owner = THIS_MODULE, + .llseek = no_llseek, + .read = rtc_dev_read, + .poll = rtc_dev_poll, + .ioctl = rtc_dev_ioctl, + .open = rtc_dev_open, + .release = rtc_dev_release, + .fasync = rtc_dev_fasync, +}; + +/* insertion/removal hooks */ + +static int rtc_dev_add_device(struct class_device *class_dev, + struct class_interface *class_intf) +{ + int err = 0; + struct rtc_device *rtc = to_rtc_device(class_dev); + + if (rtc->id >= RTC_DEV_MAX) { + dev_err(class_dev->dev, "too many RTCs\n"); + return -EINVAL; + } + + mutex_init(&rtc->char_lock); + spin_lock_init(&rtc->irq_lock); + init_waitqueue_head(&rtc->irq_queue); + + cdev_init(&rtc->char_dev, &rtc_dev_fops); + rtc->char_dev.owner = rtc->owner; + + if (cdev_add(&rtc->char_dev, MKDEV(MAJOR(rtc_devt), rtc->id), 1)) { + cdev_del(&rtc->char_dev); + dev_err(class_dev->dev, + "failed to add char device %d:%d\n", + MAJOR(rtc_devt), rtc->id); + return -ENODEV; + } + + rtc->rtc_dev = class_device_create(rtc_dev_class, NULL, + MKDEV(MAJOR(rtc_devt), rtc->id), + class_dev->dev, "rtc%d", rtc->id); + if (IS_ERR(rtc->rtc_dev)) { + dev_err(class_dev->dev, "cannot create rtc_dev device\n"); + err = PTR_ERR(rtc->rtc_dev); + goto err_cdev_del; + } + + dev_info(class_dev->dev, "rtc intf: dev (%d:%d)\n", + MAJOR(rtc->rtc_dev->devt), + MINOR(rtc->rtc_dev->devt)); + + return 0; + +err_cdev_del: + + cdev_del(&rtc->char_dev); + return err; +} + +static void rtc_dev_remove_device(struct class_device *class_dev, + struct class_interface *class_intf) +{ + struct rtc_device *rtc = to_rtc_device(class_dev); + + if (rtc->rtc_dev) { + dev_dbg(class_dev->dev, "removing char %d:%d\n", + MAJOR(rtc->rtc_dev->devt), + MINOR(rtc->rtc_dev->devt)); + + class_device_unregister(rtc->rtc_dev); + cdev_del(&rtc->char_dev); + } +} + +/* interface registration */ + +static struct class_interface rtc_dev_interface = { + .add = &rtc_dev_add_device, + .remove = &rtc_dev_remove_device, +}; + +static int __init rtc_dev_init(void) +{ + int err; + + rtc_dev_class = class_create(THIS_MODULE, "rtc-dev"); + if (IS_ERR(rtc_dev_class)) + return PTR_ERR(rtc_dev_class); + + err = alloc_chrdev_region(&rtc_devt, 0, RTC_DEV_MAX, "rtc"); + if (err < 0) { + printk(KERN_ERR "%s: failed to allocate char dev region\n", + __FILE__); + goto err_destroy_class; + } + + err = rtc_interface_register(&rtc_dev_interface); + if (err < 0) { + printk(KERN_ERR "%s: failed to register the interface\n", + 
__FILE__); + goto err_unregister_chrdev; + } + + return 0; + +err_unregister_chrdev: + unregister_chrdev_region(rtc_devt, RTC_DEV_MAX); + +err_destroy_class: + class_destroy(rtc_dev_class); + + return err; +} + +static void __exit rtc_dev_exit(void) +{ + class_interface_unregister(&rtc_dev_interface); + class_destroy(rtc_dev_class); + unregister_chrdev_region(rtc_devt, RTC_DEV_MAX); +} + +module_init(rtc_dev_init); +module_exit(rtc_dev_exit); + +MODULE_AUTHOR("Alessandro Zummo <a.zummo@towertech.it>"); +MODULE_DESCRIPTION("RTC class dev interface"); +MODULE_LICENSE("GPL"); diff --git a/drivers/rtc/rtc-ds1672.c b/drivers/rtc/rtc-ds1672.c new file mode 100644 index 000000000000..358695a416f3 --- /dev/null +++ b/drivers/rtc/rtc-ds1672.c @@ -0,0 +1,233 @@ +/* + * An rtc/i2c driver for the Dallas DS1672 + * Copyright 2005 Alessandro Zummo + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#include <linux/module.h> +#include <linux/i2c.h> +#include <linux/rtc.h> + +#define DRV_VERSION "0.2" + +/* Addresses to scan: none. This chip cannot be detected. */ +static unsigned short normal_i2c[] = { I2C_CLIENT_END }; + +/* Insmod parameters */ +I2C_CLIENT_INSMOD; + +/* Registers */ + +#define DS1672_REG_CNT_BASE 0 +#define DS1672_REG_CONTROL 4 +#define DS1672_REG_TRICKLE 5 + + +/* Prototypes */ +static int ds1672_probe(struct i2c_adapter *adapter, int address, int kind); + +/* + * In the routines that deal directly with the ds1672 hardware, we use + * rtc_time -- month 0-11, hour 0-23, yr = calendar year-epoch + * Epoch is initialized as 2000. Time is set to UTC. + */ +static int ds1672_get_datetime(struct i2c_client *client, struct rtc_time *tm) +{ + unsigned long time; + unsigned char addr = DS1672_REG_CNT_BASE; + unsigned char buf[4]; + + struct i2c_msg msgs[] = { + { client->addr, 0, 1, &addr }, /* setup read ptr */ + { client->addr, I2C_M_RD, 4, buf }, /* read date */ + }; + + /* read date registers */ + if ((i2c_transfer(client->adapter, &msgs[0], 2)) != 2) { + dev_err(&client->dev, "%s: read error\n", __FUNCTION__); + return -EIO; + } + + dev_dbg(&client->dev, + "%s: raw read data - counters=%02x,%02x,%02x,%02x\n" + __FUNCTION__, + buf[0], buf[1], buf[2], buf[3]); + + time = (buf[3] << 24) | (buf[2] << 16) | (buf[1] << 8) | buf[0]; + + rtc_time_to_tm(time, tm); + + dev_dbg(&client->dev, "%s: tm is secs=%d, mins=%d, hours=%d, " + "mday=%d, mon=%d, year=%d, wday=%d\n", + __FUNCTION__, + tm->tm_sec, tm->tm_min, tm->tm_hour, + tm->tm_mday, tm->tm_mon, tm->tm_year, tm->tm_wday); + + return 0; +} + +static int ds1672_set_mmss(struct i2c_client *client, unsigned long secs) +{ + int xfer; + unsigned char buf[5]; + + buf[0] = DS1672_REG_CNT_BASE; + buf[1] = secs & 0x000000FF; + buf[2] = (secs & 0x0000FF00) >> 8; + buf[3] = (secs & 0x00FF0000) >> 16; + buf[4] = (secs & 0xFF000000) >> 24; + + xfer = i2c_master_send(client, buf, 5); + if (xfer != 5) { + dev_err(&client->dev, "%s: send: %d\n", __FUNCTION__, xfer); + return -EIO; + } + + return 0; +} + +static int ds1672_set_datetime(struct i2c_client *client, struct rtc_time *tm) +{ + unsigned long secs; + + dev_dbg(&client->dev, + "%s: secs=%d, mins=%d, hours=%d, ", + "mday=%d, mon=%d, year=%d, wday=%d\n", + __FUNCTION__, + tm->tm_sec, tm->tm_min, tm->tm_hour, + tm->tm_mday, tm->tm_mon, tm->tm_year, tm->tm_wday); + + rtc_tm_to_time(tm, &secs); + + return ds1672_set_mmss(client, secs); +} + +static int 
ds1672_rtc_read_time(struct device *dev, struct rtc_time *tm) +{ + return ds1672_get_datetime(to_i2c_client(dev), tm); +} + +static int ds1672_rtc_set_time(struct device *dev, struct rtc_time *tm) +{ + return ds1672_set_datetime(to_i2c_client(dev), tm); +} + +static int ds1672_rtc_set_mmss(struct device *dev, unsigned long secs) +{ + return ds1672_set_mmss(to_i2c_client(dev), secs); +} + +static struct rtc_class_ops ds1672_rtc_ops = { + .read_time = ds1672_rtc_read_time, + .set_time = ds1672_rtc_set_time, + .set_mmss = ds1672_rtc_set_mmss, +}; + +static int ds1672_attach(struct i2c_adapter *adapter) +{ + dev_dbg(&adapter->dev, "%s\n", __FUNCTION__); + return i2c_probe(adapter, &addr_data, ds1672_probe); +} + +static int ds1672_detach(struct i2c_client *client) +{ + int err; + struct rtc_device *rtc = i2c_get_clientdata(client); + + dev_dbg(&client->dev, "%s\n", __FUNCTION__); + + if (rtc) + rtc_device_unregister(rtc); + + if ((err = i2c_detach_client(client))) + return err; + + kfree(client); + + return 0; +} + +static struct i2c_driver ds1672_driver = { + .driver = { + .name = "ds1672", + }, + .id = I2C_DRIVERID_DS1672, + .attach_adapter = &ds1672_attach, + .detach_client = &ds1672_detach, +}; + +static int ds1672_probe(struct i2c_adapter *adapter, int address, int kind) +{ + int err = 0; + struct i2c_client *client; + struct rtc_device *rtc; + + dev_dbg(&adapter->dev, "%s\n", __FUNCTION__); + + if (!i2c_check_functionality(adapter, I2C_FUNC_I2C)) { + err = -ENODEV; + goto exit; + } + + if (!(client = kzalloc(sizeof(struct i2c_client), GFP_KERNEL))) { + err = -ENOMEM; + goto exit; + } + + /* I2C client */ + client->addr = address; + client->driver = &ds1672_driver; + client->adapter = adapter; + + strlcpy(client->name, ds1672_driver.driver.name, I2C_NAME_SIZE); + + /* Inform the i2c layer */ + if ((err = i2c_attach_client(client))) + goto exit_kfree; + + dev_info(&client->dev, "chip found, driver version " DRV_VERSION "\n"); + + rtc = rtc_device_register(ds1672_driver.driver.name, &client->dev, + &ds1672_rtc_ops, THIS_MODULE); + + if (IS_ERR(rtc)) { + err = PTR_ERR(rtc); + dev_err(&client->dev, + "unable to register the class device\n"); + goto exit_detach; + } + + i2c_set_clientdata(client, rtc); + + return 0; + +exit_detach: + i2c_detach_client(client); + +exit_kfree: + kfree(client); + +exit: + return err; +} + +static int __init ds1672_init(void) +{ + return i2c_add_driver(&ds1672_driver); +} + +static void __exit ds1672_exit(void) +{ + i2c_del_driver(&ds1672_driver); +} + +MODULE_AUTHOR("Alessandro Zummo <a.zummo@towertech.it>"); +MODULE_DESCRIPTION("Dallas/Maxim DS1672 timekeeper driver"); +MODULE_LICENSE("GPL"); +MODULE_VERSION(DRV_VERSION); + +module_init(ds1672_init); +module_exit(ds1672_exit); diff --git a/drivers/rtc/rtc-ep93xx.c b/drivers/rtc/rtc-ep93xx.c new file mode 100644 index 000000000000..0dd80ea686a9 --- /dev/null +++ b/drivers/rtc/rtc-ep93xx.c @@ -0,0 +1,162 @@ +/* + * A driver for the RTC embedded in the Cirrus Logic EP93XX processors + * Copyright (c) 2006 Tower Technologies + * + * Author: Alessandro Zummo <a.zummo@towertech.it> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
+ */ + +#include <linux/module.h> +#include <linux/rtc.h> +#include <linux/platform_device.h> +#include <asm/hardware.h> + +#define EP93XX_RTC_REG(x) (EP93XX_RTC_BASE + (x)) +#define EP93XX_RTC_DATA EP93XX_RTC_REG(0x0000) +#define EP93XX_RTC_LOAD EP93XX_RTC_REG(0x000C) +#define EP93XX_RTC_SWCOMP EP93XX_RTC_REG(0x0108) + +#define DRV_VERSION "0.2" + +static int ep93xx_get_swcomp(struct device *dev, unsigned short *preload, + unsigned short *delete) +{ + unsigned short comp = __raw_readl(EP93XX_RTC_SWCOMP); + + if (preload) + *preload = comp & 0xffff; + + if (delete) + *delete = (comp >> 16) & 0x1f; + + return 0; +} + +static int ep93xx_rtc_read_time(struct device *dev, struct rtc_time *tm) +{ + unsigned long time = __raw_readl(EP93XX_RTC_DATA); + + rtc_time_to_tm(time, tm); + return 0; +} + +static int ep93xx_rtc_set_mmss(struct device *dev, unsigned long secs) +{ + __raw_writel(secs + 1, EP93XX_RTC_LOAD); + return 0; +} + +static int ep93xx_rtc_set_time(struct device *dev, struct rtc_time *tm) +{ + int err; + unsigned long secs; + + err = rtc_tm_to_time(tm, &secs); + if (err != 0) + return err; + + return ep93xx_rtc_set_mmss(dev, secs); +} + +static int ep93xx_rtc_proc(struct device *dev, struct seq_file *seq) +{ + unsigned short preload, delete; + + ep93xx_get_swcomp(dev, &preload, &delete); + + seq_printf(seq, "24hr\t\t: yes\n"); + seq_printf(seq, "preload\t\t: %d\n", preload); + seq_printf(seq, "delete\t\t: %d\n", delete); + + return 0; +} + +static struct rtc_class_ops ep93xx_rtc_ops = { + .read_time = ep93xx_rtc_read_time, + .set_time = ep93xx_rtc_set_time, + .set_mmss = ep93xx_rtc_set_mmss, + .proc = ep93xx_rtc_proc, +}; + +static ssize_t ep93xx_sysfs_show_comp_preload(struct device *dev, + struct device_attribute *attr, char *buf) +{ + unsigned short preload; + + ep93xx_get_swcomp(dev, &preload, NULL); + + return sprintf(buf, "%d\n", preload); +} +static DEVICE_ATTR(comp_preload, S_IRUGO, ep93xx_sysfs_show_comp_preload, NULL); + +static ssize_t ep93xx_sysfs_show_comp_delete(struct device *dev, + struct device_attribute *attr, char *buf) +{ + unsigned short delete; + + ep93xx_get_swcomp(dev, NULL, &delete); + + return sprintf(buf, "%d\n", delete); +} +static DEVICE_ATTR(comp_delete, S_IRUGO, ep93xx_sysfs_show_comp_delete, NULL); + + +static int __devinit ep93xx_rtc_probe(struct platform_device *dev) +{ + struct rtc_device *rtc = rtc_device_register("ep93xx", + &dev->dev, &ep93xx_rtc_ops, THIS_MODULE); + + if (IS_ERR(rtc)) { + dev_err(&dev->dev, "unable to register\n"); + return PTR_ERR(rtc); + } + + platform_set_drvdata(dev, rtc); + + device_create_file(&dev->dev, &dev_attr_comp_preload); + device_create_file(&dev->dev, &dev_attr_comp_delete); + + return 0; +} + +static int __devexit ep93xx_rtc_remove(struct platform_device *dev) +{ + struct rtc_device *rtc = platform_get_drvdata(dev); + + if (rtc) + rtc_device_unregister(rtc); + + platform_set_drvdata(dev, NULL); + + return 0; +} + +static struct platform_driver ep93xx_rtc_platform_driver = { + .driver = { + .name = "ep93xx-rtc", + .owner = THIS_MODULE, + }, + .probe = ep93xx_rtc_probe, + .remove = __devexit_p(ep93xx_rtc_remove), +}; + +static int __init ep93xx_rtc_init(void) +{ + return platform_driver_register(&ep93xx_rtc_platform_driver); +} + +static void __exit ep93xx_rtc_exit(void) +{ + platform_driver_unregister(&ep93xx_rtc_platform_driver); +} + +MODULE_AUTHOR("Alessandro Zummo <a.zummo@towertech.it>"); +MODULE_DESCRIPTION("EP93XX RTC driver"); +MODULE_LICENSE("GPL"); +MODULE_VERSION(DRV_VERSION); + 
+module_init(ep93xx_rtc_init); +module_exit(ep93xx_rtc_exit); diff --git a/drivers/rtc/rtc-lib.c b/drivers/rtc/rtc-lib.c new file mode 100644 index 000000000000..cfedc1d28ee1 --- /dev/null +++ b/drivers/rtc/rtc-lib.c @@ -0,0 +1,101 @@ +/* + * rtc and date/time utility functions + * + * Copyright (C) 2005-06 Tower Technologies + * Author: Alessandro Zummo <a.zummo@towertech.it> + * + * based on arch/arm/common/rtctime.c and other bits + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. +*/ + +#include <linux/module.h> +#include <linux/rtc.h> + +static const unsigned char rtc_days_in_month[] = { + 31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31 +}; + +#define LEAPS_THRU_END_OF(y) ((y)/4 - (y)/100 + (y)/400) +#define LEAP_YEAR(year) ((!(year % 4) && (year % 100)) || !(year % 400)) + +int rtc_month_days(unsigned int month, unsigned int year) +{ + return rtc_days_in_month[month] + (LEAP_YEAR(year) && month == 1); +} +EXPORT_SYMBOL(rtc_month_days); + +/* + * Convert seconds since 01-01-1970 00:00:00 to Gregorian date. + */ +void rtc_time_to_tm(unsigned long time, struct rtc_time *tm) +{ + register int days, month, year; + + days = time / 86400; + time -= days * 86400; + + /* day of the week, 1970-01-01 was a Thursday */ + tm->tm_wday = (days + 4) % 7; + + year = 1970 + days / 365; + days -= (year - 1970) * 365 + + LEAPS_THRU_END_OF(year - 1) + - LEAPS_THRU_END_OF(1970 - 1); + if (days < 0) { + year -= 1; + days += 365 + LEAP_YEAR(year); + } + tm->tm_year = year - 1900; + tm->tm_yday = days + 1; + + for (month = 0; month < 11; month++) { + int newdays; + + newdays = days - rtc_month_days(month, year); + if (newdays < 0) + break; + days = newdays; + } + tm->tm_mon = month; + tm->tm_mday = days + 1; + + tm->tm_hour = time / 3600; + time -= tm->tm_hour * 3600; + tm->tm_min = time / 60; + tm->tm_sec = time - tm->tm_min * 60; +} +EXPORT_SYMBOL(rtc_time_to_tm); + +/* + * Does the rtc_time represent a valid date/time? + */ +int rtc_valid_tm(struct rtc_time *tm) +{ + if (tm->tm_year < 70 + || tm->tm_mon >= 12 + || tm->tm_mday < 1 + || tm->tm_mday > rtc_month_days(tm->tm_mon, tm->tm_year + 1900) + || tm->tm_hour >= 24 + || tm->tm_min >= 60 + || tm->tm_sec >= 60) + return -EINVAL; + + return 0; +} +EXPORT_SYMBOL(rtc_valid_tm); + +/* + * Convert Gregorian date to seconds since 01-01-1970 00:00:00. + */ +int rtc_tm_to_time(struct rtc_time *tm, unsigned long *time) +{ + *time = mktime(tm->tm_year + 1900, tm->tm_mon + 1, tm->tm_mday, + tm->tm_hour, tm->tm_min, tm->tm_sec); + return 0; +} +EXPORT_SYMBOL(rtc_tm_to_time); + +MODULE_LICENSE("GPL"); diff --git a/drivers/rtc/rtc-m48t86.c b/drivers/rtc/rtc-m48t86.c new file mode 100644 index 000000000000..db445c872b1b --- /dev/null +++ b/drivers/rtc/rtc-m48t86.c @@ -0,0 +1,209 @@ +/* + * ST M48T86 / Dallas DS12887 RTC driver + * Copyright (c) 2006 Tower Technologies + * + * Author: Alessandro Zummo <a.zummo@towertech.it> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This drivers only supports the clock running in BCD and 24H mode. + * If it will be ever adapted to binary and 12H mode, care must be taken + * to not introduce bugs. 
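+ * (The only 12H-aware code at the moment is the hour correction at the
+ * end of m48t86_rtc_read_time().)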
+ */ + +#include <linux/module.h> +#include <linux/rtc.h> +#include <linux/platform_device.h> +#include <linux/m48t86.h> +#include <linux/bcd.h> + +#define M48T86_REG_SEC 0x00 +#define M48T86_REG_SECALRM 0x01 +#define M48T86_REG_MIN 0x02 +#define M48T86_REG_MINALRM 0x03 +#define M48T86_REG_HOUR 0x04 +#define M48T86_REG_HOURALRM 0x05 +#define M48T86_REG_DOW 0x06 /* 1 = sunday */ +#define M48T86_REG_DOM 0x07 +#define M48T86_REG_MONTH 0x08 /* 1 - 12 */ +#define M48T86_REG_YEAR 0x09 /* 0 - 99 */ +#define M48T86_REG_A 0x0A +#define M48T86_REG_B 0x0B +#define M48T86_REG_C 0x0C +#define M48T86_REG_D 0x0D + +#define M48T86_REG_B_H24 (1 << 1) +#define M48T86_REG_B_DM (1 << 2) +#define M48T86_REG_B_SET (1 << 7) +#define M48T86_REG_D_VRT (1 << 7) + +#define DRV_VERSION "0.1" + + +static int m48t86_rtc_read_time(struct device *dev, struct rtc_time *tm) +{ + unsigned char reg; + struct platform_device *pdev = to_platform_device(dev); + struct m48t86_ops *ops = pdev->dev.platform_data; + + reg = ops->readb(M48T86_REG_B); + + if (reg & M48T86_REG_B_DM) { + /* data (binary) mode */ + tm->tm_sec = ops->readb(M48T86_REG_SEC); + tm->tm_min = ops->readb(M48T86_REG_MIN); + tm->tm_hour = ops->readb(M48T86_REG_HOUR) & 0x3F; + tm->tm_mday = ops->readb(M48T86_REG_DOM); + /* tm_mon is 0-11 */ + tm->tm_mon = ops->readb(M48T86_REG_MONTH) - 1; + tm->tm_year = ops->readb(M48T86_REG_YEAR) + 100; + tm->tm_wday = ops->readb(M48T86_REG_DOW); + } else { + /* bcd mode */ + tm->tm_sec = BCD2BIN(ops->readb(M48T86_REG_SEC)); + tm->tm_min = BCD2BIN(ops->readb(M48T86_REG_MIN)); + tm->tm_hour = BCD2BIN(ops->readb(M48T86_REG_HOUR) & 0x3F); + tm->tm_mday = BCD2BIN(ops->readb(M48T86_REG_DOM)); + /* tm_mon is 0-11 */ + tm->tm_mon = BCD2BIN(ops->readb(M48T86_REG_MONTH)) - 1; + tm->tm_year = BCD2BIN(ops->readb(M48T86_REG_YEAR)) + 100; + tm->tm_wday = BCD2BIN(ops->readb(M48T86_REG_DOW)); + } + + /* correct the hour if the clock is in 12h mode */ + if (!(reg & M48T86_REG_B_H24)) + if (ops->readb(M48T86_REG_HOUR) & 0x80) + tm->tm_hour += 12; + + return 0; +} + +static int m48t86_rtc_set_time(struct device *dev, struct rtc_time *tm) +{ + unsigned char reg; + struct platform_device *pdev = to_platform_device(dev); + struct m48t86_ops *ops = pdev->dev.platform_data; + + reg = ops->readb(M48T86_REG_B); + + /* update flag and 24h mode */ + reg |= M48T86_REG_B_SET | M48T86_REG_B_H24; + ops->writeb(reg, M48T86_REG_B); + + if (reg & M48T86_REG_B_DM) { + /* data (binary) mode */ + ops->writeb(tm->tm_sec, M48T86_REG_SEC); + ops->writeb(tm->tm_min, M48T86_REG_MIN); + ops->writeb(tm->tm_hour, M48T86_REG_HOUR); + ops->writeb(tm->tm_mday, M48T86_REG_DOM); + ops->writeb(tm->tm_mon + 1, M48T86_REG_MONTH); + ops->writeb(tm->tm_year % 100, M48T86_REG_YEAR); + ops->writeb(tm->tm_wday, M48T86_REG_DOW); + } else { + /* bcd mode */ + ops->writeb(BIN2BCD(tm->tm_sec), M48T86_REG_SEC); + ops->writeb(BIN2BCD(tm->tm_min), M48T86_REG_MIN); + ops->writeb(BIN2BCD(tm->tm_hour), M48T86_REG_HOUR); + ops->writeb(BIN2BCD(tm->tm_mday), M48T86_REG_DOM); + ops->writeb(BIN2BCD(tm->tm_mon + 1), M48T86_REG_MONTH); + ops->writeb(BIN2BCD(tm->tm_year % 100), M48T86_REG_YEAR); + ops->writeb(BIN2BCD(tm->tm_wday), M48T86_REG_DOW); + } + + /* update ended */ + reg &= ~M48T86_REG_B_SET; + ops->writeb(reg, M48T86_REG_B); + + return 0; +} + +static int m48t86_rtc_proc(struct device *dev, struct seq_file *seq) +{ + unsigned char reg; + struct platform_device *pdev = to_platform_device(dev); + struct m48t86_ops *ops = pdev->dev.platform_data; + + reg = ops->readb(M48T86_REG_B); + + 
seq_printf(seq, "24hr\t\t: %s\n", + (reg & M48T86_REG_B_H24) ? "yes" : "no"); + + seq_printf(seq, "mode\t\t: %s\n", + (reg & M48T86_REG_B_DM) ? "binary" : "bcd"); + + reg = ops->readb(M48T86_REG_D); + + seq_printf(seq, "battery\t\t: %s\n", + (reg & M48T86_REG_D_VRT) ? "ok" : "exhausted"); + + return 0; +} + +static struct rtc_class_ops m48t86_rtc_ops = { + .read_time = m48t86_rtc_read_time, + .set_time = m48t86_rtc_set_time, + .proc = m48t86_rtc_proc, +}; + +static int __devinit m48t86_rtc_probe(struct platform_device *dev) +{ + unsigned char reg; + struct m48t86_ops *ops = dev->dev.platform_data; + struct rtc_device *rtc = rtc_device_register("m48t86", + &dev->dev, &m48t86_rtc_ops, THIS_MODULE); + + if (IS_ERR(rtc)) { + dev_err(&dev->dev, "unable to register\n"); + return PTR_ERR(rtc); + } + + platform_set_drvdata(dev, rtc); + + /* read battery status */ + reg = ops->readb(M48T86_REG_D); + dev_info(&dev->dev, "battery %s\n", + (reg & M48T86_REG_D_VRT) ? "ok" : "exhausted"); + + return 0; +} + +static int __devexit m48t86_rtc_remove(struct platform_device *dev) +{ + struct rtc_device *rtc = platform_get_drvdata(dev); + + if (rtc) + rtc_device_unregister(rtc); + + platform_set_drvdata(dev, NULL); + + return 0; +} + +static struct platform_driver m48t86_rtc_platform_driver = { + .driver = { + .name = "rtc-m48t86", + .owner = THIS_MODULE, + }, + .probe = m48t86_rtc_probe, + .remove = __devexit_p(m48t86_rtc_remove), +}; + +static int __init m48t86_rtc_init(void) +{ + return platform_driver_register(&m48t86_rtc_platform_driver); +} + +static void __exit m48t86_rtc_exit(void) +{ + platform_driver_unregister(&m48t86_rtc_platform_driver); +} + +MODULE_AUTHOR("Alessandro Zummo <a.zummo@towertech.it>"); +MODULE_DESCRIPTION("M48T86 RTC driver"); +MODULE_LICENSE("GPL"); +MODULE_VERSION(DRV_VERSION); + +module_init(m48t86_rtc_init); +module_exit(m48t86_rtc_exit); diff --git a/drivers/rtc/rtc-pcf8563.c b/drivers/rtc/rtc-pcf8563.c new file mode 100644 index 000000000000..d857d45bdbe8 --- /dev/null +++ b/drivers/rtc/rtc-pcf8563.c @@ -0,0 +1,353 @@ +/* + * An I2C driver for the Philips PCF8563 RTC + * Copyright 2005-06 Tower Technologies + * + * Author: Alessandro Zummo <a.zummo@towertech.it> + * Maintainers: http://www.nslu2-linux.org/ + * + * based on the other drivers in this same directory. + * + * http://www.semiconductors.philips.com/acrobat/datasheets/PCF8563-04.pdf + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#include <linux/i2c.h> +#include <linux/bcd.h> +#include <linux/rtc.h> + +#define DRV_VERSION "0.4.2" + +/* Addresses to scan: none + * This chip cannot be reliably autodetected. An empty eeprom + * located at 0x51 will pass the validation routine due to + * the way the registers are implemented. 
+ */ +static unsigned short normal_i2c[] = { I2C_CLIENT_END }; + +/* Module parameters */ +I2C_CLIENT_INSMOD; + +#define PCF8563_REG_ST1 0x00 /* status */ +#define PCF8563_REG_ST2 0x01 + +#define PCF8563_REG_SC 0x02 /* datetime */ +#define PCF8563_REG_MN 0x03 +#define PCF8563_REG_HR 0x04 +#define PCF8563_REG_DM 0x05 +#define PCF8563_REG_DW 0x06 +#define PCF8563_REG_MO 0x07 +#define PCF8563_REG_YR 0x08 + +#define PCF8563_REG_AMN 0x09 /* alarm */ +#define PCF8563_REG_AHR 0x0A +#define PCF8563_REG_ADM 0x0B +#define PCF8563_REG_ADW 0x0C + +#define PCF8563_REG_CLKO 0x0D /* clock out */ +#define PCF8563_REG_TMRC 0x0E /* timer control */ +#define PCF8563_REG_TMR 0x0F /* timer */ + +#define PCF8563_SC_LV 0x80 /* low voltage */ +#define PCF8563_MO_C 0x80 /* century */ + +static int pcf8563_probe(struct i2c_adapter *adapter, int address, int kind); +static int pcf8563_detach(struct i2c_client *client); + +/* + * In the routines that deal directly with the pcf8563 hardware, we use + * rtc_time -- month 0-11, hour 0-23, yr = calendar year-epoch. + */ +static int pcf8563_get_datetime(struct i2c_client *client, struct rtc_time *tm) +{ + unsigned char buf[13] = { PCF8563_REG_ST1 }; + + struct i2c_msg msgs[] = { + { client->addr, 0, 1, buf }, /* setup read ptr */ + { client->addr, I2C_M_RD, 13, buf }, /* read status + date */ + }; + + /* read registers */ + if ((i2c_transfer(client->adapter, msgs, 2)) != 2) { + dev_err(&client->dev, "%s: read error\n", __FUNCTION__); + return -EIO; + } + + if (buf[PCF8563_REG_SC] & PCF8563_SC_LV) + dev_info(&client->dev, + "low voltage detected, date/time is not reliable.\n"); + + dev_dbg(&client->dev, + "%s: raw data is st1=%02x, st2=%02x, sec=%02x, min=%02x, hr=%02x, " + "mday=%02x, wday=%02x, mon=%02x, year=%02x\n", + __FUNCTION__, + buf[0], buf[1], buf[2], buf[3], + buf[4], buf[5], buf[6], buf[7], + buf[8]); + + + tm->tm_sec = BCD2BIN(buf[PCF8563_REG_SC] & 0x7F); + tm->tm_min = BCD2BIN(buf[PCF8563_REG_MN] & 0x7F); + tm->tm_hour = BCD2BIN(buf[PCF8563_REG_HR] & 0x3F); /* rtc hr 0-23 */ + tm->tm_mday = BCD2BIN(buf[PCF8563_REG_DM] & 0x3F); + tm->tm_wday = buf[PCF8563_REG_DW] & 0x07; + tm->tm_mon = BCD2BIN(buf[PCF8563_REG_MO] & 0x1F) - 1; /* rtc mn 1-12 */ + tm->tm_year = BCD2BIN(buf[PCF8563_REG_YR]) + + (buf[PCF8563_REG_MO] & PCF8563_MO_C ? 100 : 0); + + dev_dbg(&client->dev, "%s: tm is secs=%d, mins=%d, hours=%d, " + "mday=%d, mon=%d, year=%d, wday=%d\n", + __FUNCTION__, + tm->tm_sec, tm->tm_min, tm->tm_hour, + tm->tm_mday, tm->tm_mon, tm->tm_year, tm->tm_wday); + + /* the clock can give out invalid datetime, but we cannot return + * -EINVAL otherwise hwclock will refuse to set the time on bootup. 
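+ * Log the problem and return the value as read; userspace can then
+ * decide whether to reprogram the clock.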
+ */ + if (rtc_valid_tm(tm) < 0) + dev_err(&client->dev, "retrieved date/time is not valid.\n"); + + return 0; +} + +static int pcf8563_set_datetime(struct i2c_client *client, struct rtc_time *tm) +{ + int i, err; + unsigned char buf[9]; + + dev_dbg(&client->dev, "%s: secs=%d, mins=%d, hours=%d, " + "mday=%d, mon=%d, year=%d, wday=%d\n", + __FUNCTION__, + tm->tm_sec, tm->tm_min, tm->tm_hour, + tm->tm_mday, tm->tm_mon, tm->tm_year, tm->tm_wday); + + /* hours, minutes and seconds */ + buf[PCF8563_REG_SC] = BIN2BCD(tm->tm_sec); + buf[PCF8563_REG_MN] = BIN2BCD(tm->tm_min); + buf[PCF8563_REG_HR] = BIN2BCD(tm->tm_hour); + + buf[PCF8563_REG_DM] = BIN2BCD(tm->tm_mday); + + /* month, 1 - 12 */ + buf[PCF8563_REG_MO] = BIN2BCD(tm->tm_mon + 1); + + /* year and century */ + buf[PCF8563_REG_YR] = BIN2BCD(tm->tm_year % 100); + if (tm->tm_year / 100) + buf[PCF8563_REG_MO] |= PCF8563_MO_C; + + buf[PCF8563_REG_DW] = tm->tm_wday & 0x07; + + /* write register's data */ + for (i = 0; i < 7; i++) { + unsigned char data[2] = { PCF8563_REG_SC + i, + buf[PCF8563_REG_SC + i] }; + + err = i2c_master_send(client, data, sizeof(data)); + if (err != sizeof(data)) { + dev_err(&client->dev, + "%s: err=%d addr=%02x, data=%02x\n", + __FUNCTION__, err, data[0], data[1]); + return -EIO; + } + }; + + return 0; +} + +struct pcf8563_limit +{ + unsigned char reg; + unsigned char mask; + unsigned char min; + unsigned char max; +}; + +static int pcf8563_validate_client(struct i2c_client *client) +{ + int i; + + static const struct pcf8563_limit pattern[] = { + /* register, mask, min, max */ + { PCF8563_REG_SC, 0x7F, 0, 59 }, + { PCF8563_REG_MN, 0x7F, 0, 59 }, + { PCF8563_REG_HR, 0x3F, 0, 23 }, + { PCF8563_REG_DM, 0x3F, 0, 31 }, + { PCF8563_REG_MO, 0x1F, 0, 12 }, + }; + + /* check limits (only registers with bcd values) */ + for (i = 0; i < ARRAY_SIZE(pattern); i++) { + int xfer; + unsigned char value; + unsigned char buf = pattern[i].reg; + + struct i2c_msg msgs[] = { + { client->addr, 0, 1, &buf }, + { client->addr, I2C_M_RD, 1, &buf }, + }; + + xfer = i2c_transfer(client->adapter, msgs, ARRAY_SIZE(msgs)); + + if (xfer != ARRAY_SIZE(msgs)) { + dev_err(&client->adapter->dev, + "%s: could not read register 0x%02X\n", + __FUNCTION__, pattern[i].reg); + + return -EIO; + } + + value = BCD2BIN(buf & pattern[i].mask); + + if (value > pattern[i].max || + value < pattern[i].min) { + dev_dbg(&client->adapter->dev, + "%s: pattern=%d, reg=%x, mask=0x%02x, min=%d, " + "max=%d, value=%d, raw=0x%02X\n", + __FUNCTION__, i, pattern[i].reg, pattern[i].mask, + pattern[i].min, pattern[i].max, + value, buf); + + return -ENODEV; + } + } + + return 0; +} + +static int pcf8563_rtc_read_time(struct device *dev, struct rtc_time *tm) +{ + return pcf8563_get_datetime(to_i2c_client(dev), tm); +} + +static int pcf8563_rtc_set_time(struct device *dev, struct rtc_time *tm) +{ + return pcf8563_set_datetime(to_i2c_client(dev), tm); +} + +static int pcf8563_rtc_proc(struct device *dev, struct seq_file *seq) +{ + seq_printf(seq, "24hr\t\t: yes\n"); + return 0; +} + +static struct rtc_class_ops pcf8563_rtc_ops = { + .proc = pcf8563_rtc_proc, + .read_time = pcf8563_rtc_read_time, + .set_time = pcf8563_rtc_set_time, +}; + +static int pcf8563_attach(struct i2c_adapter *adapter) +{ + return i2c_probe(adapter, &addr_data, pcf8563_probe); +} + +static struct i2c_driver pcf8563_driver = { + .driver = { + .name = "pcf8563", + }, + .id = I2C_DRIVERID_PCF8563, + .attach_adapter = &pcf8563_attach, + .detach_client = &pcf8563_detach, +}; + +static int pcf8563_probe(struct 
i2c_adapter *adapter, int address, int kind) +{ + struct i2c_client *client; + struct rtc_device *rtc; + + int err = 0; + + dev_dbg(&adapter->dev, "%s\n", __FUNCTION__); + + if (!i2c_check_functionality(adapter, I2C_FUNC_I2C)) { + err = -ENODEV; + goto exit; + } + + if (!(client = kzalloc(sizeof(struct i2c_client), GFP_KERNEL))) { + err = -ENOMEM; + goto exit; + } + + client->addr = address; + client->driver = &pcf8563_driver; + client->adapter = adapter; + + strlcpy(client->name, pcf8563_driver.driver.name, I2C_NAME_SIZE); + + /* Verify the chip is really an PCF8563 */ + if (kind < 0) { + if (pcf8563_validate_client(client) < 0) { + err = -ENODEV; + goto exit_kfree; + } + } + + /* Inform the i2c layer */ + if ((err = i2c_attach_client(client))) + goto exit_kfree; + + dev_info(&client->dev, "chip found, driver version " DRV_VERSION "\n"); + + rtc = rtc_device_register(pcf8563_driver.driver.name, &client->dev, + &pcf8563_rtc_ops, THIS_MODULE); + + if (IS_ERR(rtc)) { + err = PTR_ERR(rtc); + dev_err(&client->dev, + "unable to register the class device\n"); + goto exit_detach; + } + + i2c_set_clientdata(client, rtc); + + return 0; + +exit_detach: + i2c_detach_client(client); + +exit_kfree: + kfree(client); + +exit: + return err; +} + +static int pcf8563_detach(struct i2c_client *client) +{ + int err; + struct rtc_device *rtc = i2c_get_clientdata(client); + + dev_dbg(&client->dev, "%s\n", __FUNCTION__); + + if (rtc) + rtc_device_unregister(rtc); + + if ((err = i2c_detach_client(client))) + return err; + + kfree(client); + + return 0; +} + +static int __init pcf8563_init(void) +{ + return i2c_add_driver(&pcf8563_driver); +} + +static void __exit pcf8563_exit(void) +{ + i2c_del_driver(&pcf8563_driver); +} + +MODULE_AUTHOR("Alessandro Zummo <a.zummo@towertech.it>"); +MODULE_DESCRIPTION("Philips PCF8563/Epson RTC8564 RTC driver"); +MODULE_LICENSE("GPL"); +MODULE_VERSION(DRV_VERSION); + +module_init(pcf8563_init); +module_exit(pcf8563_exit); diff --git a/drivers/rtc/rtc-proc.c b/drivers/rtc/rtc-proc.c new file mode 100644 index 000000000000..90b8a97a0919 --- /dev/null +++ b/drivers/rtc/rtc-proc.c @@ -0,0 +1,162 @@ +/* + * RTC subsystem, proc interface + * + * Copyright (C) 2005-06 Tower Technologies + * Author: Alessandro Zummo <a.zummo@towertech.it> + * + * based on arch/arm/common/rtctime.c + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
+*/ + +#include <linux/module.h> +#include <linux/rtc.h> +#include <linux/proc_fs.h> +#include <linux/seq_file.h> + +static struct class_device *rtc_dev = NULL; +static DEFINE_MUTEX(rtc_lock); + +static int rtc_proc_show(struct seq_file *seq, void *offset) +{ + int err; + struct class_device *class_dev = seq->private; + struct rtc_class_ops *ops = to_rtc_device(class_dev)->ops; + struct rtc_wkalrm alrm; + struct rtc_time tm; + + err = rtc_read_time(class_dev, &tm); + if (err == 0) { + seq_printf(seq, + "rtc_time\t: %02d:%02d:%02d\n" + "rtc_date\t: %04d-%02d-%02d\n", + tm.tm_hour, tm.tm_min, tm.tm_sec, + tm.tm_year + 1900, tm.tm_mon + 1, tm.tm_mday); + } + + err = rtc_read_alarm(class_dev, &alrm); + if (err == 0) { + seq_printf(seq, "alrm_time\t: "); + if ((unsigned int)alrm.time.tm_hour <= 24) + seq_printf(seq, "%02d:", alrm.time.tm_hour); + else + seq_printf(seq, "**:"); + if ((unsigned int)alrm.time.tm_min <= 59) + seq_printf(seq, "%02d:", alrm.time.tm_min); + else + seq_printf(seq, "**:"); + if ((unsigned int)alrm.time.tm_sec <= 59) + seq_printf(seq, "%02d\n", alrm.time.tm_sec); + else + seq_printf(seq, "**\n"); + + seq_printf(seq, "alrm_date\t: "); + if ((unsigned int)alrm.time.tm_year <= 200) + seq_printf(seq, "%04d-", alrm.time.tm_year + 1900); + else + seq_printf(seq, "****-"); + if ((unsigned int)alrm.time.tm_mon <= 11) + seq_printf(seq, "%02d-", alrm.time.tm_mon + 1); + else + seq_printf(seq, "**-"); + if ((unsigned int)alrm.time.tm_mday <= 31) + seq_printf(seq, "%02d\n", alrm.time.tm_mday); + else + seq_printf(seq, "**\n"); + seq_printf(seq, "alrm_wakeup\t: %s\n", + alrm.enabled ? "yes" : "no"); + seq_printf(seq, "alrm_pending\t: %s\n", + alrm.pending ? "yes" : "no"); + } + + if (ops->proc) + ops->proc(class_dev->dev, seq); + + return 0; +} + +static int rtc_proc_open(struct inode *inode, struct file *file) +{ + struct class_device *class_dev = PDE(inode)->data; + + if (!try_module_get(THIS_MODULE)) + return -ENODEV; + + return single_open(file, rtc_proc_show, class_dev); +} + +static int rtc_proc_release(struct inode *inode, struct file *file) +{ + int res = single_release(inode, file); + module_put(THIS_MODULE); + return res; +} + +static struct file_operations rtc_proc_fops = { + .open = rtc_proc_open, + .read = seq_read, + .llseek = seq_lseek, + .release = rtc_proc_release, +}; + +static int rtc_proc_add_device(struct class_device *class_dev, + struct class_interface *class_intf) +{ + mutex_lock(&rtc_lock); + if (rtc_dev == NULL) { + struct proc_dir_entry *ent; + + rtc_dev = class_dev; + + ent = create_proc_entry("driver/rtc", 0, NULL); + if (ent) { + struct rtc_device *rtc = to_rtc_device(class_dev); + + ent->proc_fops = &rtc_proc_fops; + ent->owner = rtc->owner; + ent->data = class_dev; + + dev_info(class_dev->dev, "rtc intf: proc\n"); + } + else + rtc_dev = NULL; + } + mutex_unlock(&rtc_lock); + + return 0; +} + +static void rtc_proc_remove_device(struct class_device *class_dev, + struct class_interface *class_intf) +{ + mutex_lock(&rtc_lock); + if (rtc_dev == class_dev) { + remove_proc_entry("driver/rtc", NULL); + rtc_dev = NULL; + } + mutex_unlock(&rtc_lock); +} + +static struct class_interface rtc_proc_interface = { + .add = &rtc_proc_add_device, + .remove = &rtc_proc_remove_device, +}; + +static int __init rtc_proc_init(void) +{ + return rtc_interface_register(&rtc_proc_interface); +} + +static void __exit rtc_proc_exit(void) +{ + class_interface_unregister(&rtc_proc_interface); +} + +module_init(rtc_proc_init); +module_exit(rtc_proc_exit); + +MODULE_AUTHOR("Alessandro 
Zummo <a.zummo@towertech.it>"); +MODULE_DESCRIPTION("RTC class proc interface"); +MODULE_LICENSE("GPL"); diff --git a/drivers/rtc/rtc-rs5c372.c b/drivers/rtc/rtc-rs5c372.c new file mode 100644 index 000000000000..396c8681f66c --- /dev/null +++ b/drivers/rtc/rtc-rs5c372.c @@ -0,0 +1,294 @@ +/* + * An I2C driver for the Ricoh RS5C372 RTC + * + * Copyright (C) 2005 Pavel Mironchik <pmironchik@optifacio.net> + * Copyright (C) 2006 Tower Technologies + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#include <linux/i2c.h> +#include <linux/rtc.h> +#include <linux/bcd.h> + +#define DRV_VERSION "0.2" + +/* Addresses to scan */ +static unsigned short normal_i2c[] = { /* 0x32,*/ I2C_CLIENT_END }; + +/* Insmod parameters */ +I2C_CLIENT_INSMOD; + +#define RS5C372_REG_SECS 0 +#define RS5C372_REG_MINS 1 +#define RS5C372_REG_HOURS 2 +#define RS5C372_REG_WDAY 3 +#define RS5C372_REG_DAY 4 +#define RS5C372_REG_MONTH 5 +#define RS5C372_REG_YEAR 6 +#define RS5C372_REG_TRIM 7 + +#define RS5C372_TRIM_XSL 0x80 +#define RS5C372_TRIM_MASK 0x7F + +#define RS5C372_REG_BASE 0 + +static int rs5c372_attach(struct i2c_adapter *adapter); +static int rs5c372_detach(struct i2c_client *client); +static int rs5c372_probe(struct i2c_adapter *adapter, int address, int kind); + +static struct i2c_driver rs5c372_driver = { + .driver = { + .name = "rs5c372", + }, + .attach_adapter = &rs5c372_attach, + .detach_client = &rs5c372_detach, +}; + +static int rs5c372_get_datetime(struct i2c_client *client, struct rtc_time *tm) +{ + unsigned char buf[7] = { RS5C372_REG_BASE }; + + /* this implements the 1st reading method, according + * to the datasheet. buf[0] is initialized with + * address ptr and transmission format register. 
+ */ + struct i2c_msg msgs[] = { + { client->addr, 0, 1, buf }, + { client->addr, I2C_M_RD, 7, buf }, + }; + + if ((i2c_transfer(client->adapter, msgs, 2)) != 2) { + dev_err(&client->dev, "%s: read error\n", __FUNCTION__); + return -EIO; + } + + tm->tm_sec = BCD2BIN(buf[RS5C372_REG_SECS] & 0x7f); + tm->tm_min = BCD2BIN(buf[RS5C372_REG_MINS] & 0x7f); + tm->tm_hour = BCD2BIN(buf[RS5C372_REG_HOURS] & 0x3f); + tm->tm_wday = BCD2BIN(buf[RS5C372_REG_WDAY] & 0x07); + tm->tm_mday = BCD2BIN(buf[RS5C372_REG_DAY] & 0x3f); + + /* tm->tm_mon is zero-based */ + tm->tm_mon = BCD2BIN(buf[RS5C372_REG_MONTH] & 0x1f) - 1; + + /* year is 1900 + tm->tm_year */ + tm->tm_year = BCD2BIN(buf[RS5C372_REG_YEAR]) + 100; + + dev_dbg(&client->dev, "%s: tm is secs=%d, mins=%d, hours=%d, " + "mday=%d, mon=%d, year=%d, wday=%d\n", + __FUNCTION__, + tm->tm_sec, tm->tm_min, tm->tm_hour, + tm->tm_mday, tm->tm_mon, tm->tm_year, tm->tm_wday); + + return 0; +} + +static int rs5c372_set_datetime(struct i2c_client *client, struct rtc_time *tm) +{ + unsigned char buf[8] = { RS5C372_REG_BASE }; + + dev_dbg(&client->dev, + "%s: secs=%d, mins=%d, hours=%d ", + "mday=%d, mon=%d, year=%d, wday=%d\n", + __FUNCTION__, tm->tm_sec, tm->tm_min, tm->tm_hour, + tm->tm_mday, tm->tm_mon, tm->tm_year, tm->tm_wday); + + buf[1] = BIN2BCD(tm->tm_sec); + buf[2] = BIN2BCD(tm->tm_min); + buf[3] = BIN2BCD(tm->tm_hour); + buf[4] = BIN2BCD(tm->tm_wday); + buf[5] = BIN2BCD(tm->tm_mday); + buf[6] = BIN2BCD(tm->tm_mon + 1); + buf[7] = BIN2BCD(tm->tm_year - 100); + + if ((i2c_master_send(client, buf, 8)) != 8) { + dev_err(&client->dev, "%s: write error\n", __FUNCTION__); + return -EIO; + } + + return 0; +} + +static int rs5c372_get_trim(struct i2c_client *client, int *osc, int *trim) +{ + unsigned char buf = RS5C372_REG_TRIM; + + struct i2c_msg msgs[] = { + { client->addr, 0, 1, &buf }, + { client->addr, I2C_M_RD, 1, &buf }, + }; + + if ((i2c_transfer(client->adapter, msgs, 2)) != 2) { + dev_err(&client->dev, "%s: read error\n", __FUNCTION__); + return -EIO; + } + + dev_dbg(&client->dev, "%s: raw trim=%x\n", __FUNCTION__, trim); + + if (osc) + *osc = (buf & RS5C372_TRIM_XSL) ? 
32000 : 32768; + + if (trim) + *trim = buf & RS5C372_TRIM_MASK; + + return 0; +} + +static int rs5c372_rtc_read_time(struct device *dev, struct rtc_time *tm) +{ + return rs5c372_get_datetime(to_i2c_client(dev), tm); +} + +static int rs5c372_rtc_set_time(struct device *dev, struct rtc_time *tm) +{ + return rs5c372_set_datetime(to_i2c_client(dev), tm); +} + +static int rs5c372_rtc_proc(struct device *dev, struct seq_file *seq) +{ + int err, osc, trim; + + seq_printf(seq, "24hr\t\t: yes\n"); + + if ((err = rs5c372_get_trim(to_i2c_client(dev), &osc, &trim)) == 0) { + seq_printf(seq, "%d.%03d KHz\n", osc / 1000, osc % 1000); + seq_printf(seq, "trim\t: %d\n", trim); + } + + return 0; +} + +static struct rtc_class_ops rs5c372_rtc_ops = { + .proc = rs5c372_rtc_proc, + .read_time = rs5c372_rtc_read_time, + .set_time = rs5c372_rtc_set_time, +}; + +static ssize_t rs5c372_sysfs_show_trim(struct device *dev, + struct device_attribute *attr, char *buf) +{ + int trim; + + if (rs5c372_get_trim(to_i2c_client(dev), NULL, &trim) == 0) + return sprintf(buf, "0x%2x\n", trim); + + return 0; +} +static DEVICE_ATTR(trim, S_IRUGO, rs5c372_sysfs_show_trim, NULL); + +static ssize_t rs5c372_sysfs_show_osc(struct device *dev, + struct device_attribute *attr, char *buf) +{ + int osc; + + if (rs5c372_get_trim(to_i2c_client(dev), &osc, NULL) == 0) + return sprintf(buf, "%d.%03d KHz\n", osc / 1000, osc % 1000); + + return 0; +} +static DEVICE_ATTR(osc, S_IRUGO, rs5c372_sysfs_show_osc, NULL); + +static int rs5c372_attach(struct i2c_adapter *adapter) +{ + dev_dbg(&adapter->dev, "%s\n", __FUNCTION__); + return i2c_probe(adapter, &addr_data, rs5c372_probe); +} + +static int rs5c372_probe(struct i2c_adapter *adapter, int address, int kind) +{ + int err = 0; + struct i2c_client *client; + struct rtc_device *rtc; + + dev_dbg(&adapter->dev, "%s\n", __FUNCTION__); + + if (!i2c_check_functionality(adapter, I2C_FUNC_I2C)) { + err = -ENODEV; + goto exit; + } + + if (!(client = kzalloc(sizeof(struct i2c_client), GFP_KERNEL))) { + err = -ENOMEM; + goto exit; + } + + /* I2C client */ + client->addr = address; + client->driver = &rs5c372_driver; + client->adapter = adapter; + + strlcpy(client->name, rs5c372_driver.driver.name, I2C_NAME_SIZE); + + /* Inform the i2c layer */ + if ((err = i2c_attach_client(client))) + goto exit_kfree; + + dev_info(&client->dev, "chip found, driver version " DRV_VERSION "\n"); + + rtc = rtc_device_register(rs5c372_driver.driver.name, &client->dev, + &rs5c372_rtc_ops, THIS_MODULE); + + if (IS_ERR(rtc)) { + err = PTR_ERR(rtc); + dev_err(&client->dev, + "unable to register the class device\n"); + goto exit_detach; + } + + i2c_set_clientdata(client, rtc); + + device_create_file(&client->dev, &dev_attr_trim); + device_create_file(&client->dev, &dev_attr_osc); + + return 0; + +exit_detach: + i2c_detach_client(client); + +exit_kfree: + kfree(client); + +exit: + return err; +} + +static int rs5c372_detach(struct i2c_client *client) +{ + int err; + struct rtc_device *rtc = i2c_get_clientdata(client); + + dev_dbg(&client->dev, "%s\n", __FUNCTION__); + + if (rtc) + rtc_device_unregister(rtc); + + if ((err = i2c_detach_client(client))) + return err; + + kfree(client); + + return 0; +} + +static __init int rs5c372_init(void) +{ + return i2c_add_driver(&rs5c372_driver); +} + +static __exit void rs5c372_exit(void) +{ + i2c_del_driver(&rs5c372_driver); +} + +module_init(rs5c372_init); +module_exit(rs5c372_exit); + +MODULE_AUTHOR( + "Pavel Mironchik <pmironchik@optifacio.net>, " + "Alessandro Zummo <a.zummo@towertech.it>"); 
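/*
 * A minimal, self-contained illustration of the BCD <-> struct rtc_time
 * conventions the rs5c372 code above relies on: the chip stores a 1-12
 * BCD month and a two-digit year counted from 2000, while struct rtc_time
 * expects a zero-based month and years since 1900.  The helper and the
 * register values are invented for the example and are not part of the
 * driver.
 */
#include <linux/bcd.h>
#include <linux/rtc.h>

static void rs5c372_bcd_example(struct rtc_time *tm)
{
	unsigned char month_reg = 0x12;		/* BCD 12 -> December */
	unsigned char year_reg  = 0x06;		/* BCD 06 -> 2006 */

	tm->tm_mon  = BCD2BIN(month_reg) - 1;	/* 11, zero-based */
	tm->tm_year = BCD2BIN(year_reg) + 100;	/* 106 == 2006 - 1900 */

	/* and the inverse, as in rs5c372_set_datetime() */
	month_reg = BIN2BCD(tm->tm_mon + 1);
	year_reg  = BIN2BCD(tm->tm_year - 100);
}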
+MODULE_DESCRIPTION("Ricoh RS5C372 RTC driver"); +MODULE_LICENSE("GPL"); +MODULE_VERSION(DRV_VERSION); diff --git a/drivers/rtc/rtc-sa1100.c b/drivers/rtc/rtc-sa1100.c new file mode 100644 index 000000000000..83b2bb480a16 --- /dev/null +++ b/drivers/rtc/rtc-sa1100.c @@ -0,0 +1,388 @@ +/* + * Real Time Clock interface for StrongARM SA1x00 and XScale PXA2xx + * + * Copyright (c) 2000 Nils Faerber + * + * Based on rtc.c by Paul Gortmaker + * + * Original Driver by Nils Faerber <nils@kernelconcepts.de> + * + * Modifications from: + * CIH <cih@coventive.com> + * Nicolas Pitre <nico@cam.org> + * Andrew Christian <andrew.christian@hp.com> + * + * Converted to the RTC subsystem and Driver Model + * by Richard Purdie <rpurdie@rpsys.net> + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version + * 2 of the License, or (at your option) any later version. + */ + +#include <linux/platform_device.h> +#include <linux/module.h> +#include <linux/rtc.h> +#include <linux/init.h> +#include <linux/fs.h> +#include <linux/interrupt.h> +#include <linux/string.h> +#include <linux/pm.h> + +#include <asm/bitops.h> +#include <asm/hardware.h> +#include <asm/irq.h> +#include <asm/rtc.h> + +#ifdef CONFIG_ARCH_PXA +#include <asm/arch/pxa-regs.h> +#endif + +#define TIMER_FREQ CLOCK_TICK_RATE +#define RTC_DEF_DIVIDER 32768 - 1 +#define RTC_DEF_TRIM 0 + +static unsigned long rtc_freq = 1024; +static struct rtc_time rtc_alarm; +static spinlock_t sa1100_rtc_lock = SPIN_LOCK_UNLOCKED; + +static int rtc_update_alarm(struct rtc_time *alrm) +{ + struct rtc_time alarm_tm, now_tm; + unsigned long now, time; + int ret; + + do { + now = RCNR; + rtc_time_to_tm(now, &now_tm); + rtc_next_alarm_time(&alarm_tm, &now_tm, alrm); + ret = rtc_tm_to_time(&alarm_tm, &time); + if (ret != 0) + break; + + RTSR = RTSR & (RTSR_HZE|RTSR_ALE|RTSR_AL); + RTAR = time; + } while (now != RCNR); + + return ret; +} + +static irqreturn_t sa1100_rtc_interrupt(int irq, void *dev_id, + struct pt_regs *regs) +{ + struct platform_device *pdev = to_platform_device(dev_id); + struct rtc_device *rtc = platform_get_drvdata(pdev); + unsigned int rtsr; + unsigned long events = 0; + + spin_lock(&sa1100_rtc_lock); + + rtsr = RTSR; + /* clear interrupt sources */ + RTSR = 0; + RTSR = (RTSR_AL | RTSR_HZ) & (rtsr >> 2); + + /* clear alarm interrupt if it has occurred */ + if (rtsr & RTSR_AL) + rtsr &= ~RTSR_ALE; + RTSR = rtsr & (RTSR_ALE | RTSR_HZE); + + /* update irq data & counter */ + if (rtsr & RTSR_AL) + events |= RTC_AF | RTC_IRQF; + if (rtsr & RTSR_HZ) + events |= RTC_UF | RTC_IRQF; + + rtc_update_irq(&rtc->class_dev, 1, events); + + if (rtsr & RTSR_AL && rtc_periodic_alarm(&rtc_alarm)) + rtc_update_alarm(&rtc_alarm); + + spin_unlock(&sa1100_rtc_lock); + + return IRQ_HANDLED; +} + +static int rtc_timer1_count; + +static irqreturn_t timer1_interrupt(int irq, void *dev_id, + struct pt_regs *regs) +{ + struct platform_device *pdev = to_platform_device(dev_id); + struct rtc_device *rtc = platform_get_drvdata(pdev); + + /* + * If we match for the first time, rtc_timer1_count will be 1. + * Otherwise, we wrapped around (very unlikely but + * still possible) so compute the amount of missed periods. + * The match reg is updated only when the data is actually retrieved + * to avoid unnecessary interrupts. 
+ */ + OSSR = OSSR_M1; /* clear match on timer1 */ + + rtc_update_irq(&rtc->class_dev, rtc_timer1_count, RTC_PF | RTC_IRQF); + + if (rtc_timer1_count == 1) + rtc_timer1_count = (rtc_freq * ((1<<30)/(TIMER_FREQ>>2))); + + return IRQ_HANDLED; +} + +static int sa1100_rtc_read_callback(struct device *dev, int data) +{ + if (data & RTC_PF) { + /* interpolate missed periods and set match for the next */ + unsigned long period = TIMER_FREQ/rtc_freq; + unsigned long oscr = OSCR; + unsigned long osmr1 = OSMR1; + unsigned long missed = (oscr - osmr1)/period; + data += missed << 8; + OSSR = OSSR_M1; /* clear match on timer 1 */ + OSMR1 = osmr1 + (missed + 1)*period; + /* Ensure we didn't miss another match in the mean time. + * Here we compare (match - OSCR) 8 instead of 0 -- + * see comment in pxa_timer_interrupt() for explanation. + */ + while( (signed long)((osmr1 = OSMR1) - OSCR) <= 8 ) { + data += 0x100; + OSSR = OSSR_M1; /* clear match on timer 1 */ + OSMR1 = osmr1 + period; + } + } + return data; +} + +static int sa1100_rtc_open(struct device *dev) +{ + int ret; + + ret = request_irq(IRQ_RTC1Hz, sa1100_rtc_interrupt, SA_INTERRUPT, + "rtc 1Hz", dev); + if (ret) { + printk(KERN_ERR "rtc: IRQ%d already in use.\n", IRQ_RTC1Hz); + goto fail_ui; + } + ret = request_irq(IRQ_RTCAlrm, sa1100_rtc_interrupt, SA_INTERRUPT, + "rtc Alrm", dev); + if (ret) { + printk(KERN_ERR "rtc: IRQ%d already in use.\n", IRQ_RTCAlrm); + goto fail_ai; + } + ret = request_irq(IRQ_OST1, timer1_interrupt, SA_INTERRUPT, + "rtc timer", dev); + if (ret) { + printk(KERN_ERR "rtc: IRQ%d already in use.\n", IRQ_OST1); + goto fail_pi; + } + return 0; + + fail_pi: + free_irq(IRQ_RTCAlrm, NULL); + fail_ai: + free_irq(IRQ_RTC1Hz, NULL); + fail_ui: + return ret; +} + +static void sa1100_rtc_release(struct device *dev) +{ + spin_lock_irq(&sa1100_rtc_lock); + RTSR = 0; + OIER &= ~OIER_E1; + OSSR = OSSR_M1; + spin_unlock_irq(&sa1100_rtc_lock); + + free_irq(IRQ_OST1, dev); + free_irq(IRQ_RTCAlrm, dev); + free_irq(IRQ_RTC1Hz, dev); +} + + +static int sa1100_rtc_ioctl(struct device *dev, unsigned int cmd, + unsigned long arg) +{ + switch(cmd) { + case RTC_AIE_OFF: + spin_lock_irq(&sa1100_rtc_lock); + RTSR &= ~RTSR_ALE; + spin_unlock_irq(&sa1100_rtc_lock); + return 0; + case RTC_AIE_ON: + spin_lock_irq(&sa1100_rtc_lock); + RTSR |= RTSR_ALE; + spin_unlock_irq(&sa1100_rtc_lock); + return 0; + case RTC_UIE_OFF: + spin_lock_irq(&sa1100_rtc_lock); + RTSR &= ~RTSR_HZE; + spin_unlock_irq(&sa1100_rtc_lock); + return 0; + case RTC_UIE_ON: + spin_lock_irq(&sa1100_rtc_lock); + RTSR |= RTSR_HZE; + spin_unlock_irq(&sa1100_rtc_lock); + return 0; + case RTC_PIE_OFF: + spin_lock_irq(&sa1100_rtc_lock); + OIER &= ~OIER_E1; + spin_unlock_irq(&sa1100_rtc_lock); + return 0; + case RTC_PIE_ON: + if ((rtc_freq > 64) && !capable(CAP_SYS_RESOURCE)) + return -EACCES; + spin_lock_irq(&sa1100_rtc_lock); + OSMR1 = TIMER_FREQ/rtc_freq + OSCR; + OIER |= OIER_E1; + rtc_timer1_count = 1; + spin_unlock_irq(&sa1100_rtc_lock); + return 0; + case RTC_IRQP_READ: + return put_user(rtc_freq, (unsigned long *)arg); + case RTC_IRQP_SET: + if (arg < 1 || arg > TIMER_FREQ) + return -EINVAL; + if ((arg > 64) && (!capable(CAP_SYS_RESOURCE))) + return -EACCES; + rtc_freq = arg; + return 0; + } + return -EINVAL; +} + +static int sa1100_rtc_read_time(struct device *dev, struct rtc_time *tm) +{ + rtc_time_to_tm(RCNR, tm); + return 0; +} + +static int sa1100_rtc_set_time(struct device *dev, struct rtc_time *tm) +{ + unsigned long time; + int ret; + + ret = rtc_tm_to_time(tm, &time); + if (ret 
== 0) + RCNR = time; + return ret; +} + +static int sa1100_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *alrm) +{ + memcpy(&alrm->time, &rtc_alarm, sizeof(struct rtc_time)); + alrm->pending = RTSR & RTSR_AL ? 1 : 0; + return 0; +} + +static int sa1100_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alrm) +{ + int ret; + + spin_lock_irq(&sa1100_rtc_lock); + ret = rtc_update_alarm(&alrm->time); + if (ret == 0) { + memcpy(&rtc_alarm, &alrm->time, sizeof(struct rtc_time)); + + if (alrm->enabled) + enable_irq_wake(IRQ_RTCAlrm); + else + disable_irq_wake(IRQ_RTCAlrm); + } + spin_unlock_irq(&sa1100_rtc_lock); + + return ret; +} + +static int sa1100_rtc_proc(struct device *dev, struct seq_file *seq) +{ + seq_printf(seq, "trim/divider\t: 0x%08x\n", RTTR); + seq_printf(seq, "alarm_IRQ\t: %s\n", + (RTSR & RTSR_ALE) ? "yes" : "no" ); + seq_printf(seq, "update_IRQ\t: %s\n", + (RTSR & RTSR_HZE) ? "yes" : "no"); + seq_printf(seq, "periodic_IRQ\t: %s\n", + (OIER & OIER_E1) ? "yes" : "no"); + seq_printf(seq, "periodic_freq\t: %ld\n", rtc_freq); + + return 0; +} + +static struct rtc_class_ops sa1100_rtc_ops = { + .open = sa1100_rtc_open, + .read_callback = sa1100_rtc_read_callback, + .release = sa1100_rtc_release, + .ioctl = sa1100_rtc_ioctl, + .read_time = sa1100_rtc_read_time, + .set_time = sa1100_rtc_set_time, + .read_alarm = sa1100_rtc_read_alarm, + .set_alarm = sa1100_rtc_set_alarm, + .proc = sa1100_rtc_proc, +}; + +static int sa1100_rtc_probe(struct platform_device *pdev) +{ + struct rtc_device *rtc; + + /* + * According to the manual we should be able to let RTTR be zero + * and then a default diviser for a 32.768KHz clock is used. + * Apparently this doesn't work, at least for my SA1110 rev 5. + * If the clock divider is uninitialized then reset it to the + * default value to get the 1Hz clock. 
+ */ + if (RTTR == 0) { + RTTR = RTC_DEF_DIVIDER + (RTC_DEF_TRIM << 16); + printk(KERN_WARNING "rtc: warning: initializing default clock divider/trim value\n"); + /* The current RTC value probably doesn't make sense either */ + RCNR = 0; + } + + rtc = rtc_device_register(pdev->name, &pdev->dev, &sa1100_rtc_ops, + THIS_MODULE); + + if (IS_ERR(rtc)) { + dev_err(&pdev->dev, "Unable to register the RTC device\n"); + return PTR_ERR(rtc); + } + + platform_set_drvdata(pdev, rtc); + + dev_info(&pdev->dev, "SA11xx/PXA2xx RTC Registered\n"); + + return 0; +} + +static int sa1100_rtc_remove(struct platform_device *pdev) +{ + struct rtc_device *rtc = platform_get_drvdata(pdev); + + if (rtc) + rtc_device_unregister(rtc); + + return 0; +} + +static struct platform_driver sa1100_rtc_driver = { + .probe = sa1100_rtc_probe, + .remove = sa1100_rtc_remove, + .driver = { + .name = "sa1100-rtc", + }, +}; + +static int __init sa1100_rtc_init(void) +{ + return platform_driver_register(&sa1100_rtc_driver); +} + +static void __exit sa1100_rtc_exit(void) +{ + platform_driver_unregister(&sa1100_rtc_driver); +} + +module_init(sa1100_rtc_init); +module_exit(sa1100_rtc_exit); + +MODULE_AUTHOR("Richard Purdie <rpurdie@rpsys.net>"); +MODULE_DESCRIPTION("SA11x0/PXA2xx Realtime Clock Driver (RTC)"); +MODULE_LICENSE("GPL"); diff --git a/drivers/rtc/rtc-sysfs.c b/drivers/rtc/rtc-sysfs.c new file mode 100644 index 000000000000..7c1f3d2e53c4 --- /dev/null +++ b/drivers/rtc/rtc-sysfs.c @@ -0,0 +1,124 @@ +/* + * RTC subsystem, sysfs interface + * + * Copyright (C) 2005 Tower Technologies + * Author: Alessandro Zummo <a.zummo@towertech.it> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
+*/ + +#include <linux/module.h> +#include <linux/rtc.h> + +/* device attributes */ + +static ssize_t rtc_sysfs_show_name(struct class_device *dev, char *buf) +{ + return sprintf(buf, "%s\n", to_rtc_device(dev)->name); +} +static CLASS_DEVICE_ATTR(name, S_IRUGO, rtc_sysfs_show_name, NULL); + +static ssize_t rtc_sysfs_show_date(struct class_device *dev, char *buf) +{ + ssize_t retval; + struct rtc_time tm; + + retval = rtc_read_time(dev, &tm); + if (retval == 0) { + retval = sprintf(buf, "%04d-%02d-%02d\n", + tm.tm_year + 1900, tm.tm_mon + 1, tm.tm_mday); + } + + return retval; +} +static CLASS_DEVICE_ATTR(date, S_IRUGO, rtc_sysfs_show_date, NULL); + +static ssize_t rtc_sysfs_show_time(struct class_device *dev, char *buf) +{ + ssize_t retval; + struct rtc_time tm; + + retval = rtc_read_time(dev, &tm); + if (retval == 0) { + retval = sprintf(buf, "%02d:%02d:%02d\n", + tm.tm_hour, tm.tm_min, tm.tm_sec); + } + + return retval; +} +static CLASS_DEVICE_ATTR(time, S_IRUGO, rtc_sysfs_show_time, NULL); + +static ssize_t rtc_sysfs_show_since_epoch(struct class_device *dev, char *buf) +{ + ssize_t retval; + struct rtc_time tm; + + retval = rtc_read_time(dev, &tm); + if (retval == 0) { + unsigned long time; + rtc_tm_to_time(&tm, &time); + retval = sprintf(buf, "%lu\n", time); + } + + return retval; +} +static CLASS_DEVICE_ATTR(since_epoch, S_IRUGO, rtc_sysfs_show_since_epoch, NULL); + +static struct attribute *rtc_attrs[] = { + &class_device_attr_name.attr, + &class_device_attr_date.attr, + &class_device_attr_time.attr, + &class_device_attr_since_epoch.attr, + NULL, +}; + +static struct attribute_group rtc_attr_group = { + .attrs = rtc_attrs, +}; + +static int __devinit rtc_sysfs_add_device(struct class_device *class_dev, + struct class_interface *class_intf) +{ + int err; + + dev_info(class_dev->dev, "rtc intf: sysfs\n"); + + err = sysfs_create_group(&class_dev->kobj, &rtc_attr_group); + if (err) + dev_err(class_dev->dev, + "failed to create sysfs attributes\n"); + + return err; +} + +static void rtc_sysfs_remove_device(struct class_device *class_dev, + struct class_interface *class_intf) +{ + sysfs_remove_group(&class_dev->kobj, &rtc_attr_group); +} + +/* interface registration */ + +static struct class_interface rtc_sysfs_interface = { + .add = &rtc_sysfs_add_device, + .remove = &rtc_sysfs_remove_device, +}; + +static int __init rtc_sysfs_init(void) +{ + return rtc_interface_register(&rtc_sysfs_interface); +} + +static void __exit rtc_sysfs_exit(void) +{ + class_interface_unregister(&rtc_sysfs_interface); +} + +module_init(rtc_sysfs_init); +module_exit(rtc_sysfs_exit); + +MODULE_AUTHOR("Alessandro Zummo <a.zummo@towertech.it>"); +MODULE_DESCRIPTION("RTC class sysfs interface"); +MODULE_LICENSE("GPL"); diff --git a/drivers/rtc/rtc-test.c b/drivers/rtc/rtc-test.c new file mode 100644 index 000000000000..43d107487820 --- /dev/null +++ b/drivers/rtc/rtc-test.c @@ -0,0 +1,204 @@ +/* + * An RTC test device/driver + * Copyright (C) 2005 Tower Technologies + * Author: Alessandro Zummo <a.zummo@towertech.it> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
+ */ + +#include <linux/module.h> +#include <linux/err.h> +#include <linux/rtc.h> +#include <linux/platform_device.h> + +static struct platform_device *test0 = NULL, *test1 = NULL; + +static int test_rtc_read_alarm(struct device *dev, + struct rtc_wkalrm *alrm) +{ + return 0; +} + +static int test_rtc_set_alarm(struct device *dev, + struct rtc_wkalrm *alrm) +{ + return 0; +} + +static int test_rtc_read_time(struct device *dev, + struct rtc_time *tm) +{ + rtc_time_to_tm(get_seconds(), tm); + return 0; +} + +static int test_rtc_set_time(struct device *dev, + struct rtc_time *tm) +{ + return 0; +} + +static int test_rtc_set_mmss(struct device *dev, unsigned long secs) +{ + return 0; +} + +static int test_rtc_proc(struct device *dev, struct seq_file *seq) +{ + struct platform_device *plat_dev = to_platform_device(dev); + + seq_printf(seq, "24hr\t\t: yes\n"); + seq_printf(seq, "test\t\t: yes\n"); + seq_printf(seq, "id\t\t: %d\n", plat_dev->id); + + return 0; +} + +static int test_rtc_ioctl(struct device *dev, unsigned int cmd, + unsigned long arg) +{ + /* We do support interrupts, they're generated + * using the sysfs interface. + */ + switch (cmd) { + case RTC_PIE_ON: + case RTC_PIE_OFF: + case RTC_UIE_ON: + case RTC_UIE_OFF: + case RTC_AIE_ON: + case RTC_AIE_OFF: + return 0; + + default: + return -EINVAL; + } +} + +static struct rtc_class_ops test_rtc_ops = { + .proc = test_rtc_proc, + .read_time = test_rtc_read_time, + .set_time = test_rtc_set_time, + .read_alarm = test_rtc_read_alarm, + .set_alarm = test_rtc_set_alarm, + .set_mmss = test_rtc_set_mmss, + .ioctl = test_rtc_ioctl, +}; + +static ssize_t test_irq_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + return sprintf(buf, "%d\n", 42); +} +static ssize_t test_irq_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + int retval; + struct platform_device *plat_dev = to_platform_device(dev); + struct rtc_device *rtc = platform_get_drvdata(plat_dev); + + retval = count; + if (strncmp(buf, "tick", 4) == 0) + rtc_update_irq(&rtc->class_dev, 1, RTC_PF | RTC_IRQF); + else if (strncmp(buf, "alarm", 5) == 0) + rtc_update_irq(&rtc->class_dev, 1, RTC_AF | RTC_IRQF); + else if (strncmp(buf, "update", 6) == 0) + rtc_update_irq(&rtc->class_dev, 1, RTC_UF | RTC_IRQF); + else + retval = -EINVAL; + + return retval; +} +static DEVICE_ATTR(irq, S_IRUGO | S_IWUSR, test_irq_show, test_irq_store); + +static int test_probe(struct platform_device *plat_dev) +{ + int err; + struct rtc_device *rtc = rtc_device_register("test", &plat_dev->dev, + &test_rtc_ops, THIS_MODULE); + if (IS_ERR(rtc)) { + err = PTR_ERR(rtc); + dev_err(&plat_dev->dev, + "unable to register the class device\n"); + return err; + } + device_create_file(&plat_dev->dev, &dev_attr_irq); + + platform_set_drvdata(plat_dev, rtc); + + return 0; +} + +static int __devexit test_remove(struct platform_device *plat_dev) +{ + struct rtc_device *rtc = platform_get_drvdata(plat_dev); + + rtc_device_unregister(rtc); + device_remove_file(&plat_dev->dev, &dev_attr_irq); + + return 0; +} + +static struct platform_driver test_drv = { + .probe = test_probe, + .remove = __devexit_p(test_remove), + .driver = { + .name = "rtc-test", + .owner = THIS_MODULE, + }, +}; + +static int __init test_init(void) +{ + int err; + + if ((err = platform_driver_register(&test_drv))) + return err; + + if ((test0 = platform_device_alloc("rtc-test", 0)) == NULL) { + err = -ENOMEM; + goto exit_driver_unregister; + } + + if ((test1 = platform_device_alloc("rtc-test", 1)) 
== NULL) { + err = -ENOMEM; + goto exit_free_test0; + } + + if ((err = platform_device_add(test0))) + goto exit_free_test1; + + if ((err = platform_device_add(test1))) + goto exit_device_unregister; + + return 0; + +exit_device_unregister: + platform_device_unregister(test0); + +exit_free_test1: + platform_device_put(test1); + +exit_free_test0: + platform_device_put(test0); + +exit_driver_unregister: + platform_driver_unregister(&test_drv); + return err; +} + +static void __exit test_exit(void) +{ + platform_device_unregister(test0); + platform_device_unregister(test1); + platform_driver_unregister(&test_drv); +} + +MODULE_AUTHOR("Alessandro Zummo <a.zummo@towertech.it>"); +MODULE_DESCRIPTION("RTC test driver/device"); +MODULE_LICENSE("GPL"); + +module_init(test_init); +module_exit(test_exit); diff --git a/drivers/i2c/chips/x1205.c b/drivers/rtc/rtc-x1205.c index 245fffa92dbd..621d17afc0d9 100644 --- a/drivers/i2c/chips/x1205.c +++ b/drivers/rtc/rtc-x1205.c @@ -1,32 +1,25 @@ /* - * x1205.c - An i2c driver for the Xicor X1205 RTC - * Copyright 2004 Karen Spearel - * Copyright 2005 Alessandro Zummo + * An i2c driver for the Xicor/Intersil X1205 RTC + * Copyright 2004 Karen Spearel + * Copyright 2005 Alessandro Zummo * - * please send all reports to: - * kas11 at tampabay dot rr dot com - * a dot zummo at towertech dot it + * please send all reports to: + * Karen Spearel <kas111 at gmail dot com> + * Alessandro Zummo <a.zummo@towertech.it> * - * based on the other drivers in this same directory. + * based on a lot of other RTC drivers. * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. */ -#include <linux/module.h> -#include <linux/init.h> -#include <linux/slab.h> #include <linux/i2c.h> -#include <linux/string.h> #include <linux/bcd.h> #include <linux/rtc.h> -#include <linux/list.h> +#include <linux/delay.h> -#include <linux/x1205.h> - -#define DRV_VERSION "0.9.9" +#define DRV_VERSION "1.0.6" /* Addresses to scan: none. This chip is located at * 0x6f and uses a two bytes register addressing. @@ -40,8 +33,6 @@ static unsigned short normal_i2c[] = { I2C_CLIENT_END }; /* Insmod parameters */ I2C_CLIENT_INSMOD; -I2C_CLIENT_MODULE_PARM(hctosys, - "Set the system time from the hardware clock upon initialization"); /* offsets into CCR area */ @@ -101,107 +92,35 @@ I2C_CLIENT_MODULE_PARM(hctosys, static int x1205_attach(struct i2c_adapter *adapter); static int x1205_detach(struct i2c_client *client); static int x1205_probe(struct i2c_adapter *adapter, int address, int kind); -static int x1205_command(struct i2c_client *client, unsigned int cmd, - void *arg); static struct i2c_driver x1205_driver = { - .driver = { + .driver = { .name = "x1205", }, + .id = I2C_DRIVERID_X1205, .attach_adapter = &x1205_attach, .detach_client = &x1205_detach, }; -struct x1205_data { - struct i2c_client client; - struct list_head list; - unsigned int epoch; -}; - -static const unsigned char days_in_mo[] = - { 31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31 }; - -static LIST_HEAD(x1205_clients); - -/* Workaround until the I2C subsytem will allow to send - * commands to a specific client. 
This function will send the command - * to the first client. - */ -int x1205_do_command(unsigned int cmd, void *arg) -{ - struct list_head *walk; - struct list_head *tmp; - struct x1205_data *data; - - list_for_each_safe(walk, tmp, &x1205_clients) { - data = list_entry(walk, struct x1205_data, list); - return x1205_command(&data->client, cmd, arg); - } - - return -ENODEV; -} - -#define is_leap(year) \ - ((year) % 4 == 0 && ((year) % 100 != 0 || (year) % 400 == 0)) - -/* make sure the rtc_time values are in bounds */ -static int x1205_validate_tm(struct rtc_time *tm) -{ - int year = tm->tm_year + 1900; - - if ((tm->tm_year < 70) || (tm->tm_year > 255)) - return -EINVAL; - - if ((tm->tm_mon > 11) || (tm->tm_mday == 0)) - return -EINVAL; - - if (tm->tm_mday > days_in_mo[tm->tm_mon] - + ((tm->tm_mon == 1) && is_leap(year))) - return -EINVAL; - - if ((tm->tm_hour >= 24) || (tm->tm_min >= 60) || (tm->tm_sec >= 60)) - return -EINVAL; - - return 0; -} - /* * In the routines that deal directly with the x1205 hardware, we use * rtc_time -- month 0-11, hour 0-23, yr = calendar year-epoch * Epoch is initialized as 2000. Time is set to UTC. */ static int x1205_get_datetime(struct i2c_client *client, struct rtc_time *tm, - u8 reg_base) + unsigned char reg_base) { unsigned char dt_addr[2] = { 0, reg_base }; - static unsigned char sr_addr[2] = { 0, X1205_REG_SR }; - unsigned char buf[8], sr; + unsigned char buf[8]; struct i2c_msg msgs[] = { - { client->addr, 0, 2, sr_addr }, /* setup read ptr */ - { client->addr, I2C_M_RD, 1, &sr }, /* read status */ { client->addr, 0, 2, dt_addr }, /* setup read ptr */ { client->addr, I2C_M_RD, 8, buf }, /* read date */ }; - struct x1205_data *data = i2c_get_clientdata(client); - - /* read status register */ - if ((i2c_transfer(client->adapter, &msgs[0], 2)) != 2) { - dev_err(&client->dev, "%s: read error\n", __FUNCTION__); - return -EIO; - } - - /* check for battery failure */ - if (sr & X1205_SR_RTCF) { - dev_warn(&client->dev, - "Clock had a power failure, you must set the date.\n"); - return -EINVAL; - } - /* read date registers */ - if ((i2c_transfer(client->adapter, &msgs[2], 2)) != 2) { + if ((i2c_transfer(client->adapter, &msgs[0], 2)) != 2) { dev_err(&client->dev, "%s: read error\n", __FUNCTION__); return -EIO; } @@ -217,9 +136,9 @@ static int x1205_get_datetime(struct i2c_client *client, struct rtc_time *tm, tm->tm_min = BCD2BIN(buf[CCR_MIN]); tm->tm_hour = BCD2BIN(buf[CCR_HOUR] & 0x3F); /* hr is 0-23 */ tm->tm_mday = BCD2BIN(buf[CCR_MDAY]); - tm->tm_mon = BCD2BIN(buf[CCR_MONTH]); - data->epoch = BCD2BIN(buf[CCR_Y2K]) * 100; - tm->tm_year = BCD2BIN(buf[CCR_YEAR]) + data->epoch - 1900; + tm->tm_mon = BCD2BIN(buf[CCR_MONTH]) - 1; /* mon is 0-11 */ + tm->tm_year = BCD2BIN(buf[CCR_YEAR]) + + (BCD2BIN(buf[CCR_Y2K]) * 100) - 1900; tm->tm_wday = buf[CCR_WDAY]; dev_dbg(&client->dev, "%s: tm is secs=%d, mins=%d, hours=%d, " @@ -231,11 +150,28 @@ static int x1205_get_datetime(struct i2c_client *client, struct rtc_time *tm, return 0; } +static int x1205_get_status(struct i2c_client *client, unsigned char *sr) +{ + static unsigned char sr_addr[2] = { 0, X1205_REG_SR }; + + struct i2c_msg msgs[] = { + { client->addr, 0, 2, sr_addr }, /* setup read ptr */ + { client->addr, I2C_M_RD, 1, sr }, /* read status */ + }; + + /* read status register */ + if ((i2c_transfer(client->adapter, &msgs[0], 2)) != 2) { + dev_err(&client->dev, "%s: read error\n", __FUNCTION__); + return -EIO; + } + + return 0; +} + static int x1205_set_datetime(struct i2c_client *client, struct rtc_time *tm, 
int datetoo, u8 reg_base) { - int i, err, xfer; - + int i, xfer; unsigned char buf[8]; static const unsigned char wel[3] = { 0, X1205_REG_SR, @@ -246,17 +182,10 @@ static int x1205_set_datetime(struct i2c_client *client, struct rtc_time *tm, static const unsigned char diswe[3] = { 0, X1205_REG_SR, 0 }; - struct x1205_data *data = i2c_get_clientdata(client); - - /* check if all values in the tm struct are correct */ - if ((err = x1205_validate_tm(tm)) < 0) - return err; - - dev_dbg(&client->dev, "%s: secs=%d, mins=%d, hours=%d, " - "mday=%d, mon=%d, year=%d, wday=%d\n", + dev_dbg(&client->dev, + "%s: secs=%d, mins=%d, hours=%d\n", __FUNCTION__, - tm->tm_sec, tm->tm_min, tm->tm_hour, - tm->tm_mday, tm->tm_mon, tm->tm_year, tm->tm_wday); + tm->tm_sec, tm->tm_min, tm->tm_hour); buf[CCR_SEC] = BIN2BCD(tm->tm_sec); buf[CCR_MIN] = BIN2BCD(tm->tm_min); @@ -266,26 +195,29 @@ static int x1205_set_datetime(struct i2c_client *client, struct rtc_time *tm, /* should we also set the date? */ if (datetoo) { + dev_dbg(&client->dev, + "%s: mday=%d, mon=%d, year=%d, wday=%d\n", + __FUNCTION__, + tm->tm_mday, tm->tm_mon, tm->tm_year, tm->tm_wday); + buf[CCR_MDAY] = BIN2BCD(tm->tm_mday); - /* month, 0 - 11 */ - buf[CCR_MONTH] = BIN2BCD(tm->tm_mon); + /* month, 1 - 12 */ + buf[CCR_MONTH] = BIN2BCD(tm->tm_mon + 1); - /* year, since 1900 */ - buf[CCR_YEAR] = BIN2BCD(tm->tm_year + 1900 - data->epoch); + /* year, since the rtc epoch*/ + buf[CCR_YEAR] = BIN2BCD(tm->tm_year % 100); buf[CCR_WDAY] = tm->tm_wday & 0x07; - buf[CCR_Y2K] = BIN2BCD(data->epoch / 100); + buf[CCR_Y2K] = BIN2BCD(tm->tm_year / 100); } /* this sequence is required to unlock the chip */ - xfer = i2c_master_send(client, wel, 3); - if (xfer != 3) { + if ((xfer = i2c_master_send(client, wel, 3)) != 3) { dev_err(&client->dev, "%s: wel - %d\n", __FUNCTION__, xfer); return -EIO; } - xfer = i2c_master_send(client, rwel, 3); - if (xfer != 3) { + if ((xfer = i2c_master_send(client, rwel, 3)) != 3) { dev_err(&client->dev, "%s: rwel - %d\n", __FUNCTION__, xfer); return -EIO; } @@ -305,8 +237,7 @@ static int x1205_set_datetime(struct i2c_client *client, struct rtc_time *tm, }; /* disable further writes */ - xfer = i2c_master_send(client, diswe, 3); - if (xfer != 3) { + if ((xfer = i2c_master_send(client, diswe, 3)) != 3) { dev_err(&client->dev, "%s: diswe - %d\n", __FUNCTION__, xfer); return -EIO; } @@ -314,6 +245,20 @@ static int x1205_set_datetime(struct i2c_client *client, struct rtc_time *tm, return 0; } +static int x1205_fix_osc(struct i2c_client *client) +{ + int err; + struct rtc_time tm; + + tm.tm_hour = tm.tm_min = tm.tm_sec = 0; + + if ((err = x1205_set_datetime(client, &tm, 0, X1205_CCR_BASE)) < 0) + dev_err(&client->dev, + "unable to restart the oscillator\n"); + + return err; +} + static int x1205_get_dtrim(struct i2c_client *client, int *trim) { unsigned char dtr; @@ -380,60 +325,9 @@ static int x1205_get_atrim(struct i2c_client *client, int *trim) return 0; } -static int x1205_hctosys(struct i2c_client *client) -{ - int err; - - struct rtc_time tm; - struct timespec tv; - - err = x1205_command(client, X1205_CMD_GETDATETIME, &tm); - - if (err) { - dev_err(&client->dev, - "Unable to set the system clock\n"); - return err; - } - - /* IMPORTANT: the RTC only stores whole seconds. It is arbitrary - * whether it stores the most close value or the value with partial - * seconds truncated. However, it is important that we use it to store - * the truncated value. 
This is because otherwise it is necessary, - * in an rtc sync function, to read both xtime.tv_sec and - * xtime.tv_nsec. On some processors (i.e. ARM), an atomic read - * of >32bits is not possible. So storing the most close value would - * slow down the sync API. So here we have the truncated value and - * the best guess is to add 0.5s. - */ - - tv.tv_nsec = NSEC_PER_SEC >> 1; - - /* WARNING: this is not the C library 'mktime' call, it is a built in - * inline function from include/linux/time.h. It expects (requires) - * the month to be in the range 1-12 - */ - - tv.tv_sec = mktime(tm.tm_year + 1900, tm.tm_mon + 1, - tm.tm_mday, tm.tm_hour, - tm.tm_min, tm.tm_sec); - - do_settimeofday(&tv); - - dev_info(&client->dev, - "setting the system clock to %d-%d-%d %d:%d:%d\n", - tm.tm_year + 1900, tm.tm_mon + 1, - tm.tm_mday, tm.tm_hour, tm.tm_min, - tm.tm_sec); - - return 0; -} - struct x1205_limit { - unsigned char reg; - unsigned char mask; - unsigned char min; - unsigned char max; + unsigned char reg, mask, min, max; }; static int x1205_validate_client(struct i2c_client *client) @@ -477,11 +371,10 @@ static int x1205_validate_client(struct i2c_client *client) { client->addr, I2C_M_RD, 1, &buf }, }; - xfer = i2c_transfer(client->adapter, msgs, 2); - if (xfer != 2) { + if ((xfer = i2c_transfer(client->adapter, msgs, 2)) != 2) { dev_err(&client->adapter->dev, "%s: could not read register %x\n", - __FUNCTION__, addr[1]); + __FUNCTION__, probe_zero_pattern[i]); return -EIO; } @@ -489,7 +382,7 @@ static int x1205_validate_client(struct i2c_client *client) if ((buf & probe_zero_pattern[i+1]) != 0) { dev_err(&client->adapter->dev, "%s: register=%02x, zero pattern=%d, value=%x\n", - __FUNCTION__, addr[1], i, buf); + __FUNCTION__, probe_zero_pattern[i], i, buf); return -ENODEV; } @@ -506,12 +399,10 @@ static int x1205_validate_client(struct i2c_client *client) { client->addr, I2C_M_RD, 1, ® }, }; - xfer = i2c_transfer(client->adapter, msgs, 2); - - if (xfer != 2) { + if ((xfer = i2c_transfer(client->adapter, msgs, 2)) != 2) { dev_err(&client->adapter->dev, "%s: could not read register %x\n", - __FUNCTION__, addr[1]); + __FUNCTION__, probe_limits_pattern[i].reg); return -EIO; } @@ -522,7 +413,8 @@ static int x1205_validate_client(struct i2c_client *client) value < probe_limits_pattern[i].min) { dev_dbg(&client->adapter->dev, "%s: register=%x, lim pattern=%d, value=%d\n", - __FUNCTION__, addr[1], i, value); + __FUNCTION__, probe_limits_pattern[i].reg, + i, value); return -ENODEV; } @@ -531,37 +423,89 @@ static int x1205_validate_client(struct i2c_client *client) return 0; } -static int x1205_attach(struct i2c_adapter *adapter) +static int x1205_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *alrm) { - dev_dbg(&adapter->dev, "%s\n", __FUNCTION__); + return x1205_get_datetime(to_i2c_client(dev), + &alrm->time, X1205_ALM0_BASE); +} - return i2c_probe(adapter, &addr_data, x1205_probe); +static int x1205_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alrm) +{ + return x1205_set_datetime(to_i2c_client(dev), + &alrm->time, 1, X1205_ALM0_BASE); } -int x1205_direct_attach(int adapter_id, - struct i2c_client_address_data *address_data) +static int x1205_rtc_read_time(struct device *dev, struct rtc_time *tm) { - int err; - struct i2c_adapter *adapter = i2c_get_adapter(adapter_id); + return x1205_get_datetime(to_i2c_client(dev), + tm, X1205_CCR_BASE); +} - if (adapter) { - err = i2c_probe(adapter, - address_data, x1205_probe); +static int x1205_rtc_set_time(struct device *dev, struct rtc_time *tm) +{ + 
return x1205_set_datetime(to_i2c_client(dev), + tm, 1, X1205_CCR_BASE); +} - i2c_put_adapter(adapter); +static int x1205_rtc_proc(struct device *dev, struct seq_file *seq) +{ + int err, dtrim, atrim; - return err; - } + seq_printf(seq, "24hr\t\t: yes\n"); - return -ENODEV; + if ((err = x1205_get_dtrim(to_i2c_client(dev), &dtrim)) == 0) + seq_printf(seq, "digital_trim\t: %d ppm\n", dtrim); + + if ((err = x1205_get_atrim(to_i2c_client(dev), &atrim)) == 0) + seq_printf(seq, "analog_trim\t: %d.%02d pF\n", + atrim / 1000, atrim % 1000); + return 0; } -static int x1205_probe(struct i2c_adapter *adapter, int address, int kind) +static struct rtc_class_ops x1205_rtc_ops = { + .proc = x1205_rtc_proc, + .read_time = x1205_rtc_read_time, + .set_time = x1205_rtc_set_time, + .read_alarm = x1205_rtc_read_alarm, + .set_alarm = x1205_rtc_set_alarm, +}; + +static ssize_t x1205_sysfs_show_atrim(struct device *dev, + struct device_attribute *attr, char *buf) { - struct i2c_client *client; - struct x1205_data *data; + int atrim; + + if (x1205_get_atrim(to_i2c_client(dev), &atrim) == 0) + return sprintf(buf, "%d.%02d pF\n", + atrim / 1000, atrim % 1000); + return 0; +} +static DEVICE_ATTR(atrim, S_IRUGO, x1205_sysfs_show_atrim, NULL); + +static ssize_t x1205_sysfs_show_dtrim(struct device *dev, + struct device_attribute *attr, char *buf) +{ + int dtrim; + + if (x1205_get_dtrim(to_i2c_client(dev), &dtrim) == 0) + return sprintf(buf, "%d ppm\n", dtrim); + + return 0; +} +static DEVICE_ATTR(dtrim, S_IRUGO, x1205_sysfs_show_dtrim, NULL); +static int x1205_attach(struct i2c_adapter *adapter) +{ + dev_dbg(&adapter->dev, "%s\n", __FUNCTION__); + return i2c_probe(adapter, &addr_data, x1205_probe); +} + +static int x1205_probe(struct i2c_adapter *adapter, int address, int kind) +{ int err = 0; + unsigned char sr; + struct i2c_client *client; + struct rtc_device *rtc; dev_dbg(&adapter->dev, "%s\n", __FUNCTION__); @@ -570,22 +514,17 @@ static int x1205_probe(struct i2c_adapter *adapter, int address, int kind) goto exit; } - if (!(data = kzalloc(sizeof(struct x1205_data), GFP_KERNEL))) { + if (!(client = kzalloc(sizeof(struct i2c_client), GFP_KERNEL))) { err = -ENOMEM; goto exit; } - /* Initialize our structures */ - data->epoch = 2000; - - client = &data->client; + /* I2C client */ client->addr = address; client->driver = &x1205_driver; client->adapter = adapter; - strlcpy(client->name, "x1205", I2C_NAME_SIZE); - - i2c_set_clientdata(client, data); + strlcpy(client->name, x1205_driver.driver.name, I2C_NAME_SIZE); /* Verify the chip is really an X1205 */ if (kind < 0) { @@ -599,18 +538,43 @@ static int x1205_probe(struct i2c_adapter *adapter, int address, int kind) if ((err = i2c_attach_client(client))) goto exit_kfree; - list_add(&data->list, &x1205_clients); - dev_info(&client->dev, "chip found, driver version " DRV_VERSION "\n"); - /* If requested, set the system time */ - if (hctosys) - x1205_hctosys(client); + rtc = rtc_device_register(x1205_driver.driver.name, &client->dev, + &x1205_rtc_ops, THIS_MODULE); + + if (IS_ERR(rtc)) { + err = PTR_ERR(rtc); + dev_err(&client->dev, + "unable to register the class device\n"); + goto exit_detach; + } + + i2c_set_clientdata(client, rtc); + + /* Check for power failures and eventualy enable the osc */ + if ((err = x1205_get_status(client, &sr)) == 0) { + if (sr & X1205_SR_RTCF) { + dev_err(&client->dev, + "power failure detected, " + "please set the clock\n"); + udelay(50); + x1205_fix_osc(client); + } + } + else + dev_err(&client->dev, "couldn't read status\n"); + + 
device_create_file(&client->dev, &dev_attr_atrim); + device_create_file(&client->dev, &dev_attr_dtrim); return 0; +exit_detach: + i2c_detach_client(client); + exit_kfree: - kfree(data); + kfree(client); exit: return err; @@ -619,61 +583,21 @@ exit: static int x1205_detach(struct i2c_client *client) { int err; - struct x1205_data *data = i2c_get_clientdata(client); + struct rtc_device *rtc = i2c_get_clientdata(client); dev_dbg(&client->dev, "%s\n", __FUNCTION__); + if (rtc) + rtc_device_unregister(rtc); + if ((err = i2c_detach_client(client))) return err; - list_del(&data->list); - - kfree(data); + kfree(client); return 0; } -static int x1205_command(struct i2c_client *client, unsigned int cmd, - void *param) -{ - if (param == NULL) - return -EINVAL; - - if (!capable(CAP_SYS_TIME)) - return -EACCES; - - dev_dbg(&client->dev, "%s: cmd=%d\n", __FUNCTION__, cmd); - - switch (cmd) { - case X1205_CMD_GETDATETIME: - return x1205_get_datetime(client, param, X1205_CCR_BASE); - - case X1205_CMD_SETTIME: - return x1205_set_datetime(client, param, 0, - X1205_CCR_BASE); - - case X1205_CMD_SETDATETIME: - return x1205_set_datetime(client, param, 1, - X1205_CCR_BASE); - - case X1205_CMD_GETALARM: - return x1205_get_datetime(client, param, X1205_ALM0_BASE); - - case X1205_CMD_SETALARM: - return x1205_set_datetime(client, param, 1, - X1205_ALM0_BASE); - - case X1205_CMD_GETDTRIM: - return x1205_get_dtrim(client, param); - - case X1205_CMD_GETATRIM: - return x1205_get_atrim(client, param); - - default: - return -EINVAL; - } -} - static int __init x1205_init(void) { return i2c_add_driver(&x1205_driver); @@ -685,14 +609,11 @@ static void __exit x1205_exit(void) } MODULE_AUTHOR( - "Karen Spearel <kas11@tampabay.rr.com>, " + "Karen Spearel <kas111 at gmail dot com>, " "Alessandro Zummo <a.zummo@towertech.it>"); -MODULE_DESCRIPTION("Xicor X1205 RTC driver"); +MODULE_DESCRIPTION("Xicor/Intersil X1205 RTC driver"); MODULE_LICENSE("GPL"); MODULE_VERSION(DRV_VERSION); -EXPORT_SYMBOL_GPL(x1205_do_command); -EXPORT_SYMBOL_GPL(x1205_direct_attach); - module_init(x1205_init); module_exit(x1205_exit); diff --git a/drivers/s390/block/dasd_devmap.c b/drivers/s390/block/dasd_devmap.c index 2f720108a7e0..c1c6f1381150 100644 --- a/drivers/s390/block/dasd_devmap.c +++ b/drivers/s390/block/dasd_devmap.c @@ -437,8 +437,7 @@ dasd_forget_ranges(void) spin_lock(&dasd_devmap_lock); for (i = 0; i < 256; i++) { list_for_each_entry_safe(devmap, n, &dasd_hashlists[i], list) { - if (devmap->device != NULL) - BUG(); + BUG_ON(devmap->device != NULL); list_del(&devmap->list); kfree(devmap); } @@ -547,8 +546,7 @@ dasd_delete_device(struct dasd_device *device) /* First remove device pointer from devmap. */ devmap = dasd_find_busid(device->cdev->dev.bus_id); - if (IS_ERR(devmap)) - BUG(); + BUG_ON(IS_ERR(devmap)); spin_lock(&dasd_devmap_lock); if (devmap->device != device) { spin_unlock(&dasd_devmap_lock); diff --git a/drivers/s390/char/raw3270.c b/drivers/s390/char/raw3270.c index bd06607a5dcc..eecb2afad5c2 100644 --- a/drivers/s390/char/raw3270.c +++ b/drivers/s390/char/raw3270.c @@ -28,6 +28,7 @@ #include <linux/major.h> #include <linux/kdev_t.h> #include <linux/device.h> +#include <linux/mutex.h> struct class *class3270; @@ -59,7 +60,7 @@ struct raw3270 { #define RAW3270_FLAGS_CONSOLE 8 /* Device is the console. */ /* Semaphore to protect global data of raw3270 (devices, views, etc). */ -static DECLARE_MUTEX(raw3270_sem); +static DEFINE_MUTEX(raw3270_mutex); /* List of 3270 devices. 
*/ static struct list_head raw3270_devices = LIST_HEAD_INIT(raw3270_devices); @@ -815,7 +816,7 @@ raw3270_setup_device(struct ccw_device *cdev, struct raw3270 *rp, char *ascebc) * number for it. Note: there is no device with minor 0, * see special case for fs3270.c:fs3270_open(). */ - down(&raw3270_sem); + mutex_lock(&raw3270_mutex); /* Keep the list sorted. */ minor = RAW3270_FIRSTMINOR; rp->minor = -1; @@ -832,7 +833,7 @@ raw3270_setup_device(struct ccw_device *cdev, struct raw3270 *rp, char *ascebc) rp->minor = minor; list_add_tail(&rp->list, &raw3270_devices); } - up(&raw3270_sem); + mutex_unlock(&raw3270_mutex); /* No free minor number? Then give up. */ if (rp->minor == -1) return -EUSERS; @@ -1003,7 +1004,7 @@ raw3270_add_view(struct raw3270_view *view, struct raw3270_fn *fn, int minor) if (minor <= 0) return -ENODEV; - down(&raw3270_sem); + mutex_lock(&raw3270_mutex); rc = -ENODEV; list_for_each_entry(rp, &raw3270_devices, list) { if (rp->minor != minor) @@ -1024,7 +1025,7 @@ raw3270_add_view(struct raw3270_view *view, struct raw3270_fn *fn, int minor) spin_unlock_irqrestore(get_ccwdev_lock(rp->cdev), flags); break; } - up(&raw3270_sem); + mutex_unlock(&raw3270_mutex); return rc; } @@ -1038,7 +1039,7 @@ raw3270_find_view(struct raw3270_fn *fn, int minor) struct raw3270_view *view, *tmp; unsigned long flags; - down(&raw3270_sem); + mutex_lock(&raw3270_mutex); view = ERR_PTR(-ENODEV); list_for_each_entry(rp, &raw3270_devices, list) { if (rp->minor != minor) @@ -1057,7 +1058,7 @@ raw3270_find_view(struct raw3270_fn *fn, int minor) spin_unlock_irqrestore(get_ccwdev_lock(rp->cdev), flags); break; } - up(&raw3270_sem); + mutex_unlock(&raw3270_mutex); return view; } @@ -1104,7 +1105,7 @@ raw3270_delete_device(struct raw3270 *rp) struct ccw_device *cdev; /* Remove from device chain. */ - down(&raw3270_sem); + mutex_lock(&raw3270_mutex); if (rp->clttydev) class_device_destroy(class3270, MKDEV(IBM_TTY3270_MAJOR, rp->minor)); @@ -1112,7 +1113,7 @@ raw3270_delete_device(struct raw3270 *rp) class_device_destroy(class3270, MKDEV(IBM_FS3270_MAJOR, rp->minor)); list_del_init(&rp->list); - up(&raw3270_sem); + mutex_unlock(&raw3270_mutex); /* Disconnect from ccw_device. 
*/ cdev = rp->cdev; @@ -1208,13 +1209,13 @@ int raw3270_register_notifier(void (*notifier)(int, int)) if (!np) return -ENOMEM; np->notifier = notifier; - down(&raw3270_sem); + mutex_lock(&raw3270_mutex); list_add_tail(&np->list, &raw3270_notifier); list_for_each_entry(rp, &raw3270_devices, list) { get_device(&rp->cdev->dev); notifier(rp->minor, 1); } - up(&raw3270_sem); + mutex_unlock(&raw3270_mutex); return 0; } @@ -1222,14 +1223,14 @@ void raw3270_unregister_notifier(void (*notifier)(int, int)) { struct raw3270_notifier *np; - down(&raw3270_sem); + mutex_lock(&raw3270_mutex); list_for_each_entry(np, &raw3270_notifier, list) if (np->notifier == notifier) { list_del(&np->list); kfree(np); break; } - up(&raw3270_sem); + mutex_unlock(&raw3270_mutex); } /* @@ -1256,10 +1257,10 @@ raw3270_set_online (struct ccw_device *cdev) goto failure; raw3270_create_attributes(rp); set_bit(RAW3270_FLAGS_READY, &rp->flags); - down(&raw3270_sem); + mutex_lock(&raw3270_mutex); list_for_each_entry(np, &raw3270_notifier, list) np->notifier(rp->minor, 1); - up(&raw3270_sem); + mutex_unlock(&raw3270_mutex); return 0; failure: @@ -1307,10 +1308,10 @@ raw3270_remove (struct ccw_device *cdev) } spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags); - down(&raw3270_sem); + mutex_lock(&raw3270_mutex); list_for_each_entry(np, &raw3270_notifier, list) np->notifier(rp->minor, 0); - up(&raw3270_sem); + mutex_unlock(&raw3270_mutex); /* Reset 3270 device. */ raw3270_reset_device(rp); @@ -1370,13 +1371,13 @@ raw3270_init(void) rc = ccw_driver_register(&raw3270_ccw_driver); if (rc == 0) { /* Create attributes for early (= console) device. */ - down(&raw3270_sem); + mutex_lock(&raw3270_mutex); class3270 = class_create(THIS_MODULE, "3270"); list_for_each_entry(rp, &raw3270_devices, list) { get_device(&rp->cdev->dev); raw3270_create_attributes(rp); } - up(&raw3270_sem); + mutex_unlock(&raw3270_mutex); } return rc; } diff --git a/drivers/s390/scsi/zfcp_aux.c b/drivers/s390/scsi/zfcp_aux.c index 95b92f317b6f..395cfc6a344f 100644 --- a/drivers/s390/scsi/zfcp_aux.c +++ b/drivers/s390/scsi/zfcp_aux.c @@ -829,18 +829,6 @@ zfcp_unit_dequeue(struct zfcp_unit *unit) device_unregister(&unit->sysfs_device); } -static void * -zfcp_mempool_alloc(gfp_t gfp_mask, void *size) -{ - return kmalloc((size_t) size, gfp_mask); -} - -static void -zfcp_mempool_free(void *element, void *size) -{ - kfree(element); -} - /* * Allocates a combined QTCB/fsf_req buffer for erp actions and fcp/SCSI * commands. 
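/*
 * The zfcp, lpfc and iscsi_tcp hunks in this patch replace open-coded
 * mempool_create() calls, which needed private kmalloc/kfree wrapper
 * callbacks, with the mempool_create_kmalloc_pool() and
 * mempool_create_slab_pool() helpers.  A minimal sketch of the two
 * styles; the element struct and pool size are invented for the example.
 */
#include <linux/mempool.h>
#include <linux/slab.h>

struct example_elem {
	int payload;
};

static void *example_alloc(gfp_t gfp_mask, void *pool_data)
{
	/* pool_data smuggles the element size through a void pointer */
	return kmalloc((size_t)pool_data, gfp_mask);
}

static void example_free(void *element, void *pool_data)
{
	kfree(element);
}

static mempool_t *example_old_style(void)
{
	/* old style: four arguments, caller-supplied callbacks */
	return mempool_create(16, example_alloc, example_free,
			      (void *)sizeof(struct example_elem));
}

static mempool_t *example_new_style(void)
{
	/* new style: one helper call, same semantics */
	return mempool_create_kmalloc_pool(16,
					   sizeof(struct example_elem));
}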
@@ -853,51 +841,39 @@ static int zfcp_allocate_low_mem_buffers(struct zfcp_adapter *adapter) { adapter->pool.fsf_req_erp = - mempool_create(ZFCP_POOL_FSF_REQ_ERP_NR, - zfcp_mempool_alloc, zfcp_mempool_free, (void *) - sizeof(struct zfcp_fsf_req_pool_element)); - - if (NULL == adapter->pool.fsf_req_erp) + mempool_create_kmalloc_pool(ZFCP_POOL_FSF_REQ_ERP_NR, + sizeof(struct zfcp_fsf_req_pool_element)); + if (!adapter->pool.fsf_req_erp) return -ENOMEM; adapter->pool.fsf_req_scsi = - mempool_create(ZFCP_POOL_FSF_REQ_SCSI_NR, - zfcp_mempool_alloc, zfcp_mempool_free, (void *) - sizeof(struct zfcp_fsf_req_pool_element)); - - if (NULL == adapter->pool.fsf_req_scsi) + mempool_create_kmalloc_pool(ZFCP_POOL_FSF_REQ_SCSI_NR, + sizeof(struct zfcp_fsf_req_pool_element)); + if (!adapter->pool.fsf_req_scsi) return -ENOMEM; adapter->pool.fsf_req_abort = - mempool_create(ZFCP_POOL_FSF_REQ_ABORT_NR, - zfcp_mempool_alloc, zfcp_mempool_free, (void *) - sizeof(struct zfcp_fsf_req_pool_element)); - - if (NULL == adapter->pool.fsf_req_abort) + mempool_create_kmalloc_pool(ZFCP_POOL_FSF_REQ_ABORT_NR, + sizeof(struct zfcp_fsf_req_pool_element)); + if (!adapter->pool.fsf_req_abort) return -ENOMEM; adapter->pool.fsf_req_status_read = - mempool_create(ZFCP_POOL_STATUS_READ_NR, - zfcp_mempool_alloc, zfcp_mempool_free, - (void *) sizeof(struct zfcp_fsf_req)); - - if (NULL == adapter->pool.fsf_req_status_read) + mempool_create_kmalloc_pool(ZFCP_POOL_STATUS_READ_NR, + sizeof(struct zfcp_fsf_req)); + if (!adapter->pool.fsf_req_status_read) return -ENOMEM; adapter->pool.data_status_read = - mempool_create(ZFCP_POOL_STATUS_READ_NR, - zfcp_mempool_alloc, zfcp_mempool_free, - (void *) sizeof(struct fsf_status_read_buffer)); - - if (NULL == adapter->pool.data_status_read) + mempool_create_kmalloc_pool(ZFCP_POOL_STATUS_READ_NR, + sizeof(struct fsf_status_read_buffer)); + if (!adapter->pool.data_status_read) return -ENOMEM; adapter->pool.data_gid_pn = - mempool_create(ZFCP_POOL_DATA_GID_PN_NR, - zfcp_mempool_alloc, zfcp_mempool_free, (void *) - sizeof(struct zfcp_gid_pn_data)); - - if (NULL == adapter->pool.data_gid_pn) + mempool_create_kmalloc_pool(ZFCP_POOL_DATA_GID_PN_NR, + sizeof(struct zfcp_gid_pn_data)); + if (!adapter->pool.data_gid_pn) return -ENOMEM; return 0; diff --git a/drivers/scsi/3w-9xxx.c b/drivers/scsi/3w-9xxx.c index 0ab26d01877b..0d2b447c50ed 100644 --- a/drivers/scsi/3w-9xxx.c +++ b/drivers/scsi/3w-9xxx.c @@ -1026,7 +1026,7 @@ static void twa_free_request_id(TW_Device_Extension *tw_dev, int request_id) tw_dev->free_tail = (tw_dev->free_tail + 1) % TW_Q_LENGTH; } /* End twa_free_request_id() */ -/* This function will get parameter table entires from the firmware */ +/* This function will get parameter table entries from the firmware */ static void *twa_get_param(TW_Device_Extension *tw_dev, int request_id, int table_id, int parameter_id, int parameter_size_bytes) { TW_Command_Full *full_command_packet; diff --git a/drivers/scsi/BusLogic.c b/drivers/scsi/BusLogic.c index 1c459343292b..bde3d5834ade 100644 --- a/drivers/scsi/BusLogic.c +++ b/drivers/scsi/BusLogic.c @@ -41,6 +41,8 @@ #include <linux/stat.h> #include <linux/pci.h> #include <linux/spinlock.h> +#include <linux/jiffies.h> +#include <linux/dma-mapping.h> #include <scsi/scsicam.h> #include <asm/dma.h> @@ -676,7 +678,7 @@ static int __init BusLogic_InitializeMultiMasterProbeInfo(struct BusLogic_HostAd if (pci_enable_device(PCI_Device)) continue; - if (pci_set_dma_mask(PCI_Device, (u64) 0xffffffff)) + if (pci_set_dma_mask(PCI_Device, DMA_32BIT_MASK )) 
continue; Bus = PCI_Device->bus->number; @@ -831,7 +833,7 @@ static int __init BusLogic_InitializeMultiMasterProbeInfo(struct BusLogic_HostAd if (pci_enable_device(PCI_Device)) continue; - if (pci_set_dma_mask(PCI_Device, (u64) 0xffffffff)) + if (pci_set_dma_mask(PCI_Device, DMA_32BIT_MASK)) continue; Bus = PCI_Device->bus->number; @@ -885,7 +887,7 @@ static int __init BusLogic_InitializeFlashPointProbeInfo(struct BusLogic_HostAda if (pci_enable_device(PCI_Device)) continue; - if (pci_set_dma_mask(PCI_Device, (u64) 0xffffffff)) + if (pci_set_dma_mask(PCI_Device, DMA_32BIT_MASK)) continue; Bus = PCI_Device->bus->number; @@ -2896,7 +2898,7 @@ static int BusLogic_QueueCommand(struct scsi_cmnd *Command, void (*CompletionRou */ if (HostAdapter->ActiveCommands[TargetID] == 0) HostAdapter->LastSequencePoint[TargetID] = jiffies; - else if (jiffies - HostAdapter->LastSequencePoint[TargetID] > 4 * HZ) { + else if (time_after(jiffies, HostAdapter->LastSequencePoint[TargetID] + 4 * HZ)) { HostAdapter->LastSequencePoint[TargetID] = jiffies; QueueTag = BusLogic_OrderedQueueTag; } diff --git a/drivers/scsi/a100u2w.c b/drivers/scsi/a100u2w.c index 9f45ae1745da..3dce21c78737 100644 --- a/drivers/scsi/a100u2w.c +++ b/drivers/scsi/a100u2w.c @@ -89,6 +89,7 @@ #include <linux/string.h> #include <linux/ioport.h> #include <linux/slab.h> +#include <linux/dma-mapping.h> #include <asm/io.h> #include <asm/irq.h> @@ -1052,7 +1053,7 @@ static int __devinit inia100_probe_one(struct pci_dev *pdev, if (pci_enable_device(pdev)) goto out; - if (pci_set_dma_mask(pdev, 0xffffffffULL)) { + if (pci_set_dma_mask(pdev, DMA_32BIT_MASK)) { printk(KERN_WARNING "Unable to set 32bit DMA " "on inia100 adapter, ignoring.\n"); goto out_disable_device; diff --git a/drivers/scsi/aacraid/aachba.c b/drivers/scsi/aacraid/aachba.c index a16f8ded8f1d..8df4a0ea3761 100644 --- a/drivers/scsi/aacraid/aachba.c +++ b/drivers/scsi/aacraid/aachba.c @@ -32,6 +32,7 @@ #include <linux/slab.h> #include <linux/completion.h> #include <linux/blkdev.h> +#include <linux/dma-mapping.h> #include <asm/semaphore.h> #include <asm/uaccess.h> diff --git a/drivers/scsi/aacraid/linit.c b/drivers/scsi/aacraid/linit.c index c2596335549d..720330778648 100644 --- a/drivers/scsi/aacraid/linit.c +++ b/drivers/scsi/aacraid/linit.c @@ -45,6 +45,7 @@ #include <linux/pci.h> #include <linux/slab.h> #include <linux/spinlock.h> +#include <linux/dma-mapping.h> #include <linux/syscalls.h> #include <linux/delay.h> #include <linux/smp_lock.h> @@ -806,8 +807,8 @@ static int __devinit aac_probe_one(struct pci_dev *pdev, * to driver communication memory to be allocated below 2gig */ if (aac_drivers[index].quirks & AAC_QUIRK_31BIT) - if (pci_set_dma_mask(pdev, 0x7FFFFFFFULL) || - pci_set_consistent_dma_mask(pdev, 0x7FFFFFFFULL)) + if (pci_set_dma_mask(pdev, DMA_31BIT_MASK) || + pci_set_consistent_dma_mask(pdev, DMA_31BIT_MASK)) goto out; pci_set_master(pdev); diff --git a/drivers/scsi/atp870u.c b/drivers/scsi/atp870u.c index 5227a779c05c..a198d86667e9 100644 --- a/drivers/scsi/atp870u.c +++ b/drivers/scsi/atp870u.c @@ -28,6 +28,7 @@ #include <linux/spinlock.h> #include <linux/pci.h> #include <linux/blkdev.h> +#include <linux/dma-mapping.h> #include <asm/system.h> #include <asm/io.h> @@ -2631,7 +2632,7 @@ static int atp870u_probe(struct pci_dev *pdev, const struct pci_device_id *ent) if (pci_enable_device(pdev)) return -EIO; - if (!pci_set_dma_mask(pdev, 0xFFFFFFFFUL)) { + if (!pci_set_dma_mask(pdev, DMA_32BIT_MASK)) { printk(KERN_INFO "atp870u: use 32bit DMA mask.\n"); } else { 
printk(KERN_ERR "atp870u: DMA mask required but not available.\n"); diff --git a/drivers/scsi/dpt_i2o.c b/drivers/scsi/dpt_i2o.c index 6e6b293dcb28..b1b704a42efd 100644 --- a/drivers/scsi/dpt_i2o.c +++ b/drivers/scsi/dpt_i2o.c @@ -57,6 +57,7 @@ MODULE_DESCRIPTION("Adaptec I2O RAID Driver"); #include <linux/reboot.h> #include <linux/spinlock.h> #include <linux/smp_lock.h> +#include <linux/dma-mapping.h> #include <linux/timer.h> #include <linux/string.h> @@ -906,8 +907,8 @@ static int adpt_install_hba(struct scsi_host_template* sht, struct pci_dev* pDev } pci_set_master(pDev); - if (pci_set_dma_mask(pDev, 0xffffffffffffffffULL) && - pci_set_dma_mask(pDev, 0xffffffffULL)) + if (pci_set_dma_mask(pDev, DMA_64BIT_MASK) && + pci_set_dma_mask(pDev, DMA_32BIT_MASK)) return -EINVAL; base_addr0_phys = pci_resource_start(pDev,0); diff --git a/drivers/scsi/eata.c b/drivers/scsi/eata.c index b3f9de8f7595..059eeee4b554 100644 --- a/drivers/scsi/eata.c +++ b/drivers/scsi/eata.c @@ -490,6 +490,7 @@ #include <linux/init.h> #include <linux/ctype.h> #include <linux/spinlock.h> +#include <linux/dma-mapping.h> #include <asm/byteorder.h> #include <asm/dma.h> #include <asm/io.h> @@ -1426,7 +1427,7 @@ static int port_detect(unsigned long port_base, unsigned int j, if (ha->pdev) { pci_set_master(ha->pdev); - if (pci_set_dma_mask(ha->pdev, 0xffffffff)) + if (pci_set_dma_mask(ha->pdev, DMA_32BIT_MASK)) printk("%s: warning, pci_set_dma_mask failed.\n", ha->board_name); } diff --git a/drivers/scsi/gdth.c b/drivers/scsi/gdth.c index 62e3cda859af..d5740bbdef3e 100644 --- a/drivers/scsi/gdth.c +++ b/drivers/scsi/gdth.c @@ -388,6 +388,7 @@ #include <linux/proc_fs.h> #include <linux/time.h> #include <linux/timer.h> +#include <linux/dma-mapping.h> #ifdef GDTH_RTC #include <linux/mc146818rtc.h> #endif @@ -671,7 +672,7 @@ static struct file_operations gdth_fops = { static struct notifier_block gdth_notifier = { gdth_halt, NULL, 0 }; - +static int notifier_disabled = 0; static void gdth_delay(int milliseconds) { @@ -4527,15 +4528,15 @@ static int __init gdth_detect(struct scsi_host_template *shtp) if (!(ha->cache_feat & ha->raw_feat & ha->screen_feat &GDT_64BIT)|| /* 64-bit DMA only supported from FW >= x.43 */ (!ha->dma64_support)) { - if (pci_set_dma_mask(pcistr[ctr].pdev, 0xffffffff)) { + if (pci_set_dma_mask(pcistr[ctr].pdev, DMA_32BIT_MASK)) { printk(KERN_WARNING "GDT-PCI %d: Unable to set 32-bit DMA\n", hanum); err = TRUE; } } else { shp->max_cmd_len = 16; - if (!pci_set_dma_mask(pcistr[ctr].pdev, 0xffffffffffffffffULL)) { + if (!pci_set_dma_mask(pcistr[ctr].pdev, DMA_64BIT_MASK)) { printk("GDT-PCI %d: 64-bit DMA enabled\n", hanum); - } else if (pci_set_dma_mask(pcistr[ctr].pdev, 0xffffffff)) { + } else if (pci_set_dma_mask(pcistr[ctr].pdev, DMA_32BIT_MASK)) { printk(KERN_WARNING "GDT-PCI %d: Unable to set 64/32-bit DMA\n", hanum); err = TRUE; } @@ -4595,13 +4596,13 @@ static int __init gdth_detect(struct scsi_host_template *shtp) add_timer(&gdth_timer); #endif major = register_chrdev(0,"gdth",&gdth_fops); + notifier_disabled = 0; register_reboot_notifier(&gdth_notifier); } gdth_polling = FALSE; return gdth_ctr_vcount; } - static int gdth_release(struct Scsi_Host *shp) { int hanum; @@ -5632,10 +5633,14 @@ static int gdth_halt(struct notifier_block *nb, ulong event, void *buf) char cmnd[MAX_COMMAND_SIZE]; #endif + if (notifier_disabled) + return NOTIFY_OK; + TRACE2(("gdth_halt() event %d\n",(int)event)); if (event != SYS_RESTART && event != SYS_HALT && event != SYS_POWER_OFF) return NOTIFY_DONE; + notifier_disabled = 1; 
printk("GDT-HA: Flushing all host drives .. "); for (hanum = 0; hanum < gdth_ctr_count; ++hanum) { gdth_flush(hanum); @@ -5679,7 +5684,6 @@ static int gdth_halt(struct notifier_block *nb, ulong event, void *buf) #ifdef GDTH_STATISTICS del_timer(&gdth_timer); #endif - unregister_reboot_notifier(&gdth_notifier); return NOTIFY_OK; } diff --git a/drivers/scsi/initio.c b/drivers/scsi/initio.c index ea6f3c0e05d9..0cc7f65b584f 100644 --- a/drivers/scsi/initio.c +++ b/drivers/scsi/initio.c @@ -127,6 +127,7 @@ #include <linux/sched.h> #include <linux/slab.h> #include <linux/jiffies.h> +#include <linux/dma-mapping.h> #include <asm/io.h> #include <scsi/scsi.h> @@ -2780,7 +2781,7 @@ static int tul_NewReturnNumberOfAdapters(void) if (((dRegValue & 0xFF00) >> 8) == 0xFF) dRegValue = 0; wBIOS = (wBIOS << 8) + ((UWORD) ((dRegValue & 0xFF00) >> 8)); - if (pci_set_dma_mask(pDev, 0xffffffff)) { + if (pci_set_dma_mask(pDev, DMA_32BIT_MASK)) { printk(KERN_WARNING "i91u: Could not set 32 bit DMA mask\n"); continue; diff --git a/drivers/scsi/ips.c b/drivers/scsi/ips.c index 481708d527ae..a4c0b04cfdbd 100644 --- a/drivers/scsi/ips.c +++ b/drivers/scsi/ips.c @@ -179,6 +179,7 @@ #include <linux/blkdev.h> #include <linux/types.h> +#include <linux/dma-mapping.h> #include <scsi/sg.h> @@ -7284,10 +7285,10 @@ ips_init_phase1(struct pci_dev *pci_dev, int *indexPtr) * are guaranteed to be < 4G. */ if (IPS_ENABLE_DMA64 && IPS_HAS_ENH_SGLIST(ha) && - !pci_set_dma_mask(ha->pcidev, 0xffffffffffffffffULL)) { + !pci_set_dma_mask(ha->pcidev, DMA_64BIT_MASK)) { (ha)->flags |= IPS_HA_ENH_SG; } else { - if (pci_set_dma_mask(ha->pcidev, 0xffffffffULL) != 0) { + if (pci_set_dma_mask(ha->pcidev, DMA_32BIT_MASK) != 0) { printk(KERN_WARNING "Unable to set DMA Mask\n"); return ips_abort_init(ha, index); } diff --git a/drivers/scsi/iscsi_tcp.c b/drivers/scsi/iscsi_tcp.c index 7b82ff090d42..2068b66822b7 100644 --- a/drivers/scsi/iscsi_tcp.c +++ b/drivers/scsi/iscsi_tcp.c @@ -3200,8 +3200,8 @@ iscsi_r2tpool_alloc(struct iscsi_session *session) * Data-Out PDU's within R2T-sequence can be quite big; * using mempool */ - ctask->datapool = mempool_create(ISCSI_DTASK_DEFAULT_MAX, - mempool_alloc_slab, mempool_free_slab, taskcache); + ctask->datapool = mempool_create_slab_pool(ISCSI_DTASK_DEFAULT_MAX, + taskcache); if (ctask->datapool == NULL) { kfifo_free(ctask->r2tqueue); iscsi_pool_free(&ctask->r2tpool, (void**)ctask->r2ts); diff --git a/drivers/scsi/lpfc/lpfc_mem.c b/drivers/scsi/lpfc/lpfc_mem.c index 352df47bcaca..07017658ac56 100644 --- a/drivers/scsi/lpfc/lpfc_mem.c +++ b/drivers/scsi/lpfc/lpfc_mem.c @@ -38,18 +38,6 @@ #define LPFC_MBUF_POOL_SIZE 64 /* max elements in MBUF safety pool */ #define LPFC_MEM_POOL_SIZE 64 /* max elem in non-DMA safety pool */ -static void * -lpfc_pool_kmalloc(gfp_t gfp_flags, void *data) -{ - return kmalloc((unsigned long)data, gfp_flags); -} - -static void -lpfc_pool_kfree(void *obj, void *data) -{ - kfree(obj); -} - int lpfc_mem_alloc(struct lpfc_hba * phba) { @@ -79,15 +67,13 @@ lpfc_mem_alloc(struct lpfc_hba * phba) pool->current_count++; } - phba->mbox_mem_pool = mempool_create(LPFC_MEM_POOL_SIZE, - lpfc_pool_kmalloc, lpfc_pool_kfree, - (void *)(unsigned long)sizeof(LPFC_MBOXQ_t)); + phba->mbox_mem_pool = mempool_create_kmalloc_pool(LPFC_MEM_POOL_SIZE, + sizeof(LPFC_MBOXQ_t)); if (!phba->mbox_mem_pool) goto fail_free_mbuf_pool; - phba->nlp_mem_pool = mempool_create(LPFC_MEM_POOL_SIZE, - lpfc_pool_kmalloc, lpfc_pool_kfree, - (void *)(unsigned long)sizeof(struct lpfc_nodelist)); + phba->nlp_mem_pool = 
mempool_create_kmalloc_pool(LPFC_MEM_POOL_SIZE, + sizeof(struct lpfc_nodelist)); if (!phba->nlp_mem_pool) goto fail_free_mbox_pool; diff --git a/drivers/scsi/megaraid.c b/drivers/scsi/megaraid.c index 7144674bc8e6..80b68a2481b3 100644 --- a/drivers/scsi/megaraid.c +++ b/drivers/scsi/megaraid.c @@ -45,6 +45,7 @@ #include <linux/interrupt.h> #include <linux/pci.h> #include <linux/init.h> +#include <linux/dma-mapping.h> #include <scsi/scsicam.h> #include "scsi.h" @@ -2094,7 +2095,7 @@ make_local_pdev(adapter_t *adapter, struct pci_dev **pdev) memcpy(*pdev, adapter->dev, sizeof(struct pci_dev)); - if( pci_set_dma_mask(*pdev, 0xffffffff) != 0 ) { + if( pci_set_dma_mask(*pdev, DMA_32BIT_MASK) != 0 ) { kfree(*pdev); return -1; } @@ -4859,10 +4860,10 @@ megaraid_probe_one(struct pci_dev *pdev, const struct pci_device_id *id) /* Set the Mode of addressing to 64 bit if we can */ if ((adapter->flag & BOARD_64BIT) && (sizeof(dma_addr_t) == 8)) { - pci_set_dma_mask(pdev, 0xffffffffffffffffULL); + pci_set_dma_mask(pdev, DMA_64BIT_MASK); adapter->has_64bit_addr = 1; } else { - pci_set_dma_mask(pdev, 0xffffffff); + pci_set_dma_mask(pdev, DMA_32BIT_MASK); adapter->has_64bit_addr = 0; } diff --git a/drivers/scsi/nsp32.c b/drivers/scsi/nsp32.c index a279ebb61447..30ee0ef4b459 100644 --- a/drivers/scsi/nsp32.c +++ b/drivers/scsi/nsp32.c @@ -38,6 +38,7 @@ #include <linux/pci.h> #include <linux/delay.h> #include <linux/ctype.h> +#include <linux/dma-mapping.h> #include <asm/dma.h> #include <asm/system.h> @@ -2776,7 +2777,7 @@ static int nsp32_detect(struct scsi_host_template *sht) /* * setup DMA */ - if (pci_set_dma_mask(PCIDEV, 0xffffffffUL) != 0) { + if (pci_set_dma_mask(PCIDEV, DMA_32BIT_MASK) != 0) { nsp32_msg (KERN_ERR, "failed to set PCI DMA mask"); goto scsi_unregister; } diff --git a/drivers/scsi/osst.c b/drivers/scsi/osst.c index 66ea47a9c53c..e3bd4bc339f4 100644 --- a/drivers/scsi/osst.c +++ b/drivers/scsi/osst.c @@ -49,6 +49,7 @@ static const char * osst_version = "0.99.4"; #include <linux/blkdev.h> #include <linux/moduleparam.h> #include <linux/delay.h> +#include <linux/jiffies.h> #include <asm/uaccess.h> #include <asm/dma.h> #include <asm/system.h> @@ -856,7 +857,7 @@ static int osst_wait_frame(struct osst_tape * STp, struct osst_request ** aSRpnt ) && result >= 0) { #if DEBUG - if (debugging || jiffies - startwait >= 2*HZ/OSST_POLL_PER_SEC) + if (debugging || time_after_eq(jiffies, startwait + 2*HZ/OSST_POLL_PER_SEC)) printk (OSST_DEB_MSG "%s:D: Succ wait f fr %i (>%i): %i-%i %i (%i): %3li.%li s\n", name, curr, curr+minlast, STp->first_frame_position, @@ -867,7 +868,7 @@ static int osst_wait_frame(struct osst_tape * STp, struct osst_request ** aSRpnt return 0; } #if DEBUG - if (jiffies - startwait >= 2*HZ/OSST_POLL_PER_SEC && notyetprinted) + if (time_after_eq(jiffies, startwait + 2*HZ/OSST_POLL_PER_SEC) && notyetprinted) { printk (OSST_DEB_MSG "%s:D: Wait for frame %i (>%i): %i-%i %i (%i)\n", name, curr, curr+minlast, STp->first_frame_position, diff --git a/drivers/scsi/ppa.c b/drivers/scsi/ppa.c index 05347eed9dd5..fee843fab1c7 100644 --- a/drivers/scsi/ppa.c +++ b/drivers/scsi/ppa.c @@ -18,6 +18,7 @@ #include <linux/parport.h> #include <linux/workqueue.h> #include <linux/delay.h> +#include <linux/jiffies.h> #include <asm/io.h> #include <scsi/scsi.h> @@ -726,7 +727,7 @@ static int ppa_engine(ppa_struct *dev, struct scsi_cmnd *cmd) retv--; if (retv) { - if ((jiffies - dev->jstart) > (1 * HZ)) { + if (time_after(jiffies, dev->jstart + (1 * HZ))) { printk ("ppa: Parallel port cable is 
unplugged!!\n"); ppa_fail(dev, DID_BUS_BUSY); diff --git a/drivers/scsi/qla1280.c b/drivers/scsi/qla1280.c index e0230249fa0f..5a48e55f9418 100644 --- a/drivers/scsi/qla1280.c +++ b/drivers/scsi/qla1280.c @@ -350,6 +350,7 @@ #include <linux/pci_ids.h> #include <linux/interrupt.h> #include <linux/init.h> +#include <linux/dma-mapping.h> #include <asm/io.h> #include <asm/irq.h> @@ -4321,7 +4322,7 @@ qla1280_probe_one(struct pci_dev *pdev, const struct pci_device_id *id) #ifdef QLA_64BIT_PTR if (pci_set_dma_mask(ha->pdev, (dma_addr_t) ~ 0ULL)) { - if (pci_set_dma_mask(ha->pdev, 0xffffffff)) { + if (pci_set_dma_mask(ha->pdev, DMA_32BIT_MASK)) { printk(KERN_WARNING "scsi(%li): Unable to set a " "suitable DMA mask - aborting\n", ha->host_no); error = -ENODEV; @@ -4331,7 +4332,7 @@ qla1280_probe_one(struct pci_dev *pdev, const struct pci_device_id *id) dprintk(2, "scsi(%li): 64 Bit PCI Addressing Enabled\n", ha->host_no); #else - if (pci_set_dma_mask(ha->pdev, 0xffffffff)) { + if (pci_set_dma_mask(ha->pdev, DMA_32BIT_MASK)) { printk(KERN_WARNING "scsi(%li): Unable to set a " "suitable DMA mask - aborting\n", ha->host_no); error = -ENODEV; diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c index 029bbf461bb2..017729c59a49 100644 --- a/drivers/scsi/qla2xxx/qla_os.c +++ b/drivers/scsi/qla2xxx/qla_os.c @@ -2154,8 +2154,7 @@ qla2x00_allocate_sp_pool(scsi_qla_host_t *ha) int rval; rval = QLA_SUCCESS; - ha->srb_mempool = mempool_create(SRB_MIN_REQ, mempool_alloc_slab, - mempool_free_slab, srb_cachep); + ha->srb_mempool = mempool_create_slab_pool(SRB_MIN_REQ, srb_cachep); if (ha->srb_mempool == NULL) { qla_printk(KERN_INFO, ha, "Unable to allocate SRB mempool.\n"); rval = QLA_FUNCTION_FAILED; diff --git a/drivers/scsi/qlogicfc.c b/drivers/scsi/qlogicfc.c index 94ef3f08d378..52b224a5d6fd 100644 --- a/drivers/scsi/qlogicfc.c +++ b/drivers/scsi/qlogicfc.c @@ -61,6 +61,8 @@ #include <linux/unistd.h> #include <linux/spinlock.h> #include <linux/interrupt.h> +#include <linux/dma-mapping.h> +#include <linux/jiffies.h> #include <asm/io.h> #include <asm/irq.h> #include "scsi.h" @@ -737,8 +739,8 @@ static int isp2x00_detect(struct scsi_host_template * tmpt) continue; /* Try to configure DMA attributes. 
*/ - if (pci_set_dma_mask(pdev, 0xffffffffffffffffULL) && - pci_set_dma_mask(pdev, 0xffffffffULL)) + if (pci_set_dma_mask(pdev, DMA_64BIT_MASK) && + pci_set_dma_mask(pdev, DMA_32BIT_MASK)) continue; host = scsi_register(tmpt, sizeof(struct isp2x00_hostdata)); @@ -1325,7 +1327,7 @@ static int isp2x00_queuecommand(Scsi_Cmnd * Cmnd, void (*done) (Scsi_Cmnd *)) cmd->control_flags = cpu_to_le16(CFLAG_READ); if (Cmnd->device->tagged_supported) { - if ((jiffies - hostdata->tag_ages[Cmnd->device->id]) > (2 * ISP_TIMEOUT)) { + if (time_after(jiffies, hostdata->tag_ages[Cmnd->device->id] + (2 * ISP_TIMEOUT))) { cmd->control_flags |= cpu_to_le16(CFLAG_ORDERED_TAG); hostdata->tag_ages[Cmnd->device->id] = jiffies; } else diff --git a/drivers/scsi/qlogicpti.c b/drivers/scsi/qlogicpti.c index 1fd5fc6d0fe3..c7e78dcf09df 100644 --- a/drivers/scsi/qlogicpti.c +++ b/drivers/scsi/qlogicpti.c @@ -24,6 +24,7 @@ #include <linux/spinlock.h> #include <linux/interrupt.h> #include <linux/module.h> +#include <linux/jiffies.h> #include <asm/byteorder.h> @@ -1017,7 +1018,7 @@ static inline void cmd_frob(struct Command_Entry *cmd, struct scsi_cmnd *Cmnd, if (Cmnd->device->tagged_supported) { if (qpti->cmd_count[Cmnd->device->id] == 0) qpti->tag_ages[Cmnd->device->id] = jiffies; - if ((jiffies - qpti->tag_ages[Cmnd->device->id]) > (5*HZ)) { + if (time_after(jiffies, qpti->tag_ages[Cmnd->device->id] + (5*HZ))) { cmd->control_flags = CFLAG_ORDERED_TAG; qpti->tag_ages[Cmnd->device->id] = jiffies; } else diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c index ede158d08d9d..8f010a314a3d 100644 --- a/drivers/scsi/scsi_lib.c +++ b/drivers/scsi/scsi_lib.c @@ -1787,9 +1787,8 @@ int __init scsi_init_queue(void) sgp->name); } - sgp->pool = mempool_create(SG_MEMPOOL_SIZE, - mempool_alloc_slab, mempool_free_slab, - sgp->slab); + sgp->pool = mempool_create_slab_pool(SG_MEMPOOL_SIZE, + sgp->slab); if (!sgp->pool) { printk(KERN_ERR "SCSI: can't init sg mempool %s\n", sgp->name); diff --git a/drivers/serial/8250.c b/drivers/serial/8250.c index 5996d3cd0ed8..674b15c78f68 100644 --- a/drivers/serial/8250.c +++ b/drivers/serial/8250.c @@ -1528,7 +1528,7 @@ static int serial8250_startup(struct uart_port *port) /* * Clear the FIFO buffers and disable them. - * (they will be reeanbled in set_termios()) + * (they will be reenabled in set_termios()) */ serial8250_clear_fifos(up); diff --git a/drivers/serial/serial_txx9.c b/drivers/serial/serial_txx9.c index b848b7d94412..3bdee64d1a99 100644 --- a/drivers/serial/serial_txx9.c +++ b/drivers/serial/serial_txx9.c @@ -483,7 +483,7 @@ static int serial_txx9_startup(struct uart_port *port) /* * Clear the FIFO buffers and disable them. - * (they will be reeanbled in set_termios()) + * (they will be reenabled in set_termios()) */ sio_set(up, TXX9_SIFCR, TXX9_SIFCR_TFRST | TXX9_SIFCR_RFRST | TXX9_SIFCR_FRSTE); diff --git a/drivers/serial/sunsu.c b/drivers/serial/sunsu.c index 9fe2283d91e5..1c4396c2962d 100644 --- a/drivers/serial/sunsu.c +++ b/drivers/serial/sunsu.c @@ -641,7 +641,7 @@ static int sunsu_startup(struct uart_port *port) /* * Clear the FIFO buffers and disable them. 
- * (they will be reeanbled in set_termios()) + * (they will be reenabled in set_termios()) */ if (uart_config[up->port.type].flags & UART_CLEAR_FIFO) { serial_outp(up, UART_FCR, UART_FCR_ENABLE_FIFO); diff --git a/drivers/telephony/phonedev.c b/drivers/telephony/phonedev.c index 3c987f49f6b4..e166fffea86b 100644 --- a/drivers/telephony/phonedev.c +++ b/drivers/telephony/phonedev.c @@ -29,6 +29,7 @@ #include <linux/kmod.h> #include <linux/sem.h> #include <linux/devfs_fs_kernel.h> +#include <linux/mutex.h> #define PHONE_NUM_DEVICES 256 @@ -37,7 +38,7 @@ */ static struct phone_device *phone_device[PHONE_NUM_DEVICES]; -static DECLARE_MUTEX(phone_lock); +static DEFINE_MUTEX(phone_lock); /* * Open a phone device. @@ -48,19 +49,19 @@ static int phone_open(struct inode *inode, struct file *file) unsigned int minor = iminor(inode); int err = 0; struct phone_device *p; - struct file_operations *old_fops, *new_fops = NULL; + const struct file_operations *old_fops, *new_fops = NULL; if (minor >= PHONE_NUM_DEVICES) return -ENODEV; - down(&phone_lock); + mutex_lock(&phone_lock); p = phone_device[minor]; if (p) new_fops = fops_get(p->f_op); if (!new_fops) { - up(&phone_lock); + mutex_unlock(&phone_lock); request_module("char-major-%d-%d", PHONE_MAJOR, minor); - down(&phone_lock); + mutex_lock(&phone_lock); p = phone_device[minor]; if (p == NULL || (new_fops = fops_get(p->f_op)) == NULL) { @@ -78,7 +79,7 @@ static int phone_open(struct inode *inode, struct file *file) } fops_put(old_fops); end: - up(&phone_lock); + mutex_unlock(&phone_lock); return err; } @@ -100,18 +101,18 @@ int phone_register_device(struct phone_device *p, int unit) end = unit + 1; /* enter the loop at least one time */ } - down(&phone_lock); + mutex_lock(&phone_lock); for (i = base; i < end; i++) { if (phone_device[i] == NULL) { phone_device[i] = p; p->minor = i; devfs_mk_cdev(MKDEV(PHONE_MAJOR,i), S_IFCHR|S_IRUSR|S_IWUSR, "phone/%d", i); - up(&phone_lock); + mutex_unlock(&phone_lock); return 0; } } - up(&phone_lock); + mutex_unlock(&phone_lock); return -ENFILE; } @@ -121,12 +122,12 @@ int phone_register_device(struct phone_device *p, int unit) void phone_unregister_device(struct phone_device *pfd) { - down(&phone_lock); + mutex_lock(&phone_lock); if (phone_device[pfd->minor] != pfd) panic("phone: bad unregister"); devfs_remove("phone/%d", pfd->minor); phone_device[pfd->minor] = NULL; - up(&phone_lock); + mutex_unlock(&phone_lock); } diff --git a/drivers/usb/core/file.c b/drivers/usb/core/file.c index 37b13368c814..b263a54a13c0 100644 --- a/drivers/usb/core/file.c +++ b/drivers/usb/core/file.c @@ -24,15 +24,15 @@ #include "usb.h" #define MAX_USB_MINORS 256 -static struct file_operations *usb_minors[MAX_USB_MINORS]; +static const struct file_operations *usb_minors[MAX_USB_MINORS]; static DEFINE_SPINLOCK(minor_lock); static int usb_open(struct inode * inode, struct file * file) { int minor = iminor(inode); - struct file_operations *c; + const struct file_operations *c; int err = -ENODEV; - struct file_operations *old_fops, *new_fops = NULL; + const struct file_operations *old_fops, *new_fops = NULL; spin_lock (&minor_lock); c = usb_minors[minor]; diff --git a/drivers/usb/core/notify.c b/drivers/usb/core/notify.c index 4b55285de9a0..fe0ed54fa0ae 100644 --- a/drivers/usb/core/notify.c +++ b/drivers/usb/core/notify.c @@ -16,57 +16,7 @@ #include <linux/mutex.h> #include "usb.h" - -static struct notifier_block *usb_notifier_list; -static DEFINE_MUTEX(usb_notifier_lock); - -static void usb_notifier_chain_register(struct notifier_block 
**list, - struct notifier_block *n) -{ - mutex_lock(&usb_notifier_lock); - while (*list) { - if (n->priority > (*list)->priority) - break; - list = &((*list)->next); - } - n->next = *list; - *list = n; - mutex_unlock(&usb_notifier_lock); -} - -static void usb_notifier_chain_unregister(struct notifier_block **nl, - struct notifier_block *n) -{ - mutex_lock(&usb_notifier_lock); - while ((*nl)!=NULL) { - if ((*nl)==n) { - *nl = n->next; - goto exit; - } - nl=&((*nl)->next); - } -exit: - mutex_unlock(&usb_notifier_lock); -} - -static int usb_notifier_call_chain(struct notifier_block **n, - unsigned long val, void *v) -{ - int ret=NOTIFY_DONE; - struct notifier_block *nb = *n; - - mutex_lock(&usb_notifier_lock); - while (nb) { - ret = nb->notifier_call(nb,val,v); - if (ret&NOTIFY_STOP_MASK) { - goto exit; - } - nb = nb->next; - } -exit: - mutex_unlock(&usb_notifier_lock); - return ret; -} +static BLOCKING_NOTIFIER_HEAD(usb_notifier_list); /** * usb_register_notify - register a notifier callback whenever a usb change happens @@ -76,7 +26,7 @@ exit: */ void usb_register_notify(struct notifier_block *nb) { - usb_notifier_chain_register(&usb_notifier_list, nb); + blocking_notifier_chain_register(&usb_notifier_list, nb); } EXPORT_SYMBOL_GPL(usb_register_notify); @@ -89,27 +39,28 @@ EXPORT_SYMBOL_GPL(usb_register_notify); */ void usb_unregister_notify(struct notifier_block *nb) { - usb_notifier_chain_unregister(&usb_notifier_list, nb); + blocking_notifier_chain_unregister(&usb_notifier_list, nb); } EXPORT_SYMBOL_GPL(usb_unregister_notify); void usb_notify_add_device(struct usb_device *udev) { - usb_notifier_call_chain(&usb_notifier_list, USB_DEVICE_ADD, udev); + blocking_notifier_call_chain(&usb_notifier_list, USB_DEVICE_ADD, udev); } void usb_notify_remove_device(struct usb_device *udev) { - usb_notifier_call_chain(&usb_notifier_list, USB_DEVICE_REMOVE, udev); + blocking_notifier_call_chain(&usb_notifier_list, + USB_DEVICE_REMOVE, udev); } void usb_notify_add_bus(struct usb_bus *ubus) { - usb_notifier_call_chain(&usb_notifier_list, USB_BUS_ADD, ubus); + blocking_notifier_call_chain(&usb_notifier_list, USB_BUS_ADD, ubus); } void usb_notify_remove_bus(struct usb_bus *ubus) { - usb_notifier_call_chain(&usb_notifier_list, USB_BUS_REMOVE, ubus); + blocking_notifier_call_chain(&usb_notifier_list, USB_BUS_REMOVE, ubus); } diff --git a/drivers/usb/gadget/inode.c b/drivers/usb/gadget/inode.c index b44cfda76b61..3f618ce6998d 100644 --- a/drivers/usb/gadget/inode.c +++ b/drivers/usb/gadget/inode.c @@ -1581,7 +1581,7 @@ restart: static struct inode * gadgetfs_create_file (struct super_block *sb, char const *name, - void *data, struct file_operations *fops, + void *data, const struct file_operations *fops, struct dentry **dentry_p); static int activate_ep_files (struct dev_data *dev) @@ -1955,7 +1955,7 @@ module_param (default_perm, uint, 0644); static struct inode * gadgetfs_make_inode (struct super_block *sb, - void *data, struct file_operations *fops, + void *data, const struct file_operations *fops, int mode) { struct inode *inode = new_inode (sb); @@ -1979,7 +1979,7 @@ gadgetfs_make_inode (struct super_block *sb, */ static struct inode * gadgetfs_create_file (struct super_block *sb, char const *name, - void *data, struct file_operations *fops, + void *data, const struct file_operations *fops, struct dentry **dentry_p) { struct dentry *dentry; diff --git a/drivers/usb/host/ohci-s3c2410.c b/drivers/usb/host/ohci-s3c2410.c index 372527a83593..682bf2215660 100644 --- a/drivers/usb/host/ohci-s3c2410.c +++ 
b/drivers/usb/host/ohci-s3c2410.c @@ -158,7 +158,7 @@ static int ohci_s3c2410_hub_control ( "s3c2410_hub_control(%p,0x%04x,0x%04x,0x%04x,%p,%04x)\n", hcd, typeReq, wValue, wIndex, buf, wLength); - /* if we are only an humble host without any special capabilites + /* if we are only an humble host without any special capabilities * process the request straight away and exit */ if (info == NULL) { diff --git a/drivers/usb/net/zaurus.c b/drivers/usb/net/zaurus.c index 9c5ab251370c..f7ac9d6b9856 100644 --- a/drivers/usb/net/zaurus.c +++ b/drivers/usb/net/zaurus.c @@ -217,7 +217,7 @@ static int blan_mdlm_bind(struct usbnet *dev, struct usb_interface *intf) * with devices that use it and those that don't. */ if ((detail->bDetailData[1] & ~0x02) != 0x01) { - /* bmDataCapabilites == 0 would be fine too, + /* bmDataCapabilities == 0 would be fine too, * but framing is minidriver-coupled for now. */ bad_detail: diff --git a/drivers/video/Kconfig b/drivers/video/Kconfig index fdebd60a3250..22e9d696fdd2 100644 --- a/drivers/video/Kconfig +++ b/drivers/video/Kconfig @@ -70,6 +70,22 @@ config FB_MACMODES depends on FB default n +config FB_FIRMWARE_EDID + bool "Enable firmware EDID" + depends on FB + default y + ---help--- + This enables access to the EDID transferred from the firmware. + On the i386, this is from the Video BIOS. Enable this if DDC/I2C + transfers do not work for your driver and if you are using + nvidiafb, i810fb or savagefb. + + In general, choosing Y for this option is safe. If you + experience extremely long delays while booting before you get + something on your display, try setting this to N. Matrox cards in + combination with certain motherboards and monitors are known to + suffer from this problem. + config FB_MODE_HELPERS bool "Enable Video Mode Handling Helpers" depends on FB @@ -1202,6 +1218,17 @@ config FB_AU1100 bool "Au1100 LCD Driver" depends on (FB = y) && EXPERIMENTAL && PCI && MIPS && MIPS_PB1100=y +config FB_AU1200 + bool "Au1200 LCD Driver" + depends on FB && MIPS && SOC_AU1200 + select FB_CFB_FILLRECT + select FB_CFB_COPYAREA + select FB_CFB_IMAGEBLIT + help + This is the framebuffer driver for the AMD Au1200 SOC. It can drive + various panels and CRTs by passing in kernel cmd line option + au1200fb:panel=<name>. 
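[For illustration only, assuming the option is parsed exactly in the form the help text above gives and taking a panel name from the driver's known_lcd_panels table further down in this patch, a kernel command line could include:

    au1200fb:panel=Sharp_320x240_TFT

to select the Sharp 320x240 TFT timings; the .name string of any other table entry should work the same way.]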
+ source "drivers/video/geode/Kconfig" config FB_FFB diff --git a/drivers/video/Makefile b/drivers/video/Makefile index aa434e725c0d..cb90218515ac 100644 --- a/drivers/video/Makefile +++ b/drivers/video/Makefile @@ -86,6 +86,7 @@ obj-$(CONFIG_FB_ASILIANT) += asiliantfb.o obj-$(CONFIG_FB_PXA) += pxafb.o obj-$(CONFIG_FB_W100) += w100fb.o obj-$(CONFIG_FB_AU1100) += au1100fb.o +obj-$(CONFIG_FB_AU1200) += au1200fb.o obj-$(CONFIG_FB_PMAG_AA) += pmag-aa-fb.o obj-$(CONFIG_FB_PMAG_BA) += pmag-ba-fb.o obj-$(CONFIG_FB_PMAGB_B) += pmagb-b-fb.o diff --git a/drivers/video/acornfb.c b/drivers/video/acornfb.c index 76448d6ae896..98baecccb3fd 100644 --- a/drivers/video/acornfb.c +++ b/drivers/video/acornfb.c @@ -1308,7 +1308,7 @@ static int __init acornfb_probe(struct platform_device *dev) /* * Try to select a suitable default mode */ - for (i = 0; i < sizeof(modedb) / sizeof(*modedb); i++) { + for (i = 0; i < ARRAY_SIZE(modedb); i++) { unsigned long hs; hs = modedb[i].refresh * @@ -1380,7 +1380,7 @@ static int __init acornfb_probe(struct platform_device *dev) */ free_unused_pages(PAGE_OFFSET + size, PAGE_OFFSET + MAX_SIZE); #endif - + fb_info.fix.smem_len = size; current_par.palette_size = VIDC_PALETTE_SIZE; @@ -1391,7 +1391,7 @@ static int __init acornfb_probe(struct platform_device *dev) */ do { rc = fb_find_mode(&fb_info.var, &fb_info, NULL, modedb, - sizeof(modedb) / sizeof(*modedb), + ARRAY_SIZE(modedb), &acornfb_default_mode, DEFAULT_BPP); /* * If we found an exact match, all ok. @@ -1408,7 +1408,7 @@ static int __init acornfb_probe(struct platform_device *dev) break; rc = fb_find_mode(&fb_info.var, &fb_info, NULL, modedb, - sizeof(modedb) / sizeof(*modedb), + ARRAY_SIZE(modedb), &acornfb_default_mode, DEFAULT_BPP); if (rc) break; diff --git a/drivers/video/asiliantfb.c b/drivers/video/asiliantfb.c index c924d81f7978..29f9f0dfe3b4 100644 --- a/drivers/video/asiliantfb.c +++ b/drivers/video/asiliantfb.c @@ -353,8 +353,6 @@ struct chips_init_reg { unsigned char data; }; -#define N_ELTS(x) (sizeof(x) / sizeof(x[0])) - static struct chips_init_reg chips_init_sr[] = { {0x00, 0x03}, /* Reset register */ @@ -460,22 +458,22 @@ static void __devinit chips_hw_init(struct fb_info *p) { int i; - for (i = 0; i < N_ELTS(chips_init_xr); ++i) + for (i = 0; i < ARRAY_SIZE(chips_init_xr); ++i) write_xr(chips_init_xr[i].addr, chips_init_xr[i].data); write_xr(0x81, 0x12); write_xr(0x82, 0x08); write_xr(0x20, 0x00); - for (i = 0; i < N_ELTS(chips_init_sr); ++i) + for (i = 0; i < ARRAY_SIZE(chips_init_sr); ++i) write_sr(chips_init_sr[i].addr, chips_init_sr[i].data); - for (i = 0; i < N_ELTS(chips_init_gr); ++i) + for (i = 0; i < ARRAY_SIZE(chips_init_gr); ++i) write_gr(chips_init_gr[i].addr, chips_init_gr[i].data); - for (i = 0; i < N_ELTS(chips_init_ar); ++i) + for (i = 0; i < ARRAY_SIZE(chips_init_ar); ++i) write_ar(chips_init_ar[i].addr, chips_init_ar[i].data); /* Enable video output in attribute index register */ writeb(0x20, mmio_base + 0x780); - for (i = 0; i < N_ELTS(chips_init_cr); ++i) + for (i = 0; i < ARRAY_SIZE(chips_init_cr); ++i) write_cr(chips_init_cr[i].addr, chips_init_cr[i].data); - for (i = 0; i < N_ELTS(chips_init_fr); ++i) + for (i = 0; i < ARRAY_SIZE(chips_init_fr); ++i) write_fr(chips_init_fr[i].addr, chips_init_fr[i].data); } diff --git a/drivers/video/aty/aty128fb.c b/drivers/video/aty/aty128fb.c index 620c9a934e0e..821c6da8e42c 100644 --- a/drivers/video/aty/aty128fb.c +++ b/drivers/video/aty/aty128fb.c @@ -1725,9 +1725,9 @@ static int __init aty128_init(struct pci_dev *pdev, const struct 
pci_device_id * strcpy(video_card, "Rage128 XX "); video_card[8] = ent->device >> 8; video_card[9] = ent->device & 0xFF; - + /* range check to make sure */ - if (ent->driver_data < (sizeof(r128_family)/sizeof(char *))) + if (ent->driver_data < ARRAY_SIZE(r128_family)) strncat(video_card, r128_family[ent->driver_data], sizeof(video_card)); printk(KERN_INFO "aty128fb: %s [chip rev 0x%x] ", video_card, chip_rev); diff --git a/drivers/video/aty/atyfb_base.c b/drivers/video/aty/atyfb_base.c index 485be386a8ff..e799fcca365a 100644 --- a/drivers/video/aty/atyfb_base.c +++ b/drivers/video/aty/atyfb_base.c @@ -434,7 +434,7 @@ static int __devinit correct_chipset(struct atyfb_par *par) const char *name; int i; - for (i = sizeof(aty_chips) / sizeof(*aty_chips) - 1; i >= 0; i--) + for (i = ARRAY_SIZE(aty_chips) - 1; i >= 0; i--) if (par->pci_id == aty_chips[i].pci_id) break; @@ -2168,10 +2168,10 @@ static void __init aty_calc_mem_refresh(struct atyfb_par *par, int xclk) if (IS_XL(par->pci_id) || IS_MOBILITY(par->pci_id)) { refresh_tbl = ragexl_tbl; - size = sizeof(ragexl_tbl)/sizeof(int); + size = ARRAY_SIZE(ragexl_tbl); } else { refresh_tbl = ragepro_tbl; - size = sizeof(ragepro_tbl)/sizeof(int); + size = ARRAY_SIZE(ragepro_tbl); } for (i=0; i < size; i++) { @@ -2298,6 +2298,10 @@ static int __init aty_init(struct fb_info *info, const char *name) case CLK_ATI18818_1: par->pll_ops = &aty_pll_ati18818_1; break; + case CLK_IBMRGB514: + par->pll_ops = &aty_pll_ibm514; + break; +#if 0 /* dead code */ case CLK_STG1703: par->pll_ops = &aty_pll_stg1703; break; @@ -2307,9 +2311,7 @@ static int __init aty_init(struct fb_info *info, const char *name) case CLK_ATT20C408: par->pll_ops = &aty_pll_att20c408; break; - case CLK_IBMRGB514: - par->pll_ops = &aty_pll_ibm514; - break; +#endif default: PRINTKI("aty_init: CLK type not implemented yet!"); par->pll_ops = &aty_pll_unsupported; @@ -3397,7 +3399,7 @@ static int __devinit atyfb_pci_probe(struct pci_dev *pdev, const struct pci_devi struct atyfb_par *par; int i, rc = -ENOMEM; - for (i = sizeof(aty_chips) / sizeof(*aty_chips) - 1; i >= 0; i--) + for (i = ARRAY_SIZE(aty_chips); i >= 0; i--) if (pdev->device == aty_chips[i].pci_id) break; diff --git a/drivers/video/aty/mach64_gx.c b/drivers/video/aty/mach64_gx.c index 01fdff79483b..2045639cb671 100644 --- a/drivers/video/aty/mach64_gx.c +++ b/drivers/video/aty/mach64_gx.c @@ -149,8 +149,7 @@ static int aty_var_to_pll_514(const struct fb_info *info, u32 vclk_per, }; int i; - for (i = 0; i < sizeof(RGB514_clocks) / sizeof(*RGB514_clocks); - i++) + for (i = 0; i < ARRAY_SIZE(RGB514_clocks); i++) if (vclk_per <= RGB514_clocks[i].limit) { pll->ibm514.m = RGB514_clocks[i].m; pll->ibm514.n = RGB514_clocks[i].n; diff --git a/drivers/video/aty/radeon_base.c b/drivers/video/aty/radeon_base.c index c9f0c5a07e6e..9a6b5b39b88e 100644 --- a/drivers/video/aty/radeon_base.c +++ b/drivers/video/aty/radeon_base.c @@ -1067,7 +1067,7 @@ static int radeon_setcolreg (unsigned regno, unsigned red, unsigned green, if (regno > 255) - return 1; + return -EINVAL; red >>= 8; green >>= 8; @@ -1086,9 +1086,9 @@ static int radeon_setcolreg (unsigned regno, unsigned red, unsigned green, pindex = regno * 8; if (rinfo->depth == 16 && regno > 63) - return 1; + return -EINVAL; if (rinfo->depth == 15 && regno > 31) - return 1; + return -EINVAL; /* For 565, the green component is mixed one order * below diff --git a/drivers/video/au1200fb.c b/drivers/video/au1200fb.c new file mode 100644 index 000000000000..b367de30b98c --- /dev/null +++ 
b/drivers/video/au1200fb.c @@ -0,0 +1,3844 @@ +/* + * BRIEF MODULE DESCRIPTION + * Au1200 LCD Driver. + * + * Copyright 2004-2005 AMD + * Author: AMD + * + * Based on: + * linux/drivers/video/skeletonfb.c -- Skeleton for a frame buffer device + * Created 28 Dec 1997 by Geert Uytterhoeven + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. + * + * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN + * NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, + * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT + * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF + * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON + * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF + * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + * You should have received a copy of the GNU General Public License along + * with this program; if not, write to the Free Software Foundation, Inc., + * 675 Mass Ave, Cambridge, MA 02139, USA. + */ + +#include <linux/module.h> +#include <linux/platform_device.h> +#include <linux/kernel.h> +#include <linux/errno.h> +#include <linux/string.h> +#include <linux/mm.h> +#include <linux/fb.h> +#include <linux/init.h> +#include <linux/interrupt.h> +#include <linux/ctype.h> +#include <linux/dma-mapping.h> + +#include <asm/mach-au1x00/au1000.h> +#include "au1200fb.h" + +#ifdef CONFIG_PM +#include <asm/mach-au1x00/au1xxx_pm.h> +#endif + +#ifndef CONFIG_FB_AU1200_DEVS +#define CONFIG_FB_AU1200_DEVS 4 +#endif + +#define DRIVER_NAME "au1200fb" +#define DRIVER_DESC "LCD controller driver for AU1200 processors" + +#define DEBUG 1 + +#define print_err(f, arg...) printk(KERN_ERR DRIVER_NAME ": " f "\n", ## arg) +#define print_warn(f, arg...) printk(KERN_WARNING DRIVER_NAME ": " f "\n", ## arg) +#define print_info(f, arg...) printk(KERN_INFO DRIVER_NAME ": " f "\n", ## arg) + +#if DEBUG +#define print_dbg(f, arg...) printk(KERN_DEBUG __FILE__ ": " f "\n", ## arg) +#else +#define print_dbg(f, arg...) 
do {} while (0) +#endif + + +#define AU1200_LCD_FB_IOCTL 0x46FF + +#define AU1200_LCD_SET_SCREEN 1 +#define AU1200_LCD_GET_SCREEN 2 +#define AU1200_LCD_SET_WINDOW 3 +#define AU1200_LCD_GET_WINDOW 4 +#define AU1200_LCD_SET_PANEL 5 +#define AU1200_LCD_GET_PANEL 6 + +#define SCREEN_SIZE (1<< 1) +#define SCREEN_BACKCOLOR (1<< 2) +#define SCREEN_BRIGHTNESS (1<< 3) +#define SCREEN_COLORKEY (1<< 4) +#define SCREEN_MASK (1<< 5) + +struct au1200_lcd_global_regs_t { + unsigned int flags; + unsigned int xsize; + unsigned int ysize; + unsigned int backcolor; + unsigned int brightness; + unsigned int colorkey; + unsigned int mask; + unsigned int panel_choice; + char panel_desc[80]; + +}; + +#define WIN_POSITION (1<< 0) +#define WIN_ALPHA_COLOR (1<< 1) +#define WIN_ALPHA_MODE (1<< 2) +#define WIN_PRIORITY (1<< 3) +#define WIN_CHANNEL (1<< 4) +#define WIN_BUFFER_FORMAT (1<< 5) +#define WIN_COLOR_ORDER (1<< 6) +#define WIN_PIXEL_ORDER (1<< 7) +#define WIN_SIZE (1<< 8) +#define WIN_COLORKEY_MODE (1<< 9) +#define WIN_DOUBLE_BUFFER_MODE (1<< 10) +#define WIN_RAM_ARRAY_MODE (1<< 11) +#define WIN_BUFFER_SCALE (1<< 12) +#define WIN_ENABLE (1<< 13) + +struct au1200_lcd_window_regs_t { + unsigned int flags; + unsigned int xpos; + unsigned int ypos; + unsigned int alpha_color; + unsigned int alpha_mode; + unsigned int priority; + unsigned int channel; + unsigned int buffer_format; + unsigned int color_order; + unsigned int pixel_order; + unsigned int xsize; + unsigned int ysize; + unsigned int colorkey_mode; + unsigned int double_buffer_mode; + unsigned int ram_array_mode; + unsigned int xscale; + unsigned int yscale; + unsigned int enable; +}; + + +struct au1200_lcd_iodata_t { + unsigned int subcmd; + struct au1200_lcd_global_regs_t global; + struct au1200_lcd_window_regs_t window; +}; + +#if defined(__BIG_ENDIAN) +#define LCD_CONTROL_DEFAULT_PO LCD_CONTROL_PO_11 +#else +#define LCD_CONTROL_DEFAULT_PO LCD_CONTROL_PO_00 +#endif +#define LCD_CONTROL_DEFAULT_SBPPF LCD_CONTROL_SBPPF_565 + +/* Private, per-framebuffer management information (independent of the panel itself) */ +struct au1200fb_device { + struct fb_info fb_info; /* FB driver info record */ + + int plane; + unsigned char* fb_mem; /* FrameBuffer memory map */ + unsigned int fb_len; + dma_addr_t fb_phys; +}; + +static struct au1200fb_device _au1200fb_devices[CONFIG_FB_AU1200_DEVS]; +/********************************************************************/ + +/* LCD controller restrictions */ +#define AU1200_LCD_MAX_XRES 1280 +#define AU1200_LCD_MAX_YRES 1024 +#define AU1200_LCD_MAX_BPP 32 +#define AU1200_LCD_MAX_CLK 96000000 /* fixme: this needs to go away ? 
*/ +#define AU1200_LCD_NBR_PALETTE_ENTRIES 256 + +/* Default number of visible screen buffer to allocate */ +#define AU1200FB_NBR_VIDEO_BUFFERS 1 + +/********************************************************************/ + +static struct au1200_lcd *lcd = (struct au1200_lcd *) AU1200_LCD_ADDR; +static int window_index = 2; /* default is zero */ +static int panel_index = 2; /* default is zero */ +static struct window_settings *win; +static struct panel_settings *panel; +static int noblanking = 1; +static int nohwcursor = 0; + +struct window_settings { + unsigned char name[64]; + uint32 mode_backcolor; + uint32 mode_colorkey; + uint32 mode_colorkeymsk; + struct { + int xres; + int yres; + int xpos; + int ypos; + uint32 mode_winctrl1; /* winctrl1[FRM,CCO,PO,PIPE] */ + uint32 mode_winenable; + } w[4]; +}; + +#if defined(__BIG_ENDIAN) +#define LCD_WINCTRL1_PO_16BPP LCD_WINCTRL1_PO_00 +#else +#define LCD_WINCTRL1_PO_16BPP LCD_WINCTRL1_PO_01 +#endif + +extern int board_au1200fb_panel_init (void); +extern int board_au1200fb_panel_shutdown (void); + +#ifdef CONFIG_PM +int au1200fb_pm_callback(au1xxx_power_dev_t *dev, + au1xxx_request_t request, void *data); +au1xxx_power_dev_t *LCD_pm_dev; +#endif + +/* + * Default window configurations + */ +static struct window_settings windows[] = { + { /* Index 0 */ + "0-FS gfx, 1-video, 2-ovly gfx, 3-ovly gfx", + /* mode_backcolor */ 0x006600ff, + /* mode_colorkey,msk*/ 0, 0, + { + { + /* xres, yres, xpos, ypos */ 0, 0, 0, 0, + /* mode_winctrl1 */ LCD_WINCTRL1_FRM_16BPP565 | + LCD_WINCTRL1_PO_16BPP, + /* mode_winenable*/ LCD_WINENABLE_WEN0, + }, + { + /* xres, yres, xpos, ypos */ 100, 100, 100, 100, + /* mode_winctrl1 */ LCD_WINCTRL1_FRM_16BPP565 | + LCD_WINCTRL1_PO_16BPP | + LCD_WINCTRL1_PIPE, + /* mode_winenable*/ LCD_WINENABLE_WEN1, + }, + { + /* xres, yres, xpos, ypos */ 0, 0, 0, 0, + /* mode_winctrl1 */ LCD_WINCTRL1_FRM_16BPP565 | + LCD_WINCTRL1_PO_16BPP, + /* mode_winenable*/ 0, + }, + { + /* xres, yres, xpos, ypos */ 0, 0, 0, 0, + /* mode_winctrl1 */ LCD_WINCTRL1_FRM_16BPP565 | + LCD_WINCTRL1_PO_16BPP | + LCD_WINCTRL1_PIPE, + /* mode_winenable*/ 0, + }, + }, + }, + + { /* Index 1 */ + "0-FS gfx, 1-video, 2-ovly gfx, 3-ovly gfx", + /* mode_backcolor */ 0x006600ff, + /* mode_colorkey,msk*/ 0, 0, + { + { + /* xres, yres, xpos, ypos */ 320, 240, 5, 5, + /* mode_winctrl1 */ LCD_WINCTRL1_FRM_24BPP | + LCD_WINCTRL1_PO_00, + /* mode_winenable*/ LCD_WINENABLE_WEN0, + }, + { + /* xres, yres, xpos, ypos */ 0, 0, 0, 0, + /* mode_winctrl1 */ LCD_WINCTRL1_FRM_16BPP565 + | LCD_WINCTRL1_PO_16BPP, + /* mode_winenable*/ 0, + }, + { + /* xres, yres, xpos, ypos */ 100, 100, 0, 0, + /* mode_winctrl1 */ LCD_WINCTRL1_FRM_16BPP565 | + LCD_WINCTRL1_PO_16BPP | + LCD_WINCTRL1_PIPE, + /* mode_winenable*/ 0/*LCD_WINENABLE_WEN2*/, + }, + { + /* xres, yres, xpos, ypos */ 200, 25, 0, 0, + /* mode_winctrl1 */ LCD_WINCTRL1_FRM_16BPP565 | + LCD_WINCTRL1_PO_16BPP | + LCD_WINCTRL1_PIPE, + /* mode_winenable*/ 0, + }, + }, + }, + { /* Index 2 */ + "0-FS gfx, 1-video, 2-ovly gfx, 3-ovly gfx", + /* mode_backcolor */ 0x006600ff, + /* mode_colorkey,msk*/ 0, 0, + { + { + /* xres, yres, xpos, ypos */ 0, 0, 0, 0, + /* mode_winctrl1 */ LCD_WINCTRL1_FRM_16BPP565 | + LCD_WINCTRL1_PO_16BPP, + /* mode_winenable*/ LCD_WINENABLE_WEN0, + }, + { + /* xres, yres, xpos, ypos */ 0, 0, 0, 0, + /* mode_winctrl1 */ LCD_WINCTRL1_FRM_16BPP565 | + LCD_WINCTRL1_PO_16BPP, + /* mode_winenable*/ 0, + }, + { + /* xres, yres, xpos, ypos */ 0, 0, 0, 0, + /* mode_winctrl1 */ LCD_WINCTRL1_FRM_32BPP | + 
LCD_WINCTRL1_PO_00|LCD_WINCTRL1_PIPE, + /* mode_winenable*/ 0/*LCD_WINENABLE_WEN2*/, + }, + { + /* xres, yres, xpos, ypos */ 0, 0, 0, 0, + /* mode_winctrl1 */ LCD_WINCTRL1_FRM_16BPP565 | + LCD_WINCTRL1_PO_16BPP | + LCD_WINCTRL1_PIPE, + /* mode_winenable*/ 0, + }, + }, + }, + /* Need VGA 640 @ 24bpp, @ 32bpp */ + /* Need VGA 800 @ 24bpp, @ 32bpp */ + /* Need VGA 1024 @ 24bpp, @ 32bpp */ +}; + +/* + * Controller configurations for various panels. + */ + +struct panel_settings +{ + const char name[25]; /* Full name <vendor>_<model> */ + + struct fb_monspecs monspecs; /* FB monitor specs */ + + /* panel timings */ + uint32 mode_screen; + uint32 mode_horztiming; + uint32 mode_verttiming; + uint32 mode_clkcontrol; + uint32 mode_pwmdiv; + uint32 mode_pwmhi; + uint32 mode_outmask; + uint32 mode_fifoctrl; + uint32 mode_toyclksrc; + uint32 mode_backlight; + uint32 mode_auxpll; + int (*device_init)(void); + int (*device_shutdown)(void); +#define Xres min_xres +#define Yres min_yres + u32 min_xres; /* Minimum horizontal resolution */ + u32 max_xres; /* Maximum horizontal resolution */ + u32 min_yres; /* Minimum vertical resolution */ + u32 max_yres; /* Maximum vertical resolution */ +}; + +/********************************************************************/ +/* fixme: Maybe a modedb for the CRT ? otherwise panels should be as-is */ + +/* List of panels known to work with the AU1200 LCD controller. + * To add a new panel, enter the same specifications as the + * Generic_TFT one, and MAKE SURE that it doesn't conflicts + * with the controller restrictions. Restrictions are: + * + * STN color panels: max_bpp <= 12 + * STN mono panels: max_bpp <= 4 + * TFT panels: max_bpp <= 16 + * max_xres <= 800 + * max_yres <= 600 + */ +static struct panel_settings known_lcd_panels[] = +{ + [0] = { /* QVGA 320x240 H:33.3kHz V:110Hz */ + .name = "QVGA_320x240", + .monspecs = { + .modedb = NULL, + .modedb_len = 0, + .hfmin = 30000, + .hfmax = 70000, + .vfmin = 60, + .vfmax = 60, + .dclkmin = 6000000, + .dclkmax = 28000000, + .input = FB_DISP_RGB, + }, + .mode_screen = LCD_SCREEN_SX_N(320) | + LCD_SCREEN_SY_N(240), + .mode_horztiming = 0x00c4623b, + .mode_verttiming = 0x00502814, + .mode_clkcontrol = 0x00020002, /* /4=24Mhz */ + .mode_pwmdiv = 0x00000000, + .mode_pwmhi = 0x00000000, + .mode_outmask = 0x00FFFFFF, + .mode_fifoctrl = 0x2f2f2f2f, + .mode_toyclksrc = 0x00000004, /* AUXPLL directly */ + .mode_backlight = 0x00000000, + .mode_auxpll = 8, /* 96MHz AUXPLL */ + .device_init = NULL, + .device_shutdown = NULL, + 320, 320, + 240, 240, + }, + + [1] = { /* VGA 640x480 H:30.3kHz V:58Hz */ + .name = "VGA_640x480", + .monspecs = { + .modedb = NULL, + .modedb_len = 0, + .hfmin = 30000, + .hfmax = 70000, + .vfmin = 60, + .vfmax = 60, + .dclkmin = 6000000, + .dclkmax = 28000000, + .input = FB_DISP_RGB, + }, + .mode_screen = 0x13f9df80, + .mode_horztiming = 0x003c5859, + .mode_verttiming = 0x00741201, + .mode_clkcontrol = 0x00020001, /* /4=24Mhz */ + .mode_pwmdiv = 0x00000000, + .mode_pwmhi = 0x00000000, + .mode_outmask = 0x00FFFFFF, + .mode_fifoctrl = 0x2f2f2f2f, + .mode_toyclksrc = 0x00000004, /* AUXPLL directly */ + .mode_backlight = 0x00000000, + .mode_auxpll = 8, /* 96MHz AUXPLL */ + .device_init = NULL, + .device_shutdown = NULL, + 640, 480, + 640, 480, + }, + + [2] = { /* SVGA 800x600 H:46.1kHz V:69Hz */ + .name = "SVGA_800x600", + .monspecs = { + .modedb = NULL, + .modedb_len = 0, + .hfmin = 30000, + .hfmax = 70000, + .vfmin = 60, + .vfmax = 60, + .dclkmin = 6000000, + .dclkmax = 28000000, + .input = FB_DISP_RGB, + }, 
+ .mode_screen = 0x18fa5780, + .mode_horztiming = 0x00dc7e77, + .mode_verttiming = 0x00584805, + .mode_clkcontrol = 0x00020000, /* /2=48Mhz */ + .mode_pwmdiv = 0x00000000, + .mode_pwmhi = 0x00000000, + .mode_outmask = 0x00FFFFFF, + .mode_fifoctrl = 0x2f2f2f2f, + .mode_toyclksrc = 0x00000004, /* AUXPLL directly */ + .mode_backlight = 0x00000000, + .mode_auxpll = 8, /* 96MHz AUXPLL */ + .device_init = NULL, + .device_shutdown = NULL, + 800, 800, + 600, 600, + }, + + [3] = { /* XVGA 1024x768 H:56.2kHz V:70Hz */ + .name = "XVGA_1024x768", + .monspecs = { + .modedb = NULL, + .modedb_len = 0, + .hfmin = 30000, + .hfmax = 70000, + .vfmin = 60, + .vfmax = 60, + .dclkmin = 6000000, + .dclkmax = 28000000, + .input = FB_DISP_RGB, + }, + .mode_screen = 0x1ffaff80, + .mode_horztiming = 0x007d0e57, + .mode_verttiming = 0x00740a01, + .mode_clkcontrol = 0x000A0000, /* /1 */ + .mode_pwmdiv = 0x00000000, + .mode_pwmhi = 0x00000000, + .mode_outmask = 0x00FFFFFF, + .mode_fifoctrl = 0x2f2f2f2f, + .mode_toyclksrc = 0x00000004, /* AUXPLL directly */ + .mode_backlight = 0x00000000, + .mode_auxpll = 6, /* 72MHz AUXPLL */ + .device_init = NULL, + .device_shutdown = NULL, + 1024, 1024, + 768, 768, + }, + + [4] = { /* XVGA XVGA 1280x1024 H:68.5kHz V:65Hz */ + .name = "XVGA_1280x1024", + .monspecs = { + .modedb = NULL, + .modedb_len = 0, + .hfmin = 30000, + .hfmax = 70000, + .vfmin = 60, + .vfmax = 60, + .dclkmin = 6000000, + .dclkmax = 28000000, + .input = FB_DISP_RGB, + }, + .mode_screen = 0x27fbff80, + .mode_horztiming = 0x00cdb2c7, + .mode_verttiming = 0x00600002, + .mode_clkcontrol = 0x000A0000, /* /1 */ + .mode_pwmdiv = 0x00000000, + .mode_pwmhi = 0x00000000, + .mode_outmask = 0x00FFFFFF, + .mode_fifoctrl = 0x2f2f2f2f, + .mode_toyclksrc = 0x00000004, /* AUXPLL directly */ + .mode_backlight = 0x00000000, + .mode_auxpll = 10, /* 120MHz AUXPLL */ + .device_init = NULL, + .device_shutdown = NULL, + 1280, 1280, + 1024, 1024, + }, + + [5] = { /* Samsung 1024x768 TFT */ + .name = "Samsung_1024x768_TFT", + .monspecs = { + .modedb = NULL, + .modedb_len = 0, + .hfmin = 30000, + .hfmax = 70000, + .vfmin = 60, + .vfmax = 60, + .dclkmin = 6000000, + .dclkmax = 28000000, + .input = FB_DISP_RGB, + }, + .mode_screen = 0x1ffaff80, + .mode_horztiming = 0x018cc677, + .mode_verttiming = 0x00241217, + .mode_clkcontrol = 0x00000000, /* SCB 0x1 /4=24Mhz */ + .mode_pwmdiv = 0x8000063f, /* SCB 0x0 */ + .mode_pwmhi = 0x03400000, /* SCB 0x0 */ + .mode_outmask = 0x00FFFFFF, + .mode_fifoctrl = 0x2f2f2f2f, + .mode_toyclksrc = 0x00000004, /* AUXPLL directly */ + .mode_backlight = 0x00000000, + .mode_auxpll = 8, /* 96MHz AUXPLL */ + .device_init = board_au1200fb_panel_init, + .device_shutdown = board_au1200fb_panel_shutdown, + 1024, 1024, + 768, 768, + }, + + [6] = { /* Toshiba 640x480 TFT */ + .name = "Toshiba_640x480_TFT", + .monspecs = { + .modedb = NULL, + .modedb_len = 0, + .hfmin = 30000, + .hfmax = 70000, + .vfmin = 60, + .vfmax = 60, + .dclkmin = 6000000, + .dclkmax = 28000000, + .input = FB_DISP_RGB, + }, + .mode_screen = LCD_SCREEN_SX_N(640) | + LCD_SCREEN_SY_N(480), + .mode_horztiming = LCD_HORZTIMING_HPW_N(96) | + LCD_HORZTIMING_HND1_N(13) | LCD_HORZTIMING_HND2_N(51), + .mode_verttiming = LCD_VERTTIMING_VPW_N(2) | + LCD_VERTTIMING_VND1_N(11) | LCD_VERTTIMING_VND2_N(32), + .mode_clkcontrol = 0x00000000, /* /4=24Mhz */ + .mode_pwmdiv = 0x8000063f, + .mode_pwmhi = 0x03400000, + .mode_outmask = 0x00fcfcfc, + .mode_fifoctrl = 0x2f2f2f2f, + .mode_toyclksrc = 0x00000004, /* AUXPLL directly */ + .mode_backlight = 0x00000000, + .mode_auxpll 
= 8, /* 96MHz AUXPLL */ + .device_init = board_au1200fb_panel_init, + .device_shutdown = board_au1200fb_panel_shutdown, + 640, 480, + 640, 480, + }, + + [7] = { /* Sharp 320x240 TFT */ + .name = "Sharp_320x240_TFT", + .monspecs = { + .modedb = NULL, + .modedb_len = 0, + .hfmin = 12500, + .hfmax = 20000, + .vfmin = 38, + .vfmax = 81, + .dclkmin = 4500000, + .dclkmax = 6800000, + .input = FB_DISP_RGB, + }, + .mode_screen = LCD_SCREEN_SX_N(320) | + LCD_SCREEN_SY_N(240), + .mode_horztiming = LCD_HORZTIMING_HPW_N(60) | + LCD_HORZTIMING_HND1_N(13) | LCD_HORZTIMING_HND2_N(2), + .mode_verttiming = LCD_VERTTIMING_VPW_N(2) | + LCD_VERTTIMING_VND1_N(2) | LCD_VERTTIMING_VND2_N(5), + .mode_clkcontrol = LCD_CLKCONTROL_PCD_N(7), /*16=6Mhz*/ + .mode_pwmdiv = 0x8000063f, + .mode_pwmhi = 0x03400000, + .mode_outmask = 0x00fcfcfc, + .mode_fifoctrl = 0x2f2f2f2f, + .mode_toyclksrc = 0x00000004, /* AUXPLL directly */ + .mode_backlight = 0x00000000, + .mode_auxpll = 8, /* 96MHz AUXPLL */ + .device_init = board_au1200fb_panel_init, + .device_shutdown = board_au1200fb_panel_shutdown, + 320, 320, + 240, 240, + }, + + [8] = { /* Toppoly TD070WGCB2 7" 856x480 TFT */ + .name = "Toppoly_TD070WGCB2", + .monspecs = { + .modedb = NULL, + .modedb_len = 0, + .hfmin = 30000, + .hfmax = 70000, + .vfmin = 60, + .vfmax = 60, + .dclkmin = 6000000, + .dclkmax = 28000000, + .input = FB_DISP_RGB, + }, + .mode_screen = LCD_SCREEN_SX_N(856) | + LCD_SCREEN_SY_N(480), + .mode_horztiming = LCD_HORZTIMING_HND2_N(43) | + LCD_HORZTIMING_HND1_N(43) | LCD_HORZTIMING_HPW_N(114), + .mode_verttiming = LCD_VERTTIMING_VND2_N(20) | + LCD_VERTTIMING_VND1_N(21) | LCD_VERTTIMING_VPW_N(4), + .mode_clkcontrol = 0x00020001, /* /4=24Mhz */ + .mode_pwmdiv = 0x8000063f, + .mode_pwmhi = 0x03400000, + .mode_outmask = 0x00fcfcfc, + .mode_fifoctrl = 0x2f2f2f2f, + .mode_toyclksrc = 0x00000004, /* AUXPLL directly */ + .mode_backlight = 0x00000000, + .mode_auxpll = 8, /* 96MHz AUXPLL */ + .device_init = board_au1200fb_panel_init, + .device_shutdown = board_au1200fb_panel_shutdown, + 856, 856, + 480, 480, + }, +}; + +#define NUM_PANELS (ARRAY_SIZE(known_lcd_panels)) + +/********************************************************************/ + +#ifdef CONFIG_PM +static int set_brightness(unsigned int brightness) +{ + unsigned int hi1, divider; + + /* limit brightness pwm duty to >= 30/1600 */ + if (brightness < 30) { + brightness = 30; + } + divider = (lcd->pwmdiv & 0x3FFFF) + 1; + hi1 = (lcd->pwmhi >> 16) + 1; + hi1 = (((brightness & 0xFF) + 1) * divider >> 8); + lcd->pwmhi &= 0xFFFF; + lcd->pwmhi |= (hi1 << 16); + + return brightness; +} +#endif /* CONFIG_PM */ + +static int winbpp (unsigned int winctrl1) +{ + int bits = 0; + + /* how many bits are needed for each pixel format */ + switch (winctrl1 & LCD_WINCTRL1_FRM) { + case LCD_WINCTRL1_FRM_1BPP: + bits = 1; + break; + case LCD_WINCTRL1_FRM_2BPP: + bits = 2; + break; + case LCD_WINCTRL1_FRM_4BPP: + bits = 4; + break; + case LCD_WINCTRL1_FRM_8BPP: + bits = 8; + break; + case LCD_WINCTRL1_FRM_12BPP: + case LCD_WINCTRL1_FRM_16BPP655: + case LCD_WINCTRL1_FRM_16BPP565: + case LCD_WINCTRL1_FRM_16BPP556: + case LCD_WINCTRL1_FRM_16BPPI1555: + case LCD_WINCTRL1_FRM_16BPPI5551: + case LCD_WINCTRL1_FRM_16BPPA1555: + case LCD_WINCTRL1_FRM_16BPPA5551: + bits = 16; + break; + case LCD_WINCTRL1_FRM_24BPP: + case LCD_WINCTRL1_FRM_32BPP: + bits = 32; + break; + } + + return bits; +} + +static int fbinfo2index (struct fb_info *fb_info) +{ + int i; + + for (i = 0; i < CONFIG_FB_AU1200_DEVS; ++i) { + if (fb_info == (struct fb_info 
*)(&_au1200fb_devices[i].fb_info)) + return i; + } + printk("au1200fb: ERROR: fbinfo2index failed!\n"); + return -1; +} + +static int au1200_setlocation (struct au1200fb_device *fbdev, int plane, + int xpos, int ypos) +{ + uint32 winctrl0, winctrl1, winenable, fb_offset = 0; + int xsz, ysz; + + /* FIX!!! NOT CHECKING FOR COMPLETE OFFSCREEN YET */ + + winctrl0 = lcd->window[plane].winctrl0; + winctrl1 = lcd->window[plane].winctrl1; + winctrl0 &= (LCD_WINCTRL0_A | LCD_WINCTRL0_AEN); + winctrl1 &= ~(LCD_WINCTRL1_SZX | LCD_WINCTRL1_SZY); + + /* Check for off-screen adjustments */ + xsz = win->w[plane].xres; + ysz = win->w[plane].yres; + if ((xpos + win->w[plane].xres) > panel->Xres) { + /* Off-screen to the right */ + xsz = panel->Xres - xpos; /* off by 1 ??? */ + /*printk("off screen right\n");*/ + } + + if ((ypos + win->w[plane].yres) > panel->Yres) { + /* Off-screen to the bottom */ + ysz = panel->Yres - ypos; /* off by 1 ??? */ + /*printk("off screen bottom\n");*/ + } + + if (xpos < 0) { + /* Off-screen to the left */ + xsz = win->w[plane].xres + xpos; + fb_offset += (((0 - xpos) * winbpp(lcd->window[plane].winctrl1))/8); + xpos = 0; + /*printk("off screen left\n");*/ + } + + if (ypos < 0) { + /* Off-screen to the top */ + ysz = win->w[plane].yres + ypos; + /* fixme: fb_offset += ((0-ypos)*fb_pars[plane].line_length); */ + ypos = 0; + /*printk("off screen top\n");*/ + } + + /* record settings */ + win->w[plane].xpos = xpos; + win->w[plane].ypos = ypos; + + xsz -= 1; + ysz -= 1; + winctrl0 |= (xpos << 21); + winctrl0 |= (ypos << 10); + winctrl1 |= (xsz << 11); + winctrl1 |= (ysz << 0); + + /* Disable the window while making changes, then restore WINEN */ + winenable = lcd->winenable & (1 << plane); + au_sync(); + lcd->winenable &= ~(1 << plane); + lcd->window[plane].winctrl0 = winctrl0; + lcd->window[plane].winctrl1 = winctrl1; + lcd->window[plane].winbuf0 = + lcd->window[plane].winbuf1 = fbdev->fb_phys; + lcd->window[plane].winbufctrl = 0; /* select winbuf0 */ + lcd->winenable |= winenable; + au_sync(); + + return 0; +} + +static void au1200_setpanel (struct panel_settings *newpanel) +{ + /* + * Perform global setup/init of LCD controller + */ + uint32 winenable; + + /* Make sure all windows disabled */ + winenable = lcd->winenable; + lcd->winenable = 0; + au_sync(); + /* + * Ensure everything is disabled before reconfiguring + */ + if (lcd->screen & LCD_SCREEN_SEN) { + /* Wait for vertical sync period */ + lcd->intstatus = LCD_INT_SS; + while ((lcd->intstatus & LCD_INT_SS) == 0) { + au_sync(); + } + + lcd->screen &= ~LCD_SCREEN_SEN; /*disable the controller*/ + + do { + lcd->intstatus = lcd->intstatus; /*clear interrupts*/ + au_sync(); + /*wait for controller to shut down*/ + } while ((lcd->intstatus & LCD_INT_SD) == 0); + + /* Call shutdown of current panel (if up) */ + /* this must occur last, because if an external clock is driving + the controller, the clock cannot be turned off before first + shutting down the controller. 
+ */ + if (panel->device_shutdown != NULL) + panel->device_shutdown(); + } + + /* Newpanel == NULL indicates a shutdown operation only */ + if (newpanel == NULL) + return; + + panel = newpanel; + + printk("Panel(%s), %dx%d\n", panel->name, panel->Xres, panel->Yres); + + /* + * Setup clocking if internal LCD clock source (assumes sys_auxpll valid) + */ + if (!(panel->mode_clkcontrol & LCD_CLKCONTROL_EXT)) + { + uint32 sys_clksrc; + au_writel(panel->mode_auxpll, SYS_AUXPLL); + sys_clksrc = au_readl(SYS_CLKSRC) & ~0x0000001f; + sys_clksrc |= panel->mode_toyclksrc; + au_writel(sys_clksrc, SYS_CLKSRC); + } + + /* + * Configure panel timings + */ + lcd->screen = panel->mode_screen; + lcd->horztiming = panel->mode_horztiming; + lcd->verttiming = panel->mode_verttiming; + lcd->clkcontrol = panel->mode_clkcontrol; + lcd->pwmdiv = panel->mode_pwmdiv; + lcd->pwmhi = panel->mode_pwmhi; + lcd->outmask = panel->mode_outmask; + lcd->fifoctrl = panel->mode_fifoctrl; + au_sync(); + + /* fixme: Check window settings to make sure still valid + * for new geometry */ +#if 0 + au1200_setlocation(fbdev, 0, win->w[0].xpos, win->w[0].ypos); + au1200_setlocation(fbdev, 1, win->w[1].xpos, win->w[1].ypos); + au1200_setlocation(fbdev, 2, win->w[2].xpos, win->w[2].ypos); + au1200_setlocation(fbdev, 3, win->w[3].xpos, win->w[3].ypos); +#endif + lcd->winenable = winenable; + + /* + * Re-enable screen now that it is configured + */ + lcd->screen |= LCD_SCREEN_SEN; + au_sync(); + + /* Call init of panel */ + if (panel->device_init != NULL) panel->device_init(); + + /* FIX!!!! not appropriate on panel change!!! Global setup/init */ + lcd->intenable = 0; + lcd->intstatus = ~0; + lcd->backcolor = win->mode_backcolor; + + /* Setup Color Key - FIX!!! */ + lcd->colorkey = win->mode_colorkey; + lcd->colorkeymsk = win->mode_colorkeymsk; + + /* Setup HWCursor - FIX!!! 
Need to support this eventually */ + lcd->hwc.cursorctrl = 0; + lcd->hwc.cursorpos = 0; + lcd->hwc.cursorcolor0 = 0; + lcd->hwc.cursorcolor1 = 0; + lcd->hwc.cursorcolor2 = 0; + lcd->hwc.cursorcolor3 = 0; + + +#if 0 +#define D(X) printk("%25s: %08X\n", #X, X) + D(lcd->screen); + D(lcd->horztiming); + D(lcd->verttiming); + D(lcd->clkcontrol); + D(lcd->pwmdiv); + D(lcd->pwmhi); + D(lcd->outmask); + D(lcd->fifoctrl); + D(lcd->window[0].winctrl0); + D(lcd->window[0].winctrl1); + D(lcd->window[0].winctrl2); + D(lcd->window[0].winbuf0); + D(lcd->window[0].winbuf1); + D(lcd->window[0].winbufctrl); + D(lcd->window[1].winctrl0); + D(lcd->window[1].winctrl1); + D(lcd->window[1].winctrl2); + D(lcd->window[1].winbuf0); + D(lcd->window[1].winbuf1); + D(lcd->window[1].winbufctrl); + D(lcd->window[2].winctrl0); + D(lcd->window[2].winctrl1); + D(lcd->window[2].winctrl2); + D(lcd->window[2].winbuf0); + D(lcd->window[2].winbuf1); + D(lcd->window[2].winbufctrl); + D(lcd->window[3].winctrl0); + D(lcd->window[3].winctrl1); + D(lcd->window[3].winctrl2); + D(lcd->window[3].winbuf0); + D(lcd->window[3].winbuf1); + D(lcd->window[3].winbufctrl); + D(lcd->winenable); + D(lcd->intenable); + D(lcd->intstatus); + D(lcd->backcolor); + D(lcd->winenable); + D(lcd->colorkey); + D(lcd->colorkeymsk); + D(lcd->hwc.cursorctrl); + D(lcd->hwc.cursorpos); + D(lcd->hwc.cursorcolor0); + D(lcd->hwc.cursorcolor1); + D(lcd->hwc.cursorcolor2); + D(lcd->hwc.cursorcolor3); +#endif +} + +static void au1200_setmode(struct au1200fb_device *fbdev) +{ + int plane = fbdev->plane; + /* Window/plane setup */ + lcd->window[plane].winctrl1 = ( 0 + | LCD_WINCTRL1_PRI_N(plane) + | win->w[plane].mode_winctrl1 /* FRM,CCO,PO,PIPE */ + ) ; + + au1200_setlocation(fbdev, plane, win->w[plane].xpos, win->w[plane].ypos); + + lcd->window[plane].winctrl2 = ( 0 + | LCD_WINCTRL2_CKMODE_00 + | LCD_WINCTRL2_DBM + | LCD_WINCTRL2_BX_N( fbdev->fb_info.fix.line_length) + | LCD_WINCTRL2_SCX_1 + | LCD_WINCTRL2_SCY_1 + ) ; + lcd->winenable |= win->w[plane].mode_winenable; + au_sync(); +} + + +/* Inline helpers */ + +/*#define panel_is_dual(panel) ((panel->mode_screen & LCD_SCREEN_PT) == LCD_SCREEN_PT_010)*/ +/*#define panel_is_active(panel)((panel->mode_screen & LCD_SCREEN_PT) == LCD_SCREEN_PT_010)*/ + +#define panel_is_color(panel) ((panel->mode_screen & LCD_SCREEN_PT) <= LCD_SCREEN_PT_CDSTN) + +/* Bitfields format supported by the controller. 
*/ +static struct fb_bitfield rgb_bitfields[][4] = { + /* Red, Green, Blue, Transp */ + [LCD_WINCTRL1_FRM_16BPP655 >> 25] = + { { 10, 6, 0 }, { 5, 5, 0 }, { 0, 5, 0 }, { 0, 0, 0 } }, + + [LCD_WINCTRL1_FRM_16BPP565 >> 25] = + { { 11, 5, 0 }, { 5, 6, 0 }, { 0, 5, 0 }, { 0, 0, 0 } }, + + [LCD_WINCTRL1_FRM_16BPP556 >> 25] = + { { 11, 5, 0 }, { 6, 5, 0 }, { 0, 6, 0 }, { 0, 0, 0 } }, + + [LCD_WINCTRL1_FRM_16BPPI1555 >> 25] = + { { 10, 5, 0 }, { 5, 5, 0 }, { 0, 5, 0 }, { 0, 0, 0 } }, + + [LCD_WINCTRL1_FRM_16BPPI5551 >> 25] = + { { 11, 5, 0 }, { 6, 5, 0 }, { 1, 5, 0 }, { 0, 0, 0 } }, + + [LCD_WINCTRL1_FRM_16BPPA1555 >> 25] = + { { 10, 5, 0 }, { 5, 5, 0 }, { 0, 5, 0 }, { 15, 1, 0 } }, + + [LCD_WINCTRL1_FRM_16BPPA5551 >> 25] = + { { 11, 5, 0 }, { 6, 5, 0 }, { 1, 5, 0 }, { 0, 1, 0 } }, + + [LCD_WINCTRL1_FRM_24BPP >> 25] = + { { 16, 8, 0 }, { 8, 8, 0 }, { 0, 8, 0 }, { 0, 0, 0 } }, + + [LCD_WINCTRL1_FRM_32BPP >> 25] = + { { 16, 8, 0 }, { 8, 8, 0 }, { 0, 8, 0 }, { 24, 0, 0 } }, +}; + +/*-------------------------------------------------------------------------*/ + +/* Helpers */ + +static void au1200fb_update_fbinfo(struct fb_info *fbi) +{ + /* FIX!!!! This also needs to take the window pixel format into account!!! */ + + /* Update var-dependent FB info */ + if (panel_is_color(panel)) { + if (fbi->var.bits_per_pixel <= 8) { + /* palettized */ + fbi->fix.visual = FB_VISUAL_PSEUDOCOLOR; + fbi->fix.line_length = fbi->var.xres_virtual / + (8/fbi->var.bits_per_pixel); + } else { + /* non-palettized */ + fbi->fix.visual = FB_VISUAL_TRUECOLOR; + fbi->fix.line_length = fbi->var.xres_virtual * (fbi->var.bits_per_pixel / 8); + } + } else { + /* mono FIX!!! mono 8 and 4 bits */ + fbi->fix.visual = FB_VISUAL_MONO10; + fbi->fix.line_length = fbi->var.xres_virtual / 8; + } + + fbi->screen_size = fbi->fix.line_length * fbi->var.yres_virtual; + print_dbg("line length: %d\n", fbi->fix.line_length); + print_dbg("bits_per_pixel: %d\n", fbi->var.bits_per_pixel); +} + +/*-------------------------------------------------------------------------*/ + +/* AU1200 framebuffer driver */ + +/* fb_check_var + * Validate var settings with hardware restrictions and modify it if necessary + */ +static int au1200fb_fb_check_var(struct fb_var_screeninfo *var, + struct fb_info *fbi) +{ + struct au1200fb_device *fbdev = (struct au1200fb_device *)fbi; + u32 pixclock; + int screen_size, plane; + + plane = fbdev->plane; + + /* Make sure that the mode respect all LCD controller and + * panel restrictions. */ + var->xres = win->w[plane].xres; + var->yres = win->w[plane].yres; + + /* No need for virtual resolution support */ + var->xres_virtual = var->xres; + var->yres_virtual = var->yres; + + var->bits_per_pixel = winbpp(win->w[plane].mode_winctrl1); + + screen_size = var->xres_virtual * var->yres_virtual; + if (var->bits_per_pixel > 8) screen_size *= (var->bits_per_pixel / 8); + else screen_size /= (8/var->bits_per_pixel); + + if (fbdev->fb_len < screen_size) + return -EINVAL; /* Virtual screen is to big, abort */ + + /* FIX!!!! what are the implicaitons of ignoring this for windows ??? */ + /* The max LCD clock is fixed to 48MHz (value of AUX_CLK). The pixel + * clock can only be obtain by dividing this value by an even integer. + * Fallback to a slower pixel clock if necessary. 
*/ + pixclock = max((u32)(PICOS2KHZ(var->pixclock) * 1000), fbi->monspecs.dclkmin); + pixclock = min(pixclock, min(fbi->monspecs.dclkmax, (u32)AU1200_LCD_MAX_CLK/2)); + + if (AU1200_LCD_MAX_CLK % pixclock) { + int diff = AU1200_LCD_MAX_CLK % pixclock; + pixclock -= diff; + } + + var->pixclock = KHZ2PICOS(pixclock/1000); +#if 0 + if (!panel_is_active(panel)) { + int pcd = AU1200_LCD_MAX_CLK / (pixclock * 2) - 1; + + if (!panel_is_color(panel) + && (panel->control_base & LCD_CONTROL_MPI) && (pcd < 3)) { + /* STN 8bit mono panel support is up to 6MHz pixclock */ + var->pixclock = KHZ2PICOS(6000); + } else if (!pcd) { + /* Other STN panel support is up to 12MHz */ + var->pixclock = KHZ2PICOS(12000); + } + } +#endif + /* Set bitfield accordingly */ + switch (var->bits_per_pixel) { + case 16: + { + /* 16bpp True color. + * These must be set to MATCH WINCTRL[FORM] */ + int idx; + idx = (win->w[0].mode_winctrl1 & LCD_WINCTRL1_FRM) >> 25; + var->red = rgb_bitfields[idx][0]; + var->green = rgb_bitfields[idx][1]; + var->blue = rgb_bitfields[idx][2]; + var->transp = rgb_bitfields[idx][3]; + break; + } + + case 32: + { + /* 32bpp True color. + * These must be set to MATCH WINCTRL[FORM] */ + int idx; + idx = (win->w[0].mode_winctrl1 & LCD_WINCTRL1_FRM) >> 25; + var->red = rgb_bitfields[idx][0]; + var->green = rgb_bitfields[idx][1]; + var->blue = rgb_bitfields[idx][2]; + var->transp = rgb_bitfields[idx][3]; + break; + } + default: + print_dbg("Unsupported depth %dbpp", var->bits_per_pixel); + return -EINVAL; + } + + return 0; +} + +/* fb_set_par + * Set hardware with var settings. This will enable the controller with a + * specific mode, normally validated with the fb_check_var method + */ +static int au1200fb_fb_set_par(struct fb_info *fbi) +{ + struct au1200fb_device *fbdev = (struct au1200fb_device *)fbi; + + au1200fb_update_fbinfo(fbi); + au1200_setmode(fbdev); + + return 0; +} + +/* fb_setcolreg + * Set color in LCD palette. + */ +static int au1200fb_fb_setcolreg(unsigned regno, unsigned red, unsigned green, + unsigned blue, unsigned transp, struct fb_info *fbi) +{ + volatile u32 *palette = lcd->palette; + u32 value; + + if (regno > (AU1200_LCD_NBR_PALETTE_ENTRIES - 1)) + return -EINVAL; + + if (fbi->var.grayscale) { + /* Convert color to grayscale */ + red = green = blue = + (19595 * red + 38470 * green + 7471 * blue) >> 16; + } + + if (fbi->fix.visual == FB_VISUAL_TRUECOLOR) { + /* Place color in the pseudopalette */ + if (regno > 16) + return -EINVAL; + + palette = (u32*) fbi->pseudo_palette; + + red >>= (16 - fbi->var.red.length); + green >>= (16 - fbi->var.green.length); + blue >>= (16 - fbi->var.blue.length); + + value = (red << fbi->var.red.offset) | + (green << fbi->var.green.offset)| + (blue << fbi->var.blue.offset); + value &= 0xFFFF; + + } else if (1 /*FIX!!! panel_is_active(fbdev->panel)*/) { + /* COLOR TFT PALLETTIZED (use RGB 565) */ + value = (red & 0xF800)|((green >> 5) & + 0x07E0)|((blue >> 11) & 0x001F); + value &= 0xFFFF; + + } else if (0 /*panel_is_color(fbdev->panel)*/) { + /* COLOR STN MODE */ + value = 0x1234; + value &= 0xFFF; + } else { + /* MONOCHROME MODE */ + value = (green >> 12) & 0x000F; + value &= 0xF; + } + + palette[regno] = value; + + return 0; +} + +/* fb_blank + * Blank the screen. 
Depending on the mode, the screen will be + * activated with the backlight color, or desactivated + */ +static int au1200fb_fb_blank(int blank_mode, struct fb_info *fbi) +{ + /* Short-circuit screen blanking */ + if (noblanking) + return 0; + + switch (blank_mode) { + + case FB_BLANK_UNBLANK: + case FB_BLANK_NORMAL: + /* printk("turn on panel\n"); */ + au1200_setpanel(panel); + break; + case FB_BLANK_VSYNC_SUSPEND: + case FB_BLANK_HSYNC_SUSPEND: + case FB_BLANK_POWERDOWN: + /* printk("turn off panel\n"); */ + au1200_setpanel(NULL); + break; + default: + break; + + } + + /* FB_BLANK_NORMAL is a soft blank */ + return (blank_mode == FB_BLANK_NORMAL) ? -EINVAL : 0; +} + +/* fb_mmap + * Map video memory in user space. We don't use the generic fb_mmap + * method mainly to allow the use of the TLB streaming flag (CCA=6) + */ +static int au1200fb_fb_mmap(struct fb_info *info, struct vm_area_struct *vma) + +{ + unsigned int len; + unsigned long start=0, off; + struct au1200fb_device *fbdev = (struct au1200fb_device *) info; + +#ifdef CONFIG_PM + au1xxx_pm_access(LCD_pm_dev); +#endif + + if (vma->vm_pgoff > (~0UL >> PAGE_SHIFT)) { + return -EINVAL; + } + + start = fbdev->fb_phys & PAGE_MASK; + len = PAGE_ALIGN((start & ~PAGE_MASK) + fbdev->fb_len); + + off = vma->vm_pgoff << PAGE_SHIFT; + + if ((vma->vm_end - vma->vm_start + off) > len) { + return -EINVAL; + } + + off += start; + vma->vm_pgoff = off >> PAGE_SHIFT; + + vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot); + pgprot_val(vma->vm_page_prot) |= _CACHE_MASK; /* CCA=7 */ + + vma->vm_flags |= VM_IO; + + return io_remap_pfn_range(vma, vma->vm_start, off >> PAGE_SHIFT, + vma->vm_end - vma->vm_start, + vma->vm_page_prot); + + return 0; +} + +static void set_global(u_int cmd, struct au1200_lcd_global_regs_t *pdata) +{ + + unsigned int hi1, divider; + + /* SCREEN_SIZE: user cannot reset size, must switch panel choice */ + + if (pdata->flags & SCREEN_BACKCOLOR) + lcd->backcolor = pdata->backcolor; + + if (pdata->flags & SCREEN_BRIGHTNESS) { + + // limit brightness pwm duty to >= 30/1600 + if (pdata->brightness < 30) { + pdata->brightness = 30; + } + divider = (lcd->pwmdiv & 0x3FFFF) + 1; + hi1 = (lcd->pwmhi >> 16) + 1; + hi1 = (((pdata->brightness & 0xFF)+1) * divider >> 8); + lcd->pwmhi &= 0xFFFF; + lcd->pwmhi |= (hi1 << 16); + } + + if (pdata->flags & SCREEN_COLORKEY) + lcd->colorkey = pdata->colorkey; + + if (pdata->flags & SCREEN_MASK) + lcd->colorkeymsk = pdata->mask; + au_sync(); +} + +static void get_global(u_int cmd, struct au1200_lcd_global_regs_t *pdata) +{ + unsigned int hi1, divider; + + pdata->xsize = ((lcd->screen & LCD_SCREEN_SX) >> 19) + 1; + pdata->ysize = ((lcd->screen & LCD_SCREEN_SY) >> 8) + 1; + + pdata->backcolor = lcd->backcolor; + pdata->colorkey = lcd->colorkey; + pdata->mask = lcd->colorkeymsk; + + // brightness + hi1 = (lcd->pwmhi >> 16) + 1; + divider = (lcd->pwmdiv & 0x3FFFF) + 1; + pdata->brightness = ((hi1 << 8) / divider) - 1; + au_sync(); +} + +static void set_window(unsigned int plane, + struct au1200_lcd_window_regs_t *pdata) +{ + unsigned int val, bpp; + + /* Window control register 0 */ + if (pdata->flags & WIN_POSITION) { + val = lcd->window[plane].winctrl0 & ~(LCD_WINCTRL0_OX | + LCD_WINCTRL0_OY); + val |= ((pdata->xpos << 21) & LCD_WINCTRL0_OX); + val |= ((pdata->ypos << 10) & LCD_WINCTRL0_OY); + lcd->window[plane].winctrl0 = val; + } + if (pdata->flags & WIN_ALPHA_COLOR) { + val = lcd->window[plane].winctrl0 & ~(LCD_WINCTRL0_A); + val |= ((pdata->alpha_color << 2) & LCD_WINCTRL0_A); + 
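The SCREEN_BRIGHTNESS path in set_global() rescales an 8-bit brightness value into the high half of lcd->pwmhi, using the PWM period taken from lcd->pwmdiv (the TFT panel tables in this driver program mode_pwmdiv = 0x8000063f, i.e. a period of 1600 counts). A minimal sketch of that arithmetic; the standalone helper name is illustrative only and not part of the driver:

	/* Illustration only: mirrors the duty computation in set_global(). */
	static unsigned int pwm_high_count(unsigned int brightness,
					   unsigned int pwmdiv)
	{
		unsigned int period = (pwmdiv & 0x3FFFF) + 1;	/* 0x63f -> 1600 */

		if (brightness < 30)		/* same floor as the driver */
			brightness = 30;
		/* brightness 255 -> 1600 counts, i.e. a fully-on backlight */
		return (((brightness & 0xFF) + 1) * period) >> 8;
	}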
lcd->window[plane].winctrl0 = val; + } + if (pdata->flags & WIN_ALPHA_MODE) { + val = lcd->window[plane].winctrl0 & ~(LCD_WINCTRL0_AEN); + val |= ((pdata->alpha_mode << 1) & LCD_WINCTRL0_AEN); + lcd->window[plane].winctrl0 = val; + } + + /* Window control register 1 */ + if (pdata->flags & WIN_PRIORITY) { + val = lcd->window[plane].winctrl1 & ~(LCD_WINCTRL1_PRI); + val |= ((pdata->priority << 30) & LCD_WINCTRL1_PRI); + lcd->window[plane].winctrl1 = val; + } + if (pdata->flags & WIN_CHANNEL) { + val = lcd->window[plane].winctrl1 & ~(LCD_WINCTRL1_PIPE); + val |= ((pdata->channel << 29) & LCD_WINCTRL1_PIPE); + lcd->window[plane].winctrl1 = val; + } + if (pdata->flags & WIN_BUFFER_FORMAT) { + val = lcd->window[plane].winctrl1 & ~(LCD_WINCTRL1_FRM); + val |= ((pdata->buffer_format << 25) & LCD_WINCTRL1_FRM); + lcd->window[plane].winctrl1 = val; + } + if (pdata->flags & WIN_COLOR_ORDER) { + val = lcd->window[plane].winctrl1 & ~(LCD_WINCTRL1_CCO); + val |= ((pdata->color_order << 24) & LCD_WINCTRL1_CCO); + lcd->window[plane].winctrl1 = val; + } + if (pdata->flags & WIN_PIXEL_ORDER) { + val = lcd->window[plane].winctrl1 & ~(LCD_WINCTRL1_PO); + val |= ((pdata->pixel_order << 22) & LCD_WINCTRL1_PO); + lcd->window[plane].winctrl1 = val; + } + if (pdata->flags & WIN_SIZE) { + val = lcd->window[plane].winctrl1 & ~(LCD_WINCTRL1_SZX | + LCD_WINCTRL1_SZY); + val |= (((pdata->xsize << 11) - 1) & LCD_WINCTRL1_SZX); + val |= (((pdata->ysize) - 1) & LCD_WINCTRL1_SZY); + lcd->window[plane].winctrl1 = val; + /* program buffer line width */ + bpp = winbpp(val) / 8; + val = lcd->window[plane].winctrl2 & ~(LCD_WINCTRL2_BX); + val |= (((pdata->xsize * bpp) << 8) & LCD_WINCTRL2_BX); + lcd->window[plane].winctrl2 = val; + } + + /* Window control register 2 */ + if (pdata->flags & WIN_COLORKEY_MODE) { + val = lcd->window[plane].winctrl2 & ~(LCD_WINCTRL2_CKMODE); + val |= ((pdata->colorkey_mode << 24) & LCD_WINCTRL2_CKMODE); + lcd->window[plane].winctrl2 = val; + } + if (pdata->flags & WIN_DOUBLE_BUFFER_MODE) { + val = lcd->window[plane].winctrl2 & ~(LCD_WINCTRL2_DBM); + val |= ((pdata->double_buffer_mode << 23) & LCD_WINCTRL2_DBM); + lcd->window[plane].winctrl2 = val; + } + if (pdata->flags & WIN_RAM_ARRAY_MODE) { + val = lcd->window[plane].winctrl2 & ~(LCD_WINCTRL2_RAM); + val |= ((pdata->ram_array_mode << 21) & LCD_WINCTRL2_RAM); + lcd->window[plane].winctrl2 = val; + } + + /* Buffer line width programmed with WIN_SIZE */ + + if (pdata->flags & WIN_BUFFER_SCALE) { + val = lcd->window[plane].winctrl2 & ~(LCD_WINCTRL2_SCX | + LCD_WINCTRL2_SCY); + val |= ((pdata->xsize << 11) & LCD_WINCTRL2_SCX); + val |= ((pdata->ysize) & LCD_WINCTRL2_SCY); + lcd->window[plane].winctrl2 = val; + } + + if (pdata->flags & WIN_ENABLE) { + val = lcd->winenable; + val &= ~(1<<plane); + val |= (pdata->enable & 1) << plane; + lcd->winenable = val; + } + au_sync(); +} + +static void get_window(unsigned int plane, + struct au1200_lcd_window_regs_t *pdata) +{ + /* Window control register 0 */ + pdata->xpos = (lcd->window[plane].winctrl0 & LCD_WINCTRL0_OX) >> 21; + pdata->ypos = (lcd->window[plane].winctrl0 & LCD_WINCTRL0_OY) >> 10; + pdata->alpha_color = (lcd->window[plane].winctrl0 & LCD_WINCTRL0_A) >> 2; + pdata->alpha_mode = (lcd->window[plane].winctrl0 & LCD_WINCTRL0_AEN) >> 1; + + /* Window control register 1 */ + pdata->priority = (lcd->window[plane].winctrl1& LCD_WINCTRL1_PRI) >> 30; + pdata->channel = (lcd->window[plane].winctrl1 & LCD_WINCTRL1_PIPE) >> 29; + pdata->buffer_format = (lcd->window[plane].winctrl1 & LCD_WINCTRL1_FRM) >> 25; 
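The AU1200_LCD_FB_IOCTL handler further down routes AU1200_LCD_SET_WINDOW to set_window() for the plane whose framebuffer node was opened (via fbinfo2index()). A hedged sketch of how user space might reposition an overlay through that interface; the /dev/fb2 path and the visibility of the au1200fb ioctl structures to user space are assumptions, not something this patch provides:

	/* Illustration only: move and enable the window bound to /dev/fb2. */
	#include <fcntl.h>
	#include <string.h>
	#include <unistd.h>
	#include <sys/ioctl.h>

	static int move_overlay(int xpos, int ypos)
	{
		struct au1200_lcd_iodata_t iodata;
		int fd = open("/dev/fb2", O_RDWR);
		int ret;

		if (fd < 0)
			return -1;
		memset(&iodata, 0, sizeof(iodata));
		iodata.subcmd = AU1200_LCD_SET_WINDOW;
		iodata.window.flags = WIN_POSITION | WIN_ENABLE;
		iodata.window.xpos = xpos;
		iodata.window.ypos = ypos;
		iodata.window.enable = 1;
		ret = ioctl(fd, AU1200_LCD_FB_IOCTL, &iodata);
		close(fd);
		return ret;
	}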
+ pdata->color_order = (lcd->window[plane].winctrl1 & LCD_WINCTRL1_CCO) >> 24; + pdata->pixel_order = (lcd->window[plane].winctrl1 & LCD_WINCTRL1_PO) >> 22; + pdata->xsize = ((lcd->window[plane].winctrl1 & LCD_WINCTRL1_SZX) >> 11) + 1; + pdata->ysize = (lcd->window[plane].winctrl1 & LCD_WINCTRL1_SZY) + 1; + + /* Window control register 2 */ + pdata->colorkey_mode = (lcd->window[plane].winctrl2 & LCD_WINCTRL2_CKMODE) >> 24; + pdata->double_buffer_mode = (lcd->window[plane].winctrl2 & LCD_WINCTRL2_DBM) >> 23; + pdata->ram_array_mode = (lcd->window[plane].winctrl2 & LCD_WINCTRL2_RAM) >> 21; + + pdata->enable = (lcd->winenable >> plane) & 1; + au_sync(); +} + +static int au1200fb_ioctl(struct fb_info *info, unsigned int cmd, + unsigned long arg) +{ + int plane; + int val; + +#ifdef CONFIG_PM + au1xxx_pm_access(LCD_pm_dev); +#endif + + plane = fbinfo2index(info); + print_dbg("au1200fb: ioctl %d on plane %d\n", cmd, plane); + + if (cmd == AU1200_LCD_FB_IOCTL) { + struct au1200_lcd_iodata_t iodata; + + if (copy_from_user(&iodata, (void __user *) arg, sizeof(iodata))) + return -EFAULT; + + print_dbg("FB IOCTL called\n"); + + switch (iodata.subcmd) { + case AU1200_LCD_SET_SCREEN: + print_dbg("AU1200_LCD_SET_SCREEN\n"); + set_global(cmd, &iodata.global); + break; + + case AU1200_LCD_GET_SCREEN: + print_dbg("AU1200_LCD_GET_SCREEN\n"); + get_global(cmd, &iodata.global); + break; + + case AU1200_LCD_SET_WINDOW: + print_dbg("AU1200_LCD_SET_WINDOW\n"); + set_window(plane, &iodata.window); + break; + + case AU1200_LCD_GET_WINDOW: + print_dbg("AU1200_LCD_GET_WINDOW\n"); + get_window(plane, &iodata.window); + break; + + case AU1200_LCD_SET_PANEL: + print_dbg("AU1200_LCD_SET_PANEL\n"); + if ((iodata.global.panel_choice >= 0) && + (iodata.global.panel_choice < + NUM_PANELS)) + { + struct panel_settings *newpanel; + panel_index = iodata.global.panel_choice; + newpanel = &known_lcd_panels[panel_index]; + au1200_setpanel(newpanel); + } + break; + + case AU1200_LCD_GET_PANEL: + print_dbg("AU1200_LCD_GET_PANEL\n"); + iodata.global.panel_choice = panel_index; + break; + + default: + return -EINVAL; + } + + val = copy_to_user((void __user *) arg, &iodata, sizeof(iodata)); + if (val) { + print_dbg("error: could not copy %d bytes\n", val); + return -EFAULT; + } + } + + return 0; +} + + +static struct fb_ops au1200fb_fb_ops = { + .owner = THIS_MODULE, + .fb_check_var = au1200fb_fb_check_var, + .fb_set_par = au1200fb_fb_set_par, + .fb_setcolreg = au1200fb_fb_setcolreg, + .fb_blank = au1200fb_fb_blank, + .fb_fillrect = cfb_fillrect, + .fb_copyarea = cfb_copyarea, + .fb_imageblit = cfb_imageblit, + .fb_sync = NULL, + .fb_ioctl = au1200fb_ioctl, + .fb_mmap = au1200fb_fb_mmap, +}; + +/*-------------------------------------------------------------------------*/ + +static irqreturn_t au1200fb_handle_irq(int irq, void* dev_id, struct pt_regs *regs) +{ + /* Nothing to do for now, just clear any pending interrupt */ + lcd->intstatus = lcd->intstatus; + au_sync(); + + return IRQ_HANDLED; +} + +/*-------------------------------------------------------------------------*/ + +/* AU1200 LCD device probe helpers */ + +static int au1200fb_init_fbinfo(struct au1200fb_device *fbdev) +{ + struct fb_info *fbi = &fbdev->fb_info; + int bpp; + + memset(fbi, 0, sizeof(struct fb_info)); + fbi->fbops = &au1200fb_fb_ops; + + bpp = winbpp(win->w[fbdev->plane].mode_winctrl1); + + /* Copy monitor specs from panel data */ + /* fixme: we're setting up LCD controller windows, so these dont give a + damn as to what the monitor specs are (the panel 
itself does, but that + isnt done here...so maybe need a generic catchall monitor setting??? */ + memcpy(&fbi->monspecs, &panel->monspecs, sizeof(struct fb_monspecs)); + + /* We first try the user mode passed in argument. If that failed, + * or if no one has been specified, we default to the first mode of the + * panel list. Note that after this call, var data will be set */ + if (!fb_find_mode(&fbi->var, + fbi, + NULL, /* drv_info.opt_mode, */ + fbi->monspecs.modedb, + fbi->monspecs.modedb_len, + fbi->monspecs.modedb, + bpp)) { + + print_err("Cannot find valid mode for panel %s", panel->name); + return -EFAULT; + } + + fbi->pseudo_palette = kmalloc(sizeof(u32) * 16, GFP_KERNEL); + if (!fbi->pseudo_palette) { + return -ENOMEM; + } + memset(fbi->pseudo_palette, 0, sizeof(u32) * 16); + + if (fb_alloc_cmap(&fbi->cmap, AU1200_LCD_NBR_PALETTE_ENTRIES, 0) < 0) { + print_err("Fail to allocate colormap (%d entries)", + AU1200_LCD_NBR_PALETTE_ENTRIES); + kfree(fbi->pseudo_palette); + return -EFAULT; + } + + strncpy(fbi->fix.id, "AU1200", sizeof(fbi->fix.id)); + fbi->fix.smem_start = fbdev->fb_phys; + fbi->fix.smem_len = fbdev->fb_len; + fbi->fix.type = FB_TYPE_PACKED_PIXELS; + fbi->fix.xpanstep = 0; + fbi->fix.ypanstep = 0; + fbi->fix.mmio_start = 0; + fbi->fix.mmio_len = 0; + fbi->fix.accel = FB_ACCEL_NONE; + + fbi->screen_base = (char __iomem *) fbdev->fb_mem; + + au1200fb_update_fbinfo(fbi); + + return 0; +} + +/*-------------------------------------------------------------------------*/ + +/* AU1200 LCD controller device driver */ + +static int au1200fb_drv_probe(struct device *dev) +{ + struct au1200fb_device *fbdev; + unsigned long page; + int bpp, plane, ret; + + if (!dev) + return -EINVAL; + + for (plane = 0; plane < CONFIG_FB_AU1200_DEVS; ++plane) { + bpp = winbpp(win->w[plane].mode_winctrl1); + if (win->w[plane].xres == 0) + win->w[plane].xres = panel->Xres; + if (win->w[plane].yres == 0) + win->w[plane].yres = panel->Yres; + + fbdev = &_au1200fb_devices[plane]; + memset(fbdev, 0, sizeof(struct au1200fb_device)); + fbdev->plane = plane; + + /* Allocate the framebuffer to the maximum screen size */ + fbdev->fb_len = (win->w[plane].xres * win->w[plane].yres * bpp) / 8; + + fbdev->fb_mem = dma_alloc_noncoherent(dev, + PAGE_ALIGN(fbdev->fb_len), + &fbdev->fb_phys, GFP_KERNEL); + if (!fbdev->fb_mem) { + print_err("fail to allocate frambuffer (size: %dK))", + fbdev->fb_len / 1024); + return -ENOMEM; + } + + /* + * Set page reserved so that mmap will work. This is necessary + * since we'll be remapping normal memory. 
+ */ + for (page = (unsigned long)fbdev->fb_phys; + page < PAGE_ALIGN((unsigned long)fbdev->fb_phys + + fbdev->fb_len); + page += PAGE_SIZE) { + SetPageReserved(pfn_to_page(page >> PAGE_SHIFT)); /* LCD DMA is NOT coherent on Au1200 */ + } + print_dbg("Framebuffer memory map at %p", fbdev->fb_mem); + print_dbg("phys=0x%08x, size=%dK", fbdev->fb_phys, fbdev->fb_len / 1024); + + /* Init FB data */ + if ((ret = au1200fb_init_fbinfo(fbdev)) < 0) + goto failed; + + /* Register new framebuffer */ + if ((ret = register_framebuffer(&fbdev->fb_info)) < 0) { + print_err("cannot register new framebuffer"); + goto failed; + } + + au1200fb_fb_set_par(&fbdev->fb_info); + +#if !defined(CONFIG_FRAMEBUFFER_CONSOLE) && defined(CONFIG_LOGO) + if (plane == 0) + if (fb_prepare_logo(&fbdev->fb_info, FB_ROTATE_UR)) { + /* Start display and show logo on boot */ + fb_set_cmap(&fbdev->fb_info.cmap, + &fbdev->fb_info); + + fb_show_logo(&fbdev->fb_info, FB_ROTATE_UR); + } +#endif + } + + /* Now hook interrupt too */ + if ((ret = request_irq(AU1200_LCD_INT, au1200fb_handle_irq, + SA_INTERRUPT | SA_SHIRQ, "lcd", (void *)dev)) < 0) { + print_err("fail to request interrupt line %d (err: %d)", + AU1200_LCD_INT, ret); + goto failed; + } + + return 0; + +failed: + /* NOTE: This only does the current plane/window that failed; others are still active */ + if (fbdev->fb_mem) + dma_free_noncoherent(dev, PAGE_ALIGN(fbdev->fb_len), + fbdev->fb_mem, fbdev->fb_phys); + if (fbdev->fb_info.cmap.len != 0) + fb_dealloc_cmap(&fbdev->fb_info.cmap); + if (fbdev->fb_info.pseudo_palette) + kfree(fbdev->fb_info.pseudo_palette); + if (plane == 0) + free_irq(AU1200_LCD_INT, (void*)dev); + return ret; +} + +static int au1200fb_drv_remove(struct device *dev) +{ + struct au1200fb_device *fbdev; + int plane; + + if (!dev) + return -ENODEV; + + /* Turn off the panel */ + au1200_setpanel(NULL); + + for (plane = 0; plane < CONFIG_FB_AU1200_DEVS; ++plane) + { + fbdev = &_au1200fb_devices[plane]; + + /* Clean up all probe data */ + unregister_framebuffer(&fbdev->fb_info); + if (fbdev->fb_mem) + dma_free_noncoherent(dev, PAGE_ALIGN(fbdev->fb_len), + fbdev->fb_mem, fbdev->fb_phys); + if (fbdev->fb_info.cmap.len != 0) + fb_dealloc_cmap(&fbdev->fb_info.cmap); + if (fbdev->fb_info.pseudo_palette) + kfree(fbdev->fb_info.pseudo_palette); + } + + free_irq(AU1200_LCD_INT, (void *)dev); + + return 0; +} + +#ifdef CONFIG_PM +static int au1200fb_drv_suspend(struct device *dev, u32 state, u32 level) +{ + /* TODO */ + return 0; +} + +static int au1200fb_drv_resume(struct device *dev, u32 level) +{ + /* TODO */ + return 0; +} +#endif /* CONFIG_PM */ + +static struct device_driver au1200fb_driver = { + .name = "au1200-lcd", + .bus = &platform_bus_type, + .probe = au1200fb_drv_probe, + .remove = au1200fb_drv_remove, +#ifdef CONFIG_PM + .suspend = au1200fb_drv_suspend, + .resume = au1200fb_drv_resume, +#endif +}; + +/*-------------------------------------------------------------------------*/ + +/* Kernel driver */ + +static void au1200fb_setup(void) +{ + char* options = NULL; + char* this_opt; + int num_panels = ARRAY_SIZE(known_lcd_panels); + int panel_idx = -1; + + fb_get_options(DRIVER_NAME, &options); + + if (options) { + while ((this_opt = strsep(&options,",")) != NULL) { + /* Panel option - can be panel name, + * "bs" for board-switch, or number/index */ + if (!strncmp(this_opt, "panel:", 6)) { + int i; + long int li; + char *endptr; + this_opt += 6; + /* First check for index, which allows + * to short circuit this mess */ + li = simple_strtol(this_opt, &endptr, 
0); + if (*endptr == '\0') { + panel_idx = (int)li; + } + else if (strcmp(this_opt, "bs") == 0) { + extern int board_au1200fb_panel(void); + panel_idx = board_au1200fb_panel(); + } + + else + for (i = 0; i < num_panels; i++) { + if (!strcmp(this_opt, known_lcd_panels[i].name)) { + panel_idx = i; + break; + } + } + + if ((panel_idx < 0) || (panel_idx >= num_panels)) { + print_warn("Panel %s not supported!", this_opt); + } + else + panel_index = panel_idx; + } + + else if (strncmp(this_opt, "nohwcursor", 10) == 0) { + nohwcursor = 1; + } + + /* Unsupported option */ + else { + print_warn("Unsupported option \"%s\"", this_opt); + } + } + } +} + +#ifdef CONFIG_PM +static int au1200fb_pm_callback(au1xxx_power_dev_t *dev, + au1xxx_request_t request, void *data) { + int retval = -1; + unsigned int d = 0; + unsigned int brightness = 0; + + if (request == AU1XXX_PM_SLEEP) { + board_au1200fb_panel_shutdown(); + } + else if (request == AU1XXX_PM_WAKEUP) { + if(dev->prev_state == SLEEP_STATE) + { + int plane; + au1200_setpanel(panel); + for (plane = 0; plane < CONFIG_FB_AU1200_DEVS; ++plane) { + struct au1200fb_device *fbdev; + fbdev = &_au1200fb_devices[plane]; + au1200fb_fb_set_par(&fbdev->fb_info); + } + } + + d = *((unsigned int*)data); + if(d <=10) brightness = 26; + else if(d<=20) brightness = 51; + else if(d<=30) brightness = 77; + else if(d<=40) brightness = 102; + else if(d<=50) brightness = 128; + else if(d<=60) brightness = 153; + else if(d<=70) brightness = 179; + else if(d<=80) brightness = 204; + else if(d<=90) brightness = 230; + else brightness = 255; + set_brightness(brightness); + } else if (request == AU1XXX_PM_GETSTATUS) { + return dev->cur_state; + } else if (request == AU1XXX_PM_ACCESS) { + if (dev->cur_state != SLEEP_STATE) + return retval; + else { + au1200_setpanel(panel); + } + } else if (request == AU1XXX_PM_IDLE) { + } else if (request == AU1XXX_PM_CLEANUP) { + } + + return retval; +} +#endif + +static int __init au1200fb_init(void) +{ + print_info("" DRIVER_DESC ""); + + /* Setup driver with options */ + au1200fb_setup(); + + /* Point to the panel selected */ + panel = &known_lcd_panels[panel_index]; + win = &windows[window_index]; + + printk(DRIVER_NAME ": Panel %d %s\n", panel_index, panel->name); + printk(DRIVER_NAME ": Win %d %s\n", window_index, win->name); + + /* Kickstart the panel, the framebuffers/windows come soon enough */ + au1200_setpanel(panel); + + #ifdef CONFIG_PM + LCD_pm_dev = new_au1xxx_power_device("LCD", &au1200fb_pm_callback, NULL); + if ( LCD_pm_dev == NULL) + printk(KERN_INFO "Unable to create a power management device entry for the au1200fb.\n"); + else + printk(KERN_INFO "Power management device entry for the au1200fb loaded.\n"); + #endif + + return driver_register(&au1200fb_driver); +} + +static void __exit au1200fb_cleanup(void) +{ + driver_unregister(&au1200fb_driver); +} + +module_init(au1200fb_init); +module_exit(au1200fb_cleanup); + +MODULE_DESCRIPTION(DRIVER_DESC); +MODULE_LICENSE("GPL"); +/* + * BRIEF MODULE DESCRIPTION + * Au1200 LCD Driver. + * + * Copyright 2004-2005 AMD + * Author: AMD + * + * Based on: + * linux/drivers/video/skeletonfb.c -- Skeleton for a frame buffer device + * Created 28 Dec 1997 by Geert Uytterhoeven + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. 
+ * + * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN + * NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, + * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT + * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF + * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON + * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF + * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + * You should have received a copy of the GNU General Public License along + * with this program; if not, write to the Free Software Foundation, Inc., + * 675 Mass Ave, Cambridge, MA 02139, USA. + */ + +#include <linux/module.h> +#include <linux/platform_device.h> +#include <linux/kernel.h> +#include <linux/errno.h> +#include <linux/string.h> +#include <linux/mm.h> +#include <linux/fb.h> +#include <linux/init.h> +#include <linux/interrupt.h> +#include <linux/ctype.h> +#include <linux/dma-mapping.h> + +#include <asm/mach-au1x00/au1000.h> +#include "au1200fb.h" + +#ifdef CONFIG_PM +#include <asm/mach-au1x00/au1xxx_pm.h> +#endif + +#ifndef CONFIG_FB_AU1200_DEVS +#define CONFIG_FB_AU1200_DEVS 4 +#endif + +#define DRIVER_NAME "au1200fb" +#define DRIVER_DESC "LCD controller driver for AU1200 processors" + +#define DEBUG 1 + +#define print_err(f, arg...) printk(KERN_ERR DRIVER_NAME ": " f "\n", ## arg) +#define print_warn(f, arg...) printk(KERN_WARNING DRIVER_NAME ": " f "\n", ## arg) +#define print_info(f, arg...) printk(KERN_INFO DRIVER_NAME ": " f "\n", ## arg) + +#if DEBUG +#define print_dbg(f, arg...) printk(KERN_DEBUG __FILE__ ": " f "\n", ## arg) +#else +#define print_dbg(f, arg...) 
do {} while (0) +#endif + + +#define AU1200_LCD_FB_IOCTL 0x46FF + +#define AU1200_LCD_SET_SCREEN 1 +#define AU1200_LCD_GET_SCREEN 2 +#define AU1200_LCD_SET_WINDOW 3 +#define AU1200_LCD_GET_WINDOW 4 +#define AU1200_LCD_SET_PANEL 5 +#define AU1200_LCD_GET_PANEL 6 + +#define SCREEN_SIZE (1<< 1) +#define SCREEN_BACKCOLOR (1<< 2) +#define SCREEN_BRIGHTNESS (1<< 3) +#define SCREEN_COLORKEY (1<< 4) +#define SCREEN_MASK (1<< 5) + +struct au1200_lcd_global_regs_t { + unsigned int flags; + unsigned int xsize; + unsigned int ysize; + unsigned int backcolor; + unsigned int brightness; + unsigned int colorkey; + unsigned int mask; + unsigned int panel_choice; + char panel_desc[80]; + +}; + +#define WIN_POSITION (1<< 0) +#define WIN_ALPHA_COLOR (1<< 1) +#define WIN_ALPHA_MODE (1<< 2) +#define WIN_PRIORITY (1<< 3) +#define WIN_CHANNEL (1<< 4) +#define WIN_BUFFER_FORMAT (1<< 5) +#define WIN_COLOR_ORDER (1<< 6) +#define WIN_PIXEL_ORDER (1<< 7) +#define WIN_SIZE (1<< 8) +#define WIN_COLORKEY_MODE (1<< 9) +#define WIN_DOUBLE_BUFFER_MODE (1<< 10) +#define WIN_RAM_ARRAY_MODE (1<< 11) +#define WIN_BUFFER_SCALE (1<< 12) +#define WIN_ENABLE (1<< 13) + +struct au1200_lcd_window_regs_t { + unsigned int flags; + unsigned int xpos; + unsigned int ypos; + unsigned int alpha_color; + unsigned int alpha_mode; + unsigned int priority; + unsigned int channel; + unsigned int buffer_format; + unsigned int color_order; + unsigned int pixel_order; + unsigned int xsize; + unsigned int ysize; + unsigned int colorkey_mode; + unsigned int double_buffer_mode; + unsigned int ram_array_mode; + unsigned int xscale; + unsigned int yscale; + unsigned int enable; +}; + + +struct au1200_lcd_iodata_t { + unsigned int subcmd; + struct au1200_lcd_global_regs_t global; + struct au1200_lcd_window_regs_t window; +}; + +#if defined(__BIG_ENDIAN) +#define LCD_CONTROL_DEFAULT_PO LCD_CONTROL_PO_11 +#else +#define LCD_CONTROL_DEFAULT_PO LCD_CONTROL_PO_00 +#endif +#define LCD_CONTROL_DEFAULT_SBPPF LCD_CONTROL_SBPPF_565 + +/* Private, per-framebuffer management information (independent of the panel itself) */ +struct au1200fb_device { + struct fb_info fb_info; /* FB driver info record */ + + int plane; + unsigned char* fb_mem; /* FrameBuffer memory map */ + unsigned int fb_len; + dma_addr_t fb_phys; +}; + +static struct au1200fb_device _au1200fb_devices[CONFIG_FB_AU1200_DEVS]; +/********************************************************************/ + +/* LCD controller restrictions */ +#define AU1200_LCD_MAX_XRES 1280 +#define AU1200_LCD_MAX_YRES 1024 +#define AU1200_LCD_MAX_BPP 32 +#define AU1200_LCD_MAX_CLK 96000000 /* fixme: this needs to go away ? 
*/ +#define AU1200_LCD_NBR_PALETTE_ENTRIES 256 + +/* Default number of visible screen buffer to allocate */ +#define AU1200FB_NBR_VIDEO_BUFFERS 1 + +/********************************************************************/ + +static struct au1200_lcd *lcd = (struct au1200_lcd *) AU1200_LCD_ADDR; +static int window_index = 2; /* default is zero */ +static int panel_index = 2; /* default is zero */ +static struct window_settings *win; +static struct panel_settings *panel; +static int noblanking = 1; +static int nohwcursor = 0; + +struct window_settings { + unsigned char name[64]; + uint32 mode_backcolor; + uint32 mode_colorkey; + uint32 mode_colorkeymsk; + struct { + int xres; + int yres; + int xpos; + int ypos; + uint32 mode_winctrl1; /* winctrl1[FRM,CCO,PO,PIPE] */ + uint32 mode_winenable; + } w[4]; +}; + +#if defined(__BIG_ENDIAN) +#define LCD_WINCTRL1_PO_16BPP LCD_WINCTRL1_PO_00 +#else +#define LCD_WINCTRL1_PO_16BPP LCD_WINCTRL1_PO_01 +#endif + +extern int board_au1200fb_panel_init (void); +extern int board_au1200fb_panel_shutdown (void); + +#ifdef CONFIG_PM +int au1200fb_pm_callback(au1xxx_power_dev_t *dev, + au1xxx_request_t request, void *data); +au1xxx_power_dev_t *LCD_pm_dev; +#endif + +/* + * Default window configurations + */ +static struct window_settings windows[] = { + { /* Index 0 */ + "0-FS gfx, 1-video, 2-ovly gfx, 3-ovly gfx", + /* mode_backcolor */ 0x006600ff, + /* mode_colorkey,msk*/ 0, 0, + { + { + /* xres, yres, xpos, ypos */ 0, 0, 0, 0, + /* mode_winctrl1 */ LCD_WINCTRL1_FRM_16BPP565 | + LCD_WINCTRL1_PO_16BPP, + /* mode_winenable*/ LCD_WINENABLE_WEN0, + }, + { + /* xres, yres, xpos, ypos */ 100, 100, 100, 100, + /* mode_winctrl1 */ LCD_WINCTRL1_FRM_16BPP565 | + LCD_WINCTRL1_PO_16BPP | + LCD_WINCTRL1_PIPE, + /* mode_winenable*/ LCD_WINENABLE_WEN1, + }, + { + /* xres, yres, xpos, ypos */ 0, 0, 0, 0, + /* mode_winctrl1 */ LCD_WINCTRL1_FRM_16BPP565 | + LCD_WINCTRL1_PO_16BPP, + /* mode_winenable*/ 0, + }, + { + /* xres, yres, xpos, ypos */ 0, 0, 0, 0, + /* mode_winctrl1 */ LCD_WINCTRL1_FRM_16BPP565 | + LCD_WINCTRL1_PO_16BPP | + LCD_WINCTRL1_PIPE, + /* mode_winenable*/ 0, + }, + }, + }, + + { /* Index 1 */ + "0-FS gfx, 1-video, 2-ovly gfx, 3-ovly gfx", + /* mode_backcolor */ 0x006600ff, + /* mode_colorkey,msk*/ 0, 0, + { + { + /* xres, yres, xpos, ypos */ 320, 240, 5, 5, + /* mode_winctrl1 */ LCD_WINCTRL1_FRM_24BPP | + LCD_WINCTRL1_PO_00, + /* mode_winenable*/ LCD_WINENABLE_WEN0, + }, + { + /* xres, yres, xpos, ypos */ 0, 0, 0, 0, + /* mode_winctrl1 */ LCD_WINCTRL1_FRM_16BPP565 + | LCD_WINCTRL1_PO_16BPP, + /* mode_winenable*/ 0, + }, + { + /* xres, yres, xpos, ypos */ 100, 100, 0, 0, + /* mode_winctrl1 */ LCD_WINCTRL1_FRM_16BPP565 | + LCD_WINCTRL1_PO_16BPP | + LCD_WINCTRL1_PIPE, + /* mode_winenable*/ 0/*LCD_WINENABLE_WEN2*/, + }, + { + /* xres, yres, xpos, ypos */ 200, 25, 0, 0, + /* mode_winctrl1 */ LCD_WINCTRL1_FRM_16BPP565 | + LCD_WINCTRL1_PO_16BPP | + LCD_WINCTRL1_PIPE, + /* mode_winenable*/ 0, + }, + }, + }, + { /* Index 2 */ + "0-FS gfx, 1-video, 2-ovly gfx, 3-ovly gfx", + /* mode_backcolor */ 0x006600ff, + /* mode_colorkey,msk*/ 0, 0, + { + { + /* xres, yres, xpos, ypos */ 0, 0, 0, 0, + /* mode_winctrl1 */ LCD_WINCTRL1_FRM_16BPP565 | + LCD_WINCTRL1_PO_16BPP, + /* mode_winenable*/ LCD_WINENABLE_WEN0, + }, + { + /* xres, yres, xpos, ypos */ 0, 0, 0, 0, + /* mode_winctrl1 */ LCD_WINCTRL1_FRM_16BPP565 | + LCD_WINCTRL1_PO_16BPP, + /* mode_winenable*/ 0, + }, + { + /* xres, yres, xpos, ypos */ 0, 0, 0, 0, + /* mode_winctrl1 */ LCD_WINCTRL1_FRM_32BPP | + 
LCD_WINCTRL1_PO_00|LCD_WINCTRL1_PIPE, + /* mode_winenable*/ 0/*LCD_WINENABLE_WEN2*/, + }, + { + /* xres, yres, xpos, ypos */ 0, 0, 0, 0, + /* mode_winctrl1 */ LCD_WINCTRL1_FRM_16BPP565 | + LCD_WINCTRL1_PO_16BPP | + LCD_WINCTRL1_PIPE, + /* mode_winenable*/ 0, + }, + }, + }, + /* Need VGA 640 @ 24bpp, @ 32bpp */ + /* Need VGA 800 @ 24bpp, @ 32bpp */ + /* Need VGA 1024 @ 24bpp, @ 32bpp */ +}; + +/* + * Controller configurations for various panels. + */ + +struct panel_settings +{ + const char name[25]; /* Full name <vendor>_<model> */ + + struct fb_monspecs monspecs; /* FB monitor specs */ + + /* panel timings */ + uint32 mode_screen; + uint32 mode_horztiming; + uint32 mode_verttiming; + uint32 mode_clkcontrol; + uint32 mode_pwmdiv; + uint32 mode_pwmhi; + uint32 mode_outmask; + uint32 mode_fifoctrl; + uint32 mode_toyclksrc; + uint32 mode_backlight; + uint32 mode_auxpll; + int (*device_init)(void); + int (*device_shutdown)(void); +#define Xres min_xres +#define Yres min_yres + u32 min_xres; /* Minimum horizontal resolution */ + u32 max_xres; /* Maximum horizontal resolution */ + u32 min_yres; /* Minimum vertical resolution */ + u32 max_yres; /* Maximum vertical resolution */ +}; + +/********************************************************************/ +/* fixme: Maybe a modedb for the CRT ? otherwise panels should be as-is */ + +/* List of panels known to work with the AU1200 LCD controller. + * To add a new panel, enter the same specifications as the + * Generic_TFT one, and MAKE SURE that it doesn't conflicts + * with the controller restrictions. Restrictions are: + * + * STN color panels: max_bpp <= 12 + * STN mono panels: max_bpp <= 4 + * TFT panels: max_bpp <= 16 + * max_xres <= 800 + * max_yres <= 600 + */ +static struct panel_settings known_lcd_panels[] = +{ + [0] = { /* QVGA 320x240 H:33.3kHz V:110Hz */ + .name = "QVGA_320x240", + .monspecs = { + .modedb = NULL, + .modedb_len = 0, + .hfmin = 30000, + .hfmax = 70000, + .vfmin = 60, + .vfmax = 60, + .dclkmin = 6000000, + .dclkmax = 28000000, + .input = FB_DISP_RGB, + }, + .mode_screen = LCD_SCREEN_SX_N(320) | + LCD_SCREEN_SY_N(240), + .mode_horztiming = 0x00c4623b, + .mode_verttiming = 0x00502814, + .mode_clkcontrol = 0x00020002, /* /4=24Mhz */ + .mode_pwmdiv = 0x00000000, + .mode_pwmhi = 0x00000000, + .mode_outmask = 0x00FFFFFF, + .mode_fifoctrl = 0x2f2f2f2f, + .mode_toyclksrc = 0x00000004, /* AUXPLL directly */ + .mode_backlight = 0x00000000, + .mode_auxpll = 8, /* 96MHz AUXPLL */ + .device_init = NULL, + .device_shutdown = NULL, + 320, 320, + 240, 240, + }, + + [1] = { /* VGA 640x480 H:30.3kHz V:58Hz */ + .name = "VGA_640x480", + .monspecs = { + .modedb = NULL, + .modedb_len = 0, + .hfmin = 30000, + .hfmax = 70000, + .vfmin = 60, + .vfmax = 60, + .dclkmin = 6000000, + .dclkmax = 28000000, + .input = FB_DISP_RGB, + }, + .mode_screen = 0x13f9df80, + .mode_horztiming = 0x003c5859, + .mode_verttiming = 0x00741201, + .mode_clkcontrol = 0x00020001, /* /4=24Mhz */ + .mode_pwmdiv = 0x00000000, + .mode_pwmhi = 0x00000000, + .mode_outmask = 0x00FFFFFF, + .mode_fifoctrl = 0x2f2f2f2f, + .mode_toyclksrc = 0x00000004, /* AUXPLL directly */ + .mode_backlight = 0x00000000, + .mode_auxpll = 8, /* 96MHz AUXPLL */ + .device_init = NULL, + .device_shutdown = NULL, + 640, 480, + 640, 480, + }, + + [2] = { /* SVGA 800x600 H:46.1kHz V:69Hz */ + .name = "SVGA_800x600", + .monspecs = { + .modedb = NULL, + .modedb_len = 0, + .hfmin = 30000, + .hfmax = 70000, + .vfmin = 60, + .vfmax = 60, + .dclkmin = 6000000, + .dclkmax = 28000000, + .input = FB_DISP_RGB, + }, 
+ .mode_screen = 0x18fa5780, + .mode_horztiming = 0x00dc7e77, + .mode_verttiming = 0x00584805, + .mode_clkcontrol = 0x00020000, /* /2=48Mhz */ + .mode_pwmdiv = 0x00000000, + .mode_pwmhi = 0x00000000, + .mode_outmask = 0x00FFFFFF, + .mode_fifoctrl = 0x2f2f2f2f, + .mode_toyclksrc = 0x00000004, /* AUXPLL directly */ + .mode_backlight = 0x00000000, + .mode_auxpll = 8, /* 96MHz AUXPLL */ + .device_init = NULL, + .device_shutdown = NULL, + 800, 800, + 600, 600, + }, + + [3] = { /* XVGA 1024x768 H:56.2kHz V:70Hz */ + .name = "XVGA_1024x768", + .monspecs = { + .modedb = NULL, + .modedb_len = 0, + .hfmin = 30000, + .hfmax = 70000, + .vfmin = 60, + .vfmax = 60, + .dclkmin = 6000000, + .dclkmax = 28000000, + .input = FB_DISP_RGB, + }, + .mode_screen = 0x1ffaff80, + .mode_horztiming = 0x007d0e57, + .mode_verttiming = 0x00740a01, + .mode_clkcontrol = 0x000A0000, /* /1 */ + .mode_pwmdiv = 0x00000000, + .mode_pwmhi = 0x00000000, + .mode_outmask = 0x00FFFFFF, + .mode_fifoctrl = 0x2f2f2f2f, + .mode_toyclksrc = 0x00000004, /* AUXPLL directly */ + .mode_backlight = 0x00000000, + .mode_auxpll = 6, /* 72MHz AUXPLL */ + .device_init = NULL, + .device_shutdown = NULL, + 1024, 1024, + 768, 768, + }, + + [4] = { /* XVGA XVGA 1280x1024 H:68.5kHz V:65Hz */ + .name = "XVGA_1280x1024", + .monspecs = { + .modedb = NULL, + .modedb_len = 0, + .hfmin = 30000, + .hfmax = 70000, + .vfmin = 60, + .vfmax = 60, + .dclkmin = 6000000, + .dclkmax = 28000000, + .input = FB_DISP_RGB, + }, + .mode_screen = 0x27fbff80, + .mode_horztiming = 0x00cdb2c7, + .mode_verttiming = 0x00600002, + .mode_clkcontrol = 0x000A0000, /* /1 */ + .mode_pwmdiv = 0x00000000, + .mode_pwmhi = 0x00000000, + .mode_outmask = 0x00FFFFFF, + .mode_fifoctrl = 0x2f2f2f2f, + .mode_toyclksrc = 0x00000004, /* AUXPLL directly */ + .mode_backlight = 0x00000000, + .mode_auxpll = 10, /* 120MHz AUXPLL */ + .device_init = NULL, + .device_shutdown = NULL, + 1280, 1280, + 1024, 1024, + }, + + [5] = { /* Samsung 1024x768 TFT */ + .name = "Samsung_1024x768_TFT", + .monspecs = { + .modedb = NULL, + .modedb_len = 0, + .hfmin = 30000, + .hfmax = 70000, + .vfmin = 60, + .vfmax = 60, + .dclkmin = 6000000, + .dclkmax = 28000000, + .input = FB_DISP_RGB, + }, + .mode_screen = 0x1ffaff80, + .mode_horztiming = 0x018cc677, + .mode_verttiming = 0x00241217, + .mode_clkcontrol = 0x00000000, /* SCB 0x1 /4=24Mhz */ + .mode_pwmdiv = 0x8000063f, /* SCB 0x0 */ + .mode_pwmhi = 0x03400000, /* SCB 0x0 */ + .mode_outmask = 0x00FFFFFF, + .mode_fifoctrl = 0x2f2f2f2f, + .mode_toyclksrc = 0x00000004, /* AUXPLL directly */ + .mode_backlight = 0x00000000, + .mode_auxpll = 8, /* 96MHz AUXPLL */ + .device_init = board_au1200fb_panel_init, + .device_shutdown = board_au1200fb_panel_shutdown, + 1024, 1024, + 768, 768, + }, + + [6] = { /* Toshiba 640x480 TFT */ + .name = "Toshiba_640x480_TFT", + .monspecs = { + .modedb = NULL, + .modedb_len = 0, + .hfmin = 30000, + .hfmax = 70000, + .vfmin = 60, + .vfmax = 60, + .dclkmin = 6000000, + .dclkmax = 28000000, + .input = FB_DISP_RGB, + }, + .mode_screen = LCD_SCREEN_SX_N(640) | + LCD_SCREEN_SY_N(480), + .mode_horztiming = LCD_HORZTIMING_HPW_N(96) | + LCD_HORZTIMING_HND1_N(13) | LCD_HORZTIMING_HND2_N(51), + .mode_verttiming = LCD_VERTTIMING_VPW_N(2) | + LCD_VERTTIMING_VND1_N(11) | LCD_VERTTIMING_VND2_N(32), + .mode_clkcontrol = 0x00000000, /* /4=24Mhz */ + .mode_pwmdiv = 0x8000063f, + .mode_pwmhi = 0x03400000, + .mode_outmask = 0x00fcfcfc, + .mode_fifoctrl = 0x2f2f2f2f, + .mode_toyclksrc = 0x00000004, /* AUXPLL directly */ + .mode_backlight = 0x00000000, + .mode_auxpll 
= 8, /* 96MHz AUXPLL */ + .device_init = board_au1200fb_panel_init, + .device_shutdown = board_au1200fb_panel_shutdown, + 640, 480, + 640, 480, + }, + + [7] = { /* Sharp 320x240 TFT */ + .name = "Sharp_320x240_TFT", + .monspecs = { + .modedb = NULL, + .modedb_len = 0, + .hfmin = 12500, + .hfmax = 20000, + .vfmin = 38, + .vfmax = 81, + .dclkmin = 4500000, + .dclkmax = 6800000, + .input = FB_DISP_RGB, + }, + .mode_screen = LCD_SCREEN_SX_N(320) | + LCD_SCREEN_SY_N(240), + .mode_horztiming = LCD_HORZTIMING_HPW_N(60) | + LCD_HORZTIMING_HND1_N(13) | LCD_HORZTIMING_HND2_N(2), + .mode_verttiming = LCD_VERTTIMING_VPW_N(2) | + LCD_VERTTIMING_VND1_N(2) | LCD_VERTTIMING_VND2_N(5), + .mode_clkcontrol = LCD_CLKCONTROL_PCD_N(7), /*16=6Mhz*/ + .mode_pwmdiv = 0x8000063f, + .mode_pwmhi = 0x03400000, + .mode_outmask = 0x00fcfcfc, + .mode_fifoctrl = 0x2f2f2f2f, + .mode_toyclksrc = 0x00000004, /* AUXPLL directly */ + .mode_backlight = 0x00000000, + .mode_auxpll = 8, /* 96MHz AUXPLL */ + .device_init = board_au1200fb_panel_init, + .device_shutdown = board_au1200fb_panel_shutdown, + 320, 320, + 240, 240, + }, + + [8] = { /* Toppoly TD070WGCB2 7" 856x480 TFT */ + .name = "Toppoly_TD070WGCB2", + .monspecs = { + .modedb = NULL, + .modedb_len = 0, + .hfmin = 30000, + .hfmax = 70000, + .vfmin = 60, + .vfmax = 60, + .dclkmin = 6000000, + .dclkmax = 28000000, + .input = FB_DISP_RGB, + }, + .mode_screen = LCD_SCREEN_SX_N(856) | + LCD_SCREEN_SY_N(480), + .mode_horztiming = LCD_HORZTIMING_HND2_N(43) | + LCD_HORZTIMING_HND1_N(43) | LCD_HORZTIMING_HPW_N(114), + .mode_verttiming = LCD_VERTTIMING_VND2_N(20) | + LCD_VERTTIMING_VND1_N(21) | LCD_VERTTIMING_VPW_N(4), + .mode_clkcontrol = 0x00020001, /* /4=24Mhz */ + .mode_pwmdiv = 0x8000063f, + .mode_pwmhi = 0x03400000, + .mode_outmask = 0x00fcfcfc, + .mode_fifoctrl = 0x2f2f2f2f, + .mode_toyclksrc = 0x00000004, /* AUXPLL directly */ + .mode_backlight = 0x00000000, + .mode_auxpll = 8, /* 96MHz AUXPLL */ + .device_init = board_au1200fb_panel_init, + .device_shutdown = board_au1200fb_panel_shutdown, + 856, 856, + 480, 480, + }, +}; + +#define NUM_PANELS (ARRAY_SIZE(known_lcd_panels)) + +/********************************************************************/ + +#ifdef CONFIG_PM +static int set_brightness(unsigned int brightness) +{ + unsigned int hi1, divider; + + /* limit brightness pwm duty to >= 30/1600 */ + if (brightness < 30) { + brightness = 30; + } + divider = (lcd->pwmdiv & 0x3FFFF) + 1; + hi1 = (lcd->pwmhi >> 16) + 1; + hi1 = (((brightness & 0xFF) + 1) * divider >> 8); + lcd->pwmhi &= 0xFFFF; + lcd->pwmhi |= (hi1 << 16); + + return brightness; +} +#endif /* CONFIG_PM */ + +static int winbpp (unsigned int winctrl1) +{ + int bits = 0; + + /* how many bits are needed for each pixel format */ + switch (winctrl1 & LCD_WINCTRL1_FRM) { + case LCD_WINCTRL1_FRM_1BPP: + bits = 1; + break; + case LCD_WINCTRL1_FRM_2BPP: + bits = 2; + break; + case LCD_WINCTRL1_FRM_4BPP: + bits = 4; + break; + case LCD_WINCTRL1_FRM_8BPP: + bits = 8; + break; + case LCD_WINCTRL1_FRM_12BPP: + case LCD_WINCTRL1_FRM_16BPP655: + case LCD_WINCTRL1_FRM_16BPP565: + case LCD_WINCTRL1_FRM_16BPP556: + case LCD_WINCTRL1_FRM_16BPPI1555: + case LCD_WINCTRL1_FRM_16BPPI5551: + case LCD_WINCTRL1_FRM_16BPPA1555: + case LCD_WINCTRL1_FRM_16BPPA5551: + bits = 16; + break; + case LCD_WINCTRL1_FRM_24BPP: + case LCD_WINCTRL1_FRM_32BPP: + bits = 32; + break; + } + + return bits; +} + +static int fbinfo2index (struct fb_info *fb_info) +{ + int i; + + for (i = 0; i < CONFIG_FB_AU1200_DEVS; ++i) { + if (fb_info == (struct fb_info 
*)(&_au1200fb_devices[i].fb_info)) + return i; + } + printk("au1200fb: ERROR: fbinfo2index failed!\n"); + return -1; +} + +static int au1200_setlocation (struct au1200fb_device *fbdev, int plane, + int xpos, int ypos) +{ + uint32 winctrl0, winctrl1, winenable, fb_offset = 0; + int xsz, ysz; + + /* FIX!!! NOT CHECKING FOR COMPLETE OFFSCREEN YET */ + + winctrl0 = lcd->window[plane].winctrl0; + winctrl1 = lcd->window[plane].winctrl1; + winctrl0 &= (LCD_WINCTRL0_A | LCD_WINCTRL0_AEN); + winctrl1 &= ~(LCD_WINCTRL1_SZX | LCD_WINCTRL1_SZY); + + /* Check for off-screen adjustments */ + xsz = win->w[plane].xres; + ysz = win->w[plane].yres; + if ((xpos + win->w[plane].xres) > panel->Xres) { + /* Off-screen to the right */ + xsz = panel->Xres - xpos; /* off by 1 ??? */ + /*printk("off screen right\n");*/ + } + + if ((ypos + win->w[plane].yres) > panel->Yres) { + /* Off-screen to the bottom */ + ysz = panel->Yres - ypos; /* off by 1 ??? */ + /*printk("off screen bottom\n");*/ + } + + if (xpos < 0) { + /* Off-screen to the left */ + xsz = win->w[plane].xres + xpos; + fb_offset += (((0 - xpos) * winbpp(lcd->window[plane].winctrl1))/8); + xpos = 0; + /*printk("off screen left\n");*/ + } + + if (ypos < 0) { + /* Off-screen to the top */ + ysz = win->w[plane].yres + ypos; + /* fixme: fb_offset += ((0-ypos)*fb_pars[plane].line_length); */ + ypos = 0; + /*printk("off screen top\n");*/ + } + + /* record settings */ + win->w[plane].xpos = xpos; + win->w[plane].ypos = ypos; + + xsz -= 1; + ysz -= 1; + winctrl0 |= (xpos << 21); + winctrl0 |= (ypos << 10); + winctrl1 |= (xsz << 11); + winctrl1 |= (ysz << 0); + + /* Disable the window while making changes, then restore WINEN */ + winenable = lcd->winenable & (1 << plane); + au_sync(); + lcd->winenable &= ~(1 << plane); + lcd->window[plane].winctrl0 = winctrl0; + lcd->window[plane].winctrl1 = winctrl1; + lcd->window[plane].winbuf0 = + lcd->window[plane].winbuf1 = fbdev->fb_phys; + lcd->window[plane].winbufctrl = 0; /* select winbuf0 */ + lcd->winenable |= winenable; + au_sync(); + + return 0; +} + +static void au1200_setpanel (struct panel_settings *newpanel) +{ + /* + * Perform global setup/init of LCD controller + */ + uint32 winenable; + + /* Make sure all windows disabled */ + winenable = lcd->winenable; + lcd->winenable = 0; + au_sync(); + /* + * Ensure everything is disabled before reconfiguring + */ + if (lcd->screen & LCD_SCREEN_SEN) { + /* Wait for vertical sync period */ + lcd->intstatus = LCD_INT_SS; + while ((lcd->intstatus & LCD_INT_SS) == 0) { + au_sync(); + } + + lcd->screen &= ~LCD_SCREEN_SEN; /*disable the controller*/ + + do { + lcd->intstatus = lcd->intstatus; /*clear interrupts*/ + au_sync(); + /*wait for controller to shut down*/ + } while ((lcd->intstatus & LCD_INT_SD) == 0); + + /* Call shutdown of current panel (if up) */ + /* this must occur last, because if an external clock is driving + the controller, the clock cannot be turned off before first + shutting down the controller. 
+ */ + if (panel->device_shutdown != NULL) + panel->device_shutdown(); + } + + /* Newpanel == NULL indicates a shutdown operation only */ + if (newpanel == NULL) + return; + + panel = newpanel; + + printk("Panel(%s), %dx%d\n", panel->name, panel->Xres, panel->Yres); + + /* + * Setup clocking if internal LCD clock source (assumes sys_auxpll valid) + */ + if (!(panel->mode_clkcontrol & LCD_CLKCONTROL_EXT)) + { + uint32 sys_clksrc; + au_writel(panel->mode_auxpll, SYS_AUXPLL); + sys_clksrc = au_readl(SYS_CLKSRC) & ~0x0000001f; + sys_clksrc |= panel->mode_toyclksrc; + au_writel(sys_clksrc, SYS_CLKSRC); + } + + /* + * Configure panel timings + */ + lcd->screen = panel->mode_screen; + lcd->horztiming = panel->mode_horztiming; + lcd->verttiming = panel->mode_verttiming; + lcd->clkcontrol = panel->mode_clkcontrol; + lcd->pwmdiv = panel->mode_pwmdiv; + lcd->pwmhi = panel->mode_pwmhi; + lcd->outmask = panel->mode_outmask; + lcd->fifoctrl = panel->mode_fifoctrl; + au_sync(); + + /* fixme: Check window settings to make sure still valid + * for new geometry */ +#if 0 + au1200_setlocation(fbdev, 0, win->w[0].xpos, win->w[0].ypos); + au1200_setlocation(fbdev, 1, win->w[1].xpos, win->w[1].ypos); + au1200_setlocation(fbdev, 2, win->w[2].xpos, win->w[2].ypos); + au1200_setlocation(fbdev, 3, win->w[3].xpos, win->w[3].ypos); +#endif + lcd->winenable = winenable; + + /* + * Re-enable screen now that it is configured + */ + lcd->screen |= LCD_SCREEN_SEN; + au_sync(); + + /* Call init of panel */ + if (panel->device_init != NULL) panel->device_init(); + + /* FIX!!!! not appropriate on panel change!!! Global setup/init */ + lcd->intenable = 0; + lcd->intstatus = ~0; + lcd->backcolor = win->mode_backcolor; + + /* Setup Color Key - FIX!!! */ + lcd->colorkey = win->mode_colorkey; + lcd->colorkeymsk = win->mode_colorkeymsk; + + /* Setup HWCursor - FIX!!! 
Need to support this eventually */ + lcd->hwc.cursorctrl = 0; + lcd->hwc.cursorpos = 0; + lcd->hwc.cursorcolor0 = 0; + lcd->hwc.cursorcolor1 = 0; + lcd->hwc.cursorcolor2 = 0; + lcd->hwc.cursorcolor3 = 0; + + +#if 0 +#define D(X) printk("%25s: %08X\n", #X, X) + D(lcd->screen); + D(lcd->horztiming); + D(lcd->verttiming); + D(lcd->clkcontrol); + D(lcd->pwmdiv); + D(lcd->pwmhi); + D(lcd->outmask); + D(lcd->fifoctrl); + D(lcd->window[0].winctrl0); + D(lcd->window[0].winctrl1); + D(lcd->window[0].winctrl2); + D(lcd->window[0].winbuf0); + D(lcd->window[0].winbuf1); + D(lcd->window[0].winbufctrl); + D(lcd->window[1].winctrl0); + D(lcd->window[1].winctrl1); + D(lcd->window[1].winctrl2); + D(lcd->window[1].winbuf0); + D(lcd->window[1].winbuf1); + D(lcd->window[1].winbufctrl); + D(lcd->window[2].winctrl0); + D(lcd->window[2].winctrl1); + D(lcd->window[2].winctrl2); + D(lcd->window[2].winbuf0); + D(lcd->window[2].winbuf1); + D(lcd->window[2].winbufctrl); + D(lcd->window[3].winctrl0); + D(lcd->window[3].winctrl1); + D(lcd->window[3].winctrl2); + D(lcd->window[3].winbuf0); + D(lcd->window[3].winbuf1); + D(lcd->window[3].winbufctrl); + D(lcd->winenable); + D(lcd->intenable); + D(lcd->intstatus); + D(lcd->backcolor); + D(lcd->winenable); + D(lcd->colorkey); + D(lcd->colorkeymsk); + D(lcd->hwc.cursorctrl); + D(lcd->hwc.cursorpos); + D(lcd->hwc.cursorcolor0); + D(lcd->hwc.cursorcolor1); + D(lcd->hwc.cursorcolor2); + D(lcd->hwc.cursorcolor3); +#endif +} + +static void au1200_setmode(struct au1200fb_device *fbdev) +{ + int plane = fbdev->plane; + /* Window/plane setup */ + lcd->window[plane].winctrl1 = ( 0 + | LCD_WINCTRL1_PRI_N(plane) + | win->w[plane].mode_winctrl1 /* FRM,CCO,PO,PIPE */ + ) ; + + au1200_setlocation(fbdev, plane, win->w[plane].xpos, win->w[plane].ypos); + + lcd->window[plane].winctrl2 = ( 0 + | LCD_WINCTRL2_CKMODE_00 + | LCD_WINCTRL2_DBM + | LCD_WINCTRL2_BX_N( fbdev->fb_info.fix.line_length) + | LCD_WINCTRL2_SCX_1 + | LCD_WINCTRL2_SCY_1 + ) ; + lcd->winenable |= win->w[plane].mode_winenable; + au_sync(); +} + + +/* Inline helpers */ + +/*#define panel_is_dual(panel) ((panel->mode_screen & LCD_SCREEN_PT) == LCD_SCREEN_PT_010)*/ +/*#define panel_is_active(panel)((panel->mode_screen & LCD_SCREEN_PT) == LCD_SCREEN_PT_010)*/ + +#define panel_is_color(panel) ((panel->mode_screen & LCD_SCREEN_PT) <= LCD_SCREEN_PT_CDSTN) + +/* Bitfields format supported by the controller. 
*/ +static struct fb_bitfield rgb_bitfields[][4] = { + /* Red, Green, Blue, Transp */ + [LCD_WINCTRL1_FRM_16BPP655 >> 25] = + { { 10, 6, 0 }, { 5, 5, 0 }, { 0, 5, 0 }, { 0, 0, 0 } }, + + [LCD_WINCTRL1_FRM_16BPP565 >> 25] = + { { 11, 5, 0 }, { 5, 6, 0 }, { 0, 5, 0 }, { 0, 0, 0 } }, + + [LCD_WINCTRL1_FRM_16BPP556 >> 25] = + { { 11, 5, 0 }, { 6, 5, 0 }, { 0, 6, 0 }, { 0, 0, 0 } }, + + [LCD_WINCTRL1_FRM_16BPPI1555 >> 25] = + { { 10, 5, 0 }, { 5, 5, 0 }, { 0, 5, 0 }, { 0, 0, 0 } }, + + [LCD_WINCTRL1_FRM_16BPPI5551 >> 25] = + { { 11, 5, 0 }, { 6, 5, 0 }, { 1, 5, 0 }, { 0, 0, 0 } }, + + [LCD_WINCTRL1_FRM_16BPPA1555 >> 25] = + { { 10, 5, 0 }, { 5, 5, 0 }, { 0, 5, 0 }, { 15, 1, 0 } }, + + [LCD_WINCTRL1_FRM_16BPPA5551 >> 25] = + { { 11, 5, 0 }, { 6, 5, 0 }, { 1, 5, 0 }, { 0, 1, 0 } }, + + [LCD_WINCTRL1_FRM_24BPP >> 25] = + { { 16, 8, 0 }, { 8, 8, 0 }, { 0, 8, 0 }, { 0, 0, 0 } }, + + [LCD_WINCTRL1_FRM_32BPP >> 25] = + { { 16, 8, 0 }, { 8, 8, 0 }, { 0, 8, 0 }, { 24, 0, 0 } }, +}; + +/*-------------------------------------------------------------------------*/ + +/* Helpers */ + +static void au1200fb_update_fbinfo(struct fb_info *fbi) +{ + /* FIX!!!! This also needs to take the window pixel format into account!!! */ + + /* Update var-dependent FB info */ + if (panel_is_color(panel)) { + if (fbi->var.bits_per_pixel <= 8) { + /* palettized */ + fbi->fix.visual = FB_VISUAL_PSEUDOCOLOR; + fbi->fix.line_length = fbi->var.xres_virtual / + (8/fbi->var.bits_per_pixel); + } else { + /* non-palettized */ + fbi->fix.visual = FB_VISUAL_TRUECOLOR; + fbi->fix.line_length = fbi->var.xres_virtual * (fbi->var.bits_per_pixel / 8); + } + } else { + /* mono FIX!!! mono 8 and 4 bits */ + fbi->fix.visual = FB_VISUAL_MONO10; + fbi->fix.line_length = fbi->var.xres_virtual / 8; + } + + fbi->screen_size = fbi->fix.line_length * fbi->var.yres_virtual; + print_dbg("line length: %d\n", fbi->fix.line_length); + print_dbg("bits_per_pixel: %d\n", fbi->var.bits_per_pixel); +} + +/*-------------------------------------------------------------------------*/ + +/* AU1200 framebuffer driver */ + +/* fb_check_var + * Validate var settings with hardware restrictions and modify it if necessary + */ +static int au1200fb_fb_check_var(struct fb_var_screeninfo *var, + struct fb_info *fbi) +{ + struct au1200fb_device *fbdev = (struct au1200fb_device *)fbi; + u32 pixclock; + int screen_size, plane; + + plane = fbdev->plane; + + /* Make sure that the mode respect all LCD controller and + * panel restrictions. */ + var->xres = win->w[plane].xres; + var->yres = win->w[plane].yres; + + /* No need for virtual resolution support */ + var->xres_virtual = var->xres; + var->yres_virtual = var->yres; + + var->bits_per_pixel = winbpp(win->w[plane].mode_winctrl1); + + screen_size = var->xres_virtual * var->yres_virtual; + if (var->bits_per_pixel > 8) screen_size *= (var->bits_per_pixel / 8); + else screen_size /= (8/var->bits_per_pixel); + + if (fbdev->fb_len < screen_size) + return -EINVAL; /* Virtual screen is to big, abort */ + + /* FIX!!!! what are the implicaitons of ignoring this for windows ??? */ + /* The max LCD clock is fixed to 48MHz (value of AUX_CLK). The pixel + * clock can only be obtain by dividing this value by an even integer. + * Fallback to a slower pixel clock if necessary. 
*/ + pixclock = max((u32)(PICOS2KHZ(var->pixclock) * 1000), fbi->monspecs.dclkmin); + pixclock = min(pixclock, min(fbi->monspecs.dclkmax, (u32)AU1200_LCD_MAX_CLK/2)); + + if (AU1200_LCD_MAX_CLK % pixclock) { + int diff = AU1200_LCD_MAX_CLK % pixclock; + pixclock -= diff; + } + + var->pixclock = KHZ2PICOS(pixclock/1000); +#if 0 + if (!panel_is_active(panel)) { + int pcd = AU1200_LCD_MAX_CLK / (pixclock * 2) - 1; + + if (!panel_is_color(panel) + && (panel->control_base & LCD_CONTROL_MPI) && (pcd < 3)) { + /* STN 8bit mono panel support is up to 6MHz pixclock */ + var->pixclock = KHZ2PICOS(6000); + } else if (!pcd) { + /* Other STN panel support is up to 12MHz */ + var->pixclock = KHZ2PICOS(12000); + } + } +#endif + /* Set bitfield accordingly */ + switch (var->bits_per_pixel) { + case 16: + { + /* 16bpp True color. + * These must be set to MATCH WINCTRL[FORM] */ + int idx; + idx = (win->w[0].mode_winctrl1 & LCD_WINCTRL1_FRM) >> 25; + var->red = rgb_bitfields[idx][0]; + var->green = rgb_bitfields[idx][1]; + var->blue = rgb_bitfields[idx][2]; + var->transp = rgb_bitfields[idx][3]; + break; + } + + case 32: + { + /* 32bpp True color. + * These must be set to MATCH WINCTRL[FORM] */ + int idx; + idx = (win->w[0].mode_winctrl1 & LCD_WINCTRL1_FRM) >> 25; + var->red = rgb_bitfields[idx][0]; + var->green = rgb_bitfields[idx][1]; + var->blue = rgb_bitfields[idx][2]; + var->transp = rgb_bitfields[idx][3]; + break; + } + default: + print_dbg("Unsupported depth %dbpp", var->bits_per_pixel); + return -EINVAL; + } + + return 0; +} + +/* fb_set_par + * Set hardware with var settings. This will enable the controller with a + * specific mode, normally validated with the fb_check_var method + */ +static int au1200fb_fb_set_par(struct fb_info *fbi) +{ + struct au1200fb_device *fbdev = (struct au1200fb_device *)fbi; + + au1200fb_update_fbinfo(fbi); + au1200_setmode(fbdev); + + return 0; +} + +/* fb_setcolreg + * Set color in LCD palette. + */ +static int au1200fb_fb_setcolreg(unsigned regno, unsigned red, unsigned green, + unsigned blue, unsigned transp, struct fb_info *fbi) +{ + volatile u32 *palette = lcd->palette; + u32 value; + + if (regno > (AU1200_LCD_NBR_PALETTE_ENTRIES - 1)) + return -EINVAL; + + if (fbi->var.grayscale) { + /* Convert color to grayscale */ + red = green = blue = + (19595 * red + 38470 * green + 7471 * blue) >> 16; + } + + if (fbi->fix.visual == FB_VISUAL_TRUECOLOR) { + /* Place color in the pseudopalette */ + if (regno > 16) + return -EINVAL; + + palette = (u32*) fbi->pseudo_palette; + + red >>= (16 - fbi->var.red.length); + green >>= (16 - fbi->var.green.length); + blue >>= (16 - fbi->var.blue.length); + + value = (red << fbi->var.red.offset) | + (green << fbi->var.green.offset)| + (blue << fbi->var.blue.offset); + value &= 0xFFFF; + + } else if (1 /*FIX!!! panel_is_active(fbdev->panel)*/) { + /* COLOR TFT PALLETTIZED (use RGB 565) */ + value = (red & 0xF800)|((green >> 5) & + 0x07E0)|((blue >> 11) & 0x001F); + value &= 0xFFFF; + + } else if (0 /*panel_is_color(fbdev->panel)*/) { + /* COLOR STN MODE */ + value = 0x1234; + value &= 0xFFF; + } else { + /* MONOCHROME MODE */ + value = (green >> 12) & 0x000F; + value &= 0xF; + } + + palette[regno] = value; + + return 0; +} + +/* fb_blank + * Blank the screen. 
Depending on the mode, the screen will be + * activated with the backlight color, or desactivated + */ +static int au1200fb_fb_blank(int blank_mode, struct fb_info *fbi) +{ + /* Short-circuit screen blanking */ + if (noblanking) + return 0; + + switch (blank_mode) { + + case FB_BLANK_UNBLANK: + case FB_BLANK_NORMAL: + /* printk("turn on panel\n"); */ + au1200_setpanel(panel); + break; + case FB_BLANK_VSYNC_SUSPEND: + case FB_BLANK_HSYNC_SUSPEND: + case FB_BLANK_POWERDOWN: + /* printk("turn off panel\n"); */ + au1200_setpanel(NULL); + break; + default: + break; + + } + + /* FB_BLANK_NORMAL is a soft blank */ + return (blank_mode == FB_BLANK_NORMAL) ? -EINVAL : 0; +} + +/* fb_mmap + * Map video memory in user space. We don't use the generic fb_mmap + * method mainly to allow the use of the TLB streaming flag (CCA=6) + */ +static int au1200fb_fb_mmap(struct fb_info *info, struct vm_area_struct *vma) + +{ + unsigned int len; + unsigned long start=0, off; + struct au1200fb_device *fbdev = (struct au1200fb_device *) info; + +#ifdef CONFIG_PM + au1xxx_pm_access(LCD_pm_dev); +#endif + + if (vma->vm_pgoff > (~0UL >> PAGE_SHIFT)) { + return -EINVAL; + } + + start = fbdev->fb_phys & PAGE_MASK; + len = PAGE_ALIGN((start & ~PAGE_MASK) + fbdev->fb_len); + + off = vma->vm_pgoff << PAGE_SHIFT; + + if ((vma->vm_end - vma->vm_start + off) > len) { + return -EINVAL; + } + + off += start; + vma->vm_pgoff = off >> PAGE_SHIFT; + + vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot); + pgprot_val(vma->vm_page_prot) |= _CACHE_MASK; /* CCA=7 */ + + vma->vm_flags |= VM_IO; + + return io_remap_pfn_range(vma, vma->vm_start, off >> PAGE_SHIFT, + vma->vm_end - vma->vm_start, + vma->vm_page_prot); + + return 0; +} + +static void set_global(u_int cmd, struct au1200_lcd_global_regs_t *pdata) +{ + + unsigned int hi1, divider; + + /* SCREEN_SIZE: user cannot reset size, must switch panel choice */ + + if (pdata->flags & SCREEN_BACKCOLOR) + lcd->backcolor = pdata->backcolor; + + if (pdata->flags & SCREEN_BRIGHTNESS) { + + // limit brightness pwm duty to >= 30/1600 + if (pdata->brightness < 30) { + pdata->brightness = 30; + } + divider = (lcd->pwmdiv & 0x3FFFF) + 1; + hi1 = (lcd->pwmhi >> 16) + 1; + hi1 = (((pdata->brightness & 0xFF)+1) * divider >> 8); + lcd->pwmhi &= 0xFFFF; + lcd->pwmhi |= (hi1 << 16); + } + + if (pdata->flags & SCREEN_COLORKEY) + lcd->colorkey = pdata->colorkey; + + if (pdata->flags & SCREEN_MASK) + lcd->colorkeymsk = pdata->mask; + au_sync(); +} + +static void get_global(u_int cmd, struct au1200_lcd_global_regs_t *pdata) +{ + unsigned int hi1, divider; + + pdata->xsize = ((lcd->screen & LCD_SCREEN_SX) >> 19) + 1; + pdata->ysize = ((lcd->screen & LCD_SCREEN_SY) >> 8) + 1; + + pdata->backcolor = lcd->backcolor; + pdata->colorkey = lcd->colorkey; + pdata->mask = lcd->colorkeymsk; + + // brightness + hi1 = (lcd->pwmhi >> 16) + 1; + divider = (lcd->pwmdiv & 0x3FFFF) + 1; + pdata->brightness = ((hi1 << 8) / divider) - 1; + au_sync(); +} + +static void set_window(unsigned int plane, + struct au1200_lcd_window_regs_t *pdata) +{ + unsigned int val, bpp; + + /* Window control register 0 */ + if (pdata->flags & WIN_POSITION) { + val = lcd->window[plane].winctrl0 & ~(LCD_WINCTRL0_OX | + LCD_WINCTRL0_OY); + val |= ((pdata->xpos << 21) & LCD_WINCTRL0_OX); + val |= ((pdata->ypos << 10) & LCD_WINCTRL0_OY); + lcd->window[plane].winctrl0 = val; + } + if (pdata->flags & WIN_ALPHA_COLOR) { + val = lcd->window[plane].winctrl0 & ~(LCD_WINCTRL0_A); + val |= ((pdata->alpha_color << 2) & LCD_WINCTRL0_A); + 
lcd->window[plane].winctrl0 = val; + } + if (pdata->flags & WIN_ALPHA_MODE) { + val = lcd->window[plane].winctrl0 & ~(LCD_WINCTRL0_AEN); + val |= ((pdata->alpha_mode << 1) & LCD_WINCTRL0_AEN); + lcd->window[plane].winctrl0 = val; + } + + /* Window control register 1 */ + if (pdata->flags & WIN_PRIORITY) { + val = lcd->window[plane].winctrl1 & ~(LCD_WINCTRL1_PRI); + val |= ((pdata->priority << 30) & LCD_WINCTRL1_PRI); + lcd->window[plane].winctrl1 = val; + } + if (pdata->flags & WIN_CHANNEL) { + val = lcd->window[plane].winctrl1 & ~(LCD_WINCTRL1_PIPE); + val |= ((pdata->channel << 29) & LCD_WINCTRL1_PIPE); + lcd->window[plane].winctrl1 = val; + } + if (pdata->flags & WIN_BUFFER_FORMAT) { + val = lcd->window[plane].winctrl1 & ~(LCD_WINCTRL1_FRM); + val |= ((pdata->buffer_format << 25) & LCD_WINCTRL1_FRM); + lcd->window[plane].winctrl1 = val; + } + if (pdata->flags & WIN_COLOR_ORDER) { + val = lcd->window[plane].winctrl1 & ~(LCD_WINCTRL1_CCO); + val |= ((pdata->color_order << 24) & LCD_WINCTRL1_CCO); + lcd->window[plane].winctrl1 = val; + } + if (pdata->flags & WIN_PIXEL_ORDER) { + val = lcd->window[plane].winctrl1 & ~(LCD_WINCTRL1_PO); + val |= ((pdata->pixel_order << 22) & LCD_WINCTRL1_PO); + lcd->window[plane].winctrl1 = val; + } + if (pdata->flags & WIN_SIZE) { + val = lcd->window[plane].winctrl1 & ~(LCD_WINCTRL1_SZX | + LCD_WINCTRL1_SZY); + val |= (((pdata->xsize << 11) - 1) & LCD_WINCTRL1_SZX); + val |= (((pdata->ysize) - 1) & LCD_WINCTRL1_SZY); + lcd->window[plane].winctrl1 = val; + /* program buffer line width */ + bpp = winbpp(val) / 8; + val = lcd->window[plane].winctrl2 & ~(LCD_WINCTRL2_BX); + val |= (((pdata->xsize * bpp) << 8) & LCD_WINCTRL2_BX); + lcd->window[plane].winctrl2 = val; + } + + /* Window control register 2 */ + if (pdata->flags & WIN_COLORKEY_MODE) { + val = lcd->window[plane].winctrl2 & ~(LCD_WINCTRL2_CKMODE); + val |= ((pdata->colorkey_mode << 24) & LCD_WINCTRL2_CKMODE); + lcd->window[plane].winctrl2 = val; + } + if (pdata->flags & WIN_DOUBLE_BUFFER_MODE) { + val = lcd->window[plane].winctrl2 & ~(LCD_WINCTRL2_DBM); + val |= ((pdata->double_buffer_mode << 23) & LCD_WINCTRL2_DBM); + lcd->window[plane].winctrl2 = val; + } + if (pdata->flags & WIN_RAM_ARRAY_MODE) { + val = lcd->window[plane].winctrl2 & ~(LCD_WINCTRL2_RAM); + val |= ((pdata->ram_array_mode << 21) & LCD_WINCTRL2_RAM); + lcd->window[plane].winctrl2 = val; + } + + /* Buffer line width programmed with WIN_SIZE */ + + if (pdata->flags & WIN_BUFFER_SCALE) { + val = lcd->window[plane].winctrl2 & ~(LCD_WINCTRL2_SCX | + LCD_WINCTRL2_SCY); + val |= ((pdata->xsize << 11) & LCD_WINCTRL2_SCX); + val |= ((pdata->ysize) & LCD_WINCTRL2_SCY); + lcd->window[plane].winctrl2 = val; + } + + if (pdata->flags & WIN_ENABLE) { + val = lcd->winenable; + val &= ~(1<<plane); + val |= (pdata->enable & 1) << plane; + lcd->winenable = val; + } + au_sync(); +} + +static void get_window(unsigned int plane, + struct au1200_lcd_window_regs_t *pdata) +{ + /* Window control register 0 */ + pdata->xpos = (lcd->window[plane].winctrl0 & LCD_WINCTRL0_OX) >> 21; + pdata->ypos = (lcd->window[plane].winctrl0 & LCD_WINCTRL0_OY) >> 10; + pdata->alpha_color = (lcd->window[plane].winctrl0 & LCD_WINCTRL0_A) >> 2; + pdata->alpha_mode = (lcd->window[plane].winctrl0 & LCD_WINCTRL0_AEN) >> 1; + + /* Window control register 1 */ + pdata->priority = (lcd->window[plane].winctrl1& LCD_WINCTRL1_PRI) >> 30; + pdata->channel = (lcd->window[plane].winctrl1 & LCD_WINCTRL1_PIPE) >> 29; + pdata->buffer_format = (lcd->window[plane].winctrl1 & LCD_WINCTRL1_FRM) >> 25; 
+ pdata->color_order = (lcd->window[plane].winctrl1 & LCD_WINCTRL1_CCO) >> 24; + pdata->pixel_order = (lcd->window[plane].winctrl1 & LCD_WINCTRL1_PO) >> 22; + pdata->xsize = ((lcd->window[plane].winctrl1 & LCD_WINCTRL1_SZX) >> 11) + 1; + pdata->ysize = (lcd->window[plane].winctrl1 & LCD_WINCTRL1_SZY) + 1; + + /* Window control register 2 */ + pdata->colorkey_mode = (lcd->window[plane].winctrl2 & LCD_WINCTRL2_CKMODE) >> 24; + pdata->double_buffer_mode = (lcd->window[plane].winctrl2 & LCD_WINCTRL2_DBM) >> 23; + pdata->ram_array_mode = (lcd->window[plane].winctrl2 & LCD_WINCTRL2_RAM) >> 21; + + pdata->enable = (lcd->winenable >> plane) & 1; + au_sync(); +} + +static int au1200fb_ioctl(struct fb_info *info, unsigned int cmd, + unsigned long arg) +{ + int plane; + int val; + +#ifdef CONFIG_PM + au1xxx_pm_access(LCD_pm_dev); +#endif + + plane = fbinfo2index(info); + print_dbg("au1200fb: ioctl %d on plane %d\n", cmd, plane); + + if (cmd == AU1200_LCD_FB_IOCTL) { + struct au1200_lcd_iodata_t iodata; + + if (copy_from_user(&iodata, (void __user *) arg, sizeof(iodata))) + return -EFAULT; + + print_dbg("FB IOCTL called\n"); + + switch (iodata.subcmd) { + case AU1200_LCD_SET_SCREEN: + print_dbg("AU1200_LCD_SET_SCREEN\n"); + set_global(cmd, &iodata.global); + break; + + case AU1200_LCD_GET_SCREEN: + print_dbg("AU1200_LCD_GET_SCREEN\n"); + get_global(cmd, &iodata.global); + break; + + case AU1200_LCD_SET_WINDOW: + print_dbg("AU1200_LCD_SET_WINDOW\n"); + set_window(plane, &iodata.window); + break; + + case AU1200_LCD_GET_WINDOW: + print_dbg("AU1200_LCD_GET_WINDOW\n"); + get_window(plane, &iodata.window); + break; + + case AU1200_LCD_SET_PANEL: + print_dbg("AU1200_LCD_SET_PANEL\n"); + if ((iodata.global.panel_choice >= 0) && + (iodata.global.panel_choice < + NUM_PANELS)) + { + struct panel_settings *newpanel; + panel_index = iodata.global.panel_choice; + newpanel = &known_lcd_panels[panel_index]; + au1200_setpanel(newpanel); + } + break; + + case AU1200_LCD_GET_PANEL: + print_dbg("AU1200_LCD_GET_PANEL\n"); + iodata.global.panel_choice = panel_index; + break; + + default: + return -EINVAL; + } + + val = copy_to_user((void __user *) arg, &iodata, sizeof(iodata)); + if (val) { + print_dbg("error: could not copy %d bytes\n", val); + return -EFAULT; + } + } + + return 0; +} + + +static struct fb_ops au1200fb_fb_ops = { + .owner = THIS_MODULE, + .fb_check_var = au1200fb_fb_check_var, + .fb_set_par = au1200fb_fb_set_par, + .fb_setcolreg = au1200fb_fb_setcolreg, + .fb_blank = au1200fb_fb_blank, + .fb_fillrect = cfb_fillrect, + .fb_copyarea = cfb_copyarea, + .fb_imageblit = cfb_imageblit, + .fb_sync = NULL, + .fb_ioctl = au1200fb_ioctl, + .fb_mmap = au1200fb_fb_mmap, +}; + +/*-------------------------------------------------------------------------*/ + +static irqreturn_t au1200fb_handle_irq(int irq, void* dev_id, struct pt_regs *regs) +{ + /* Nothing to do for now, just clear any pending interrupt */ + lcd->intstatus = lcd->intstatus; + au_sync(); + + return IRQ_HANDLED; +} + +/*-------------------------------------------------------------------------*/ + +/* AU1200 LCD device probe helpers */ + +static int au1200fb_init_fbinfo(struct au1200fb_device *fbdev) +{ + struct fb_info *fbi = &fbdev->fb_info; + int bpp; + + memset(fbi, 0, sizeof(struct fb_info)); + fbi->fbops = &au1200fb_fb_ops; + + bpp = winbpp(win->w[fbdev->plane].mode_winctrl1); + + /* Copy monitor specs from panel data */ + /* fixme: we're setting up LCD controller windows, so these dont give a + damn as to what the monitor specs are (the panel 
itself does, but that + isnt done here...so maybe need a generic catchall monitor setting??? */ + memcpy(&fbi->monspecs, &panel->monspecs, sizeof(struct fb_monspecs)); + + /* We first try the user mode passed in argument. If that failed, + * or if no one has been specified, we default to the first mode of the + * panel list. Note that after this call, var data will be set */ + if (!fb_find_mode(&fbi->var, + fbi, + NULL, /* drv_info.opt_mode, */ + fbi->monspecs.modedb, + fbi->monspecs.modedb_len, + fbi->monspecs.modedb, + bpp)) { + + print_err("Cannot find valid mode for panel %s", panel->name); + return -EFAULT; + } + + fbi->pseudo_palette = kmalloc(sizeof(u32) * 16, GFP_KERNEL); + if (!fbi->pseudo_palette) { + return -ENOMEM; + } + memset(fbi->pseudo_palette, 0, sizeof(u32) * 16); + + if (fb_alloc_cmap(&fbi->cmap, AU1200_LCD_NBR_PALETTE_ENTRIES, 0) < 0) { + print_err("Fail to allocate colormap (%d entries)", + AU1200_LCD_NBR_PALETTE_ENTRIES); + kfree(fbi->pseudo_palette); + return -EFAULT; + } + + strncpy(fbi->fix.id, "AU1200", sizeof(fbi->fix.id)); + fbi->fix.smem_start = fbdev->fb_phys; + fbi->fix.smem_len = fbdev->fb_len; + fbi->fix.type = FB_TYPE_PACKED_PIXELS; + fbi->fix.xpanstep = 0; + fbi->fix.ypanstep = 0; + fbi->fix.mmio_start = 0; + fbi->fix.mmio_len = 0; + fbi->fix.accel = FB_ACCEL_NONE; + + fbi->screen_base = (char __iomem *) fbdev->fb_mem; + + au1200fb_update_fbinfo(fbi); + + return 0; +} + +/*-------------------------------------------------------------------------*/ + +/* AU1200 LCD controller device driver */ + +static int au1200fb_drv_probe(struct device *dev) +{ + struct au1200fb_device *fbdev; + unsigned long page; + int bpp, plane, ret; + + if (!dev) + return -EINVAL; + + for (plane = 0; plane < CONFIG_FB_AU1200_DEVS; ++plane) { + bpp = winbpp(win->w[plane].mode_winctrl1); + if (win->w[plane].xres == 0) + win->w[plane].xres = panel->Xres; + if (win->w[plane].yres == 0) + win->w[plane].yres = panel->Yres; + + fbdev = &_au1200fb_devices[plane]; + memset(fbdev, 0, sizeof(struct au1200fb_device)); + fbdev->plane = plane; + + /* Allocate the framebuffer to the maximum screen size */ + fbdev->fb_len = (win->w[plane].xres * win->w[plane].yres * bpp) / 8; + + fbdev->fb_mem = dma_alloc_noncoherent(dev, + PAGE_ALIGN(fbdev->fb_len), + &fbdev->fb_phys, GFP_KERNEL); + if (!fbdev->fb_mem) { + print_err("fail to allocate frambuffer (size: %dK))", + fbdev->fb_len / 1024); + return -ENOMEM; + } + + /* + * Set page reserved so that mmap will work. This is necessary + * since we'll be remapping normal memory. 
+ */ + for (page = (unsigned long)fbdev->fb_phys; + page < PAGE_ALIGN((unsigned long)fbdev->fb_phys + + fbdev->fb_len); + page += PAGE_SIZE) { + SetPageReserved(pfn_to_page(page >> PAGE_SHIFT)); /* LCD DMA is NOT coherent on Au1200 */ + } + print_dbg("Framebuffer memory map at %p", fbdev->fb_mem); + print_dbg("phys=0x%08x, size=%dK", fbdev->fb_phys, fbdev->fb_len / 1024); + + /* Init FB data */ + if ((ret = au1200fb_init_fbinfo(fbdev)) < 0) + goto failed; + + /* Register new framebuffer */ + if ((ret = register_framebuffer(&fbdev->fb_info)) < 0) { + print_err("cannot register new framebuffer"); + goto failed; + } + + au1200fb_fb_set_par(&fbdev->fb_info); + +#if !defined(CONFIG_FRAMEBUFFER_CONSOLE) && defined(CONFIG_LOGO) + if (plane == 0) + if (fb_prepare_logo(&fbdev->fb_info, FB_ROTATE_UR)) { + /* Start display and show logo on boot */ + fb_set_cmap(&fbdev->fb_info.cmap, + &fbdev->fb_info); + + fb_show_logo(&fbdev->fb_info, FB_ROTATE_UR); + } +#endif + } + + /* Now hook interrupt too */ + if ((ret = request_irq(AU1200_LCD_INT, au1200fb_handle_irq, + SA_INTERRUPT | SA_SHIRQ, "lcd", (void *)dev)) < 0) { + print_err("fail to request interrupt line %d (err: %d)", + AU1200_LCD_INT, ret); + goto failed; + } + + return 0; + +failed: + /* NOTE: This only does the current plane/window that failed; others are still active */ + if (fbdev->fb_mem) + dma_free_noncoherent(dev, PAGE_ALIGN(fbdev->fb_len), + fbdev->fb_mem, fbdev->fb_phys); + if (fbdev->fb_info.cmap.len != 0) + fb_dealloc_cmap(&fbdev->fb_info.cmap); + if (fbdev->fb_info.pseudo_palette) + kfree(fbdev->fb_info.pseudo_palette); + if (plane == 0) + free_irq(AU1200_LCD_INT, (void*)dev); + return ret; +} + +static int au1200fb_drv_remove(struct device *dev) +{ + struct au1200fb_device *fbdev; + int plane; + + if (!dev) + return -ENODEV; + + /* Turn off the panel */ + au1200_setpanel(NULL); + + for (plane = 0; plane < CONFIG_FB_AU1200_DEVS; ++plane) + { + fbdev = &_au1200fb_devices[plane]; + + /* Clean up all probe data */ + unregister_framebuffer(&fbdev->fb_info); + if (fbdev->fb_mem) + dma_free_noncoherent(dev, PAGE_ALIGN(fbdev->fb_len), + fbdev->fb_mem, fbdev->fb_phys); + if (fbdev->fb_info.cmap.len != 0) + fb_dealloc_cmap(&fbdev->fb_info.cmap); + if (fbdev->fb_info.pseudo_palette) + kfree(fbdev->fb_info.pseudo_palette); + } + + free_irq(AU1200_LCD_INT, (void *)dev); + + return 0; +} + +#ifdef CONFIG_PM +static int au1200fb_drv_suspend(struct device *dev, u32 state, u32 level) +{ + /* TODO */ + return 0; +} + +static int au1200fb_drv_resume(struct device *dev, u32 level) +{ + /* TODO */ + return 0; +} +#endif /* CONFIG_PM */ + +static struct device_driver au1200fb_driver = { + .name = "au1200-lcd", + .bus = &platform_bus_type, + .probe = au1200fb_drv_probe, + .remove = au1200fb_drv_remove, +#ifdef CONFIG_PM + .suspend = au1200fb_drv_suspend, + .resume = au1200fb_drv_resume, +#endif +}; + +/*-------------------------------------------------------------------------*/ + +/* Kernel driver */ + +static void au1200fb_setup(void) +{ + char* options = NULL; + char* this_opt; + int num_panels = ARRAY_SIZE(known_lcd_panels); + int panel_idx = -1; + + fb_get_options(DRIVER_NAME, &options); + + if (options) { + while ((this_opt = strsep(&options,",")) != NULL) { + /* Panel option - can be panel name, + * "bs" for board-switch, or number/index */ + if (!strncmp(this_opt, "panel:", 6)) { + int i; + long int li; + char *endptr; + this_opt += 6; + /* First check for index, which allows + * to short circuit this mess */ + li = simple_strtol(this_opt, &endptr, 
0); + if (*endptr == '\0') { + panel_idx = (int)li; + } + else if (strcmp(this_opt, "bs") == 0) { + extern int board_au1200fb_panel(void); + panel_idx = board_au1200fb_panel(); + } + + else + for (i = 0; i < num_panels; i++) { + if (!strcmp(this_opt, known_lcd_panels[i].name)) { + panel_idx = i; + break; + } + } + + if ((panel_idx < 0) || (panel_idx >= num_panels)) { + print_warn("Panel %s not supported!", this_opt); + } + else + panel_index = panel_idx; + } + + else if (strncmp(this_opt, "nohwcursor", 10) == 0) { + nohwcursor = 1; + } + + /* Unsupported option */ + else { + print_warn("Unsupported option \"%s\"", this_opt); + } + } + } +} + +#ifdef CONFIG_PM +static int au1200fb_pm_callback(au1xxx_power_dev_t *dev, + au1xxx_request_t request, void *data) { + int retval = -1; + unsigned int d = 0; + unsigned int brightness = 0; + + if (request == AU1XXX_PM_SLEEP) { + board_au1200fb_panel_shutdown(); + } + else if (request == AU1XXX_PM_WAKEUP) { + if(dev->prev_state == SLEEP_STATE) + { + int plane; + au1200_setpanel(panel); + for (plane = 0; plane < CONFIG_FB_AU1200_DEVS; ++plane) { + struct au1200fb_device *fbdev; + fbdev = &_au1200fb_devices[plane]; + au1200fb_fb_set_par(&fbdev->fb_info); + } + } + + d = *((unsigned int*)data); + if(d <=10) brightness = 26; + else if(d<=20) brightness = 51; + else if(d<=30) brightness = 77; + else if(d<=40) brightness = 102; + else if(d<=50) brightness = 128; + else if(d<=60) brightness = 153; + else if(d<=70) brightness = 179; + else if(d<=80) brightness = 204; + else if(d<=90) brightness = 230; + else brightness = 255; + set_brightness(brightness); + } else if (request == AU1XXX_PM_GETSTATUS) { + return dev->cur_state; + } else if (request == AU1XXX_PM_ACCESS) { + if (dev->cur_state != SLEEP_STATE) + return retval; + else { + au1200_setpanel(panel); + } + } else if (request == AU1XXX_PM_IDLE) { + } else if (request == AU1XXX_PM_CLEANUP) { + } + + return retval; +} +#endif + +static int __init au1200fb_init(void) +{ + print_info("" DRIVER_DESC ""); + + /* Setup driver with options */ + au1200fb_setup(); + + /* Point to the panel selected */ + panel = &known_lcd_panels[panel_index]; + win = &windows[window_index]; + + printk(DRIVER_NAME ": Panel %d %s\n", panel_index, panel->name); + printk(DRIVER_NAME ": Win %d %s\n", window_index, win->name); + + /* Kickstart the panel, the framebuffers/windows come soon enough */ + au1200_setpanel(panel); + + #ifdef CONFIG_PM + LCD_pm_dev = new_au1xxx_power_device("LCD", &au1200fb_pm_callback, NULL); + if ( LCD_pm_dev == NULL) + printk(KERN_INFO "Unable to create a power management device entry for the au1200fb.\n"); + else + printk(KERN_INFO "Power management device entry for the au1200fb loaded.\n"); + #endif + + return driver_register(&au1200fb_driver); +} + +static void __exit au1200fb_cleanup(void) +{ + driver_unregister(&au1200fb_driver); +} + +module_init(au1200fb_init); +module_exit(au1200fb_cleanup); + +MODULE_DESCRIPTION(DRIVER_DESC); +MODULE_LICENSE("GPL"); diff --git a/drivers/video/au1200fb.h b/drivers/video/au1200fb.h new file mode 100644 index 000000000000..e2672714d8d4 --- /dev/null +++ b/drivers/video/au1200fb.h @@ -0,0 +1,572 @@ +/* + * BRIEF MODULE DESCRIPTION + * Hardware definitions for the Au1200 LCD controller + * + * Copyright 2004 AMD + * Author: AMD + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any 
later version. + * + * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN + * NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, + * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT + * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF + * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON + * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF + * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + * You should have received a copy of the GNU General Public License along + * with this program; if not, write to the Free Software Foundation, Inc., + * 675 Mass Ave, Cambridge, MA 02139, USA. + */ + +#ifndef _AU1200LCD_H +#define _AU1200LCD_H + +/********************************************************************/ +#define AU1200_LCD_ADDR 0xB5000000 + +#define uint8 unsigned char +#define uint32 unsigned int + +struct au1200_lcd { + volatile uint32 reserved0; + volatile uint32 screen; + volatile uint32 backcolor; + volatile uint32 horztiming; + volatile uint32 verttiming; + volatile uint32 clkcontrol; + volatile uint32 pwmdiv; + volatile uint32 pwmhi; + volatile uint32 reserved1; + volatile uint32 winenable; + volatile uint32 colorkey; + volatile uint32 colorkeymsk; + struct + { + volatile uint32 cursorctrl; + volatile uint32 cursorpos; + volatile uint32 cursorcolor0; + volatile uint32 cursorcolor1; + volatile uint32 cursorcolor2; + uint32 cursorcolor3; + } hwc; + volatile uint32 intstatus; + volatile uint32 intenable; + volatile uint32 outmask; + volatile uint32 fifoctrl; + uint32 reserved2[(0x0100-0x0058)/4]; + struct + { + volatile uint32 winctrl0; + volatile uint32 winctrl1; + volatile uint32 winctrl2; + volatile uint32 winbuf0; + volatile uint32 winbuf1; + volatile uint32 winbufctrl; + uint32 winreserved0; + uint32 winreserved1; + } window[4]; + + uint32 reserved3[(0x0400-0x0180)/4]; + + volatile uint32 palette[(0x0800-0x0400)/4]; + + volatile uint8 cursorpattern[256]; +}; + +/* lcd_screen */ +#define LCD_SCREEN_SEN (1<<31) +#define LCD_SCREEN_SX (0x07FF<<19) +#define LCD_SCREEN_SY (0x07FF<< 8) +#define LCD_SCREEN_SWP (1<<7) +#define LCD_SCREEN_SWD (1<<6) +#define LCD_SCREEN_PT (7<<0) +#define LCD_SCREEN_PT_TFT (0<<0) +#define LCD_SCREEN_SX_N(WIDTH) ((WIDTH-1)<<19) +#define LCD_SCREEN_SY_N(HEIGHT) ((HEIGHT-1)<<8) +#define LCD_SCREEN_PT_CSTN (1<<0) +#define LCD_SCREEN_PT_CDSTN (2<<0) +#define LCD_SCREEN_PT_M8STN (3<<0) +#define LCD_SCREEN_PT_M4STN (4<<0) + +/* lcd_backcolor */ +#define LCD_BACKCOLOR_SBGR (0xFF<<16) +#define LCD_BACKCOLOR_SBGG (0xFF<<8) +#define LCD_BACKCOLOR_SBGB (0xFF<<0) +#define LCD_BACKCOLOR_SBGR_N(N) ((N)<<16) +#define LCD_BACKCOLOR_SBGG_N(N) ((N)<<8) +#define LCD_BACKCOLOR_SBGB_N(N) ((N)<<0) + +/* lcd_winenable */ +#define LCD_WINENABLE_WEN3 (1<<3) +#define LCD_WINENABLE_WEN2 (1<<2) +#define LCD_WINENABLE_WEN1 (1<<1) +#define LCD_WINENABLE_WEN0 (1<<0) + +/* lcd_colorkey */ +#define LCD_COLORKEY_CKR (0xFF<<16) +#define LCD_COLORKEY_CKG (0xFF<<8) +#define LCD_COLORKEY_CKB (0xFF<<0) +#define LCD_COLORKEY_CKR_N(N) ((N)<<16) +#define LCD_COLORKEY_CKG_N(N) ((N)<<8) +#define LCD_COLORKEY_CKB_N(N) ((N)<<0) + +/* lcd_colorkeymsk */ +#define LCD_COLORKEYMSK_CKMR (0xFF<<16) +#define LCD_COLORKEYMSK_CKMG (0xFF<<8) +#define 
LCD_COLORKEYMSK_CKMB (0xFF<<0) +#define LCD_COLORKEYMSK_CKMR_N(N) ((N)<<16) +#define LCD_COLORKEYMSK_CKMG_N(N) ((N)<<8) +#define LCD_COLORKEYMSK_CKMB_N(N) ((N)<<0) + +/* lcd windows control 0 */ +#define LCD_WINCTRL0_OX (0x07FF<<21) +#define LCD_WINCTRL0_OY (0x07FF<<10) +#define LCD_WINCTRL0_A (0x00FF<<2) +#define LCD_WINCTRL0_AEN (1<<1) +#define LCD_WINCTRL0_OX_N(N) ((N)<<21) +#define LCD_WINCTRL0_OY_N(N) ((N)<<10) +#define LCD_WINCTRL0_A_N(N) ((N)<<2) + +/* lcd windows control 1 */ +#define LCD_WINCTRL1_PRI (3<<30) +#define LCD_WINCTRL1_PIPE (1<<29) +#define LCD_WINCTRL1_FRM (0xF<<25) +#define LCD_WINCTRL1_CCO (1<<24) +#define LCD_WINCTRL1_PO (3<<22) +#define LCD_WINCTRL1_SZX (0x07FF<<11) +#define LCD_WINCTRL1_SZY (0x07FF<<0) +#define LCD_WINCTRL1_FRM_1BPP (0<<25) +#define LCD_WINCTRL1_FRM_2BPP (1<<25) +#define LCD_WINCTRL1_FRM_4BPP (2<<25) +#define LCD_WINCTRL1_FRM_8BPP (3<<25) +#define LCD_WINCTRL1_FRM_12BPP (4<<25) +#define LCD_WINCTRL1_FRM_16BPP655 (5<<25) +#define LCD_WINCTRL1_FRM_16BPP565 (6<<25) +#define LCD_WINCTRL1_FRM_16BPP556 (7<<25) +#define LCD_WINCTRL1_FRM_16BPPI1555 (8<<25) +#define LCD_WINCTRL1_FRM_16BPPI5551 (9<<25) +#define LCD_WINCTRL1_FRM_16BPPA1555 (10<<25) +#define LCD_WINCTRL1_FRM_16BPPA5551 (11<<25) +#define LCD_WINCTRL1_FRM_24BPP (12<<25) +#define LCD_WINCTRL1_FRM_32BPP (13<<25) +#define LCD_WINCTRL1_PRI_N(N) ((N)<<30) +#define LCD_WINCTRL1_PO_00 (0<<22) +#define LCD_WINCTRL1_PO_01 (1<<22) +#define LCD_WINCTRL1_PO_10 (2<<22) +#define LCD_WINCTRL1_PO_11 (3<<22) +#define LCD_WINCTRL1_SZX_N(N) ((N-1)<<11) +#define LCD_WINCTRL1_SZY_N(N) ((N-1)<<0) + +/* lcd windows control 2 */ +#define LCD_WINCTRL2_CKMODE (3<<24) +#define LCD_WINCTRL2_DBM (1<<23) +#define LCD_WINCTRL2_RAM (3<<21) +#define LCD_WINCTRL2_BX (0x1FFF<<8) +#define LCD_WINCTRL2_SCX (0xF<<4) +#define LCD_WINCTRL2_SCY (0xF<<0) +#define LCD_WINCTRL2_CKMODE_00 (0<<24) +#define LCD_WINCTRL2_CKMODE_01 (1<<24) +#define LCD_WINCTRL2_CKMODE_10 (2<<24) +#define LCD_WINCTRL2_CKMODE_11 (3<<24) +#define LCD_WINCTRL2_RAM_NONE (0<<21) +#define LCD_WINCTRL2_RAM_PALETTE (1<<21) +#define LCD_WINCTRL2_RAM_GAMMA (2<<21) +#define LCD_WINCTRL2_RAM_BUFFER (3<<21) +#define LCD_WINCTRL2_BX_N(N) ((N)<<8) +#define LCD_WINCTRL2_SCX_1 (0<<4) +#define LCD_WINCTRL2_SCX_2 (1<<4) +#define LCD_WINCTRL2_SCX_4 (2<<4) +#define LCD_WINCTRL2_SCY_1 (0<<0) +#define LCD_WINCTRL2_SCY_2 (1<<0) +#define LCD_WINCTRL2_SCY_4 (2<<0) + +/* lcd windows buffer control */ +#define LCD_WINBUFCTRL_DB (1<<1) +#define LCD_WINBUFCTRL_DBN (1<<0) + +/* lcd_intstatus, lcd_intenable */ +#define LCD_INT_IFO (0xF<<14) +#define LCD_INT_IFU (0xF<<10) +#define LCD_INT_OFO (1<<9) +#define LCD_INT_OFU (1<<8) +#define LCD_INT_WAIT (1<<3) +#define LCD_INT_SD (1<<2) +#define LCD_INT_SA (1<<1) +#define LCD_INT_SS (1<<0) + +/* lcd_horztiming */ +#define LCD_HORZTIMING_HND2 (0x1FF<<18) +#define LCD_HORZTIMING_HND1 (0x1FF<<9) +#define LCD_HORZTIMING_HPW (0x1FF<<0) +#define LCD_HORZTIMING_HND2_N(N)(((N)-1)<<18) +#define LCD_HORZTIMING_HND1_N(N)(((N)-1)<<9) +#define LCD_HORZTIMING_HPW_N(N) (((N)-1)<<0) + +/* lcd_verttiming */ +#define LCD_VERTTIMING_VND2 (0x1FF<<18) +#define LCD_VERTTIMING_VND1 (0x1FF<<9) +#define LCD_VERTTIMING_VPW (0x1FF<<0) +#define LCD_VERTTIMING_VND2_N(N)(((N)-1)<<18) +#define LCD_VERTTIMING_VND1_N(N)(((N)-1)<<9) +#define LCD_VERTTIMING_VPW_N(N) (((N)-1)<<0) + +/* lcd_clkcontrol */ +#define LCD_CLKCONTROL_EXT (1<<22) +#define LCD_CLKCONTROL_DELAY (3<<20) +#define LCD_CLKCONTROL_CDD (1<<19) +#define LCD_CLKCONTROL_IB (1<<18) +#define LCD_CLKCONTROL_IC (1<<17) 
+#define LCD_CLKCONTROL_IH (1<<16) +#define LCD_CLKCONTROL_IV (1<<15) +#define LCD_CLKCONTROL_BF (0x1F<<10) +#define LCD_CLKCONTROL_PCD (0x3FF<<0) +#define LCD_CLKCONTROL_BF_N(N) (((N)-1)<<10) +#define LCD_CLKCONTROL_PCD_N(N) ((N)<<0) + +/* lcd_pwmdiv */ +#define LCD_PWMDIV_EN (1<<31) +#define LCD_PWMDIV_PWMDIV (0x1FFFF<<0) +#define LCD_PWMDIV_PWMDIV_N(N) ((N)<<0) + +/* lcd_pwmhi */ +#define LCD_PWMHI_PWMHI1 (0xFFFF<<16) +#define LCD_PWMHI_PWMHI0 (0xFFFF<<0) +#define LCD_PWMHI_PWMHI1_N(N) ((N)<<16) +#define LCD_PWMHI_PWMHI0_N(N) ((N)<<0) + +/* lcd_hwccon */ +#define LCD_HWCCON_EN (1<<0) + +/* lcd_cursorpos */ +#define LCD_CURSORPOS_HWCXOFF (0x1F<<27) +#define LCD_CURSORPOS_HWCXPOS (0x07FF<<16) +#define LCD_CURSORPOS_HWCYOFF (0x1F<<11) +#define LCD_CURSORPOS_HWCYPOS (0x07FF<<0) +#define LCD_CURSORPOS_HWCXOFF_N(N) ((N)<<27) +#define LCD_CURSORPOS_HWCXPOS_N(N) ((N)<<16) +#define LCD_CURSORPOS_HWCYOFF_N(N) ((N)<<11) +#define LCD_CURSORPOS_HWCYPOS_N(N) ((N)<<0) + +/* lcd_cursorcolor */ +#define LCD_CURSORCOLOR_HWCA (0xFF<<24) +#define LCD_CURSORCOLOR_HWCR (0xFF<<16) +#define LCD_CURSORCOLOR_HWCG (0xFF<<8) +#define LCD_CURSORCOLOR_HWCB (0xFF<<0) +#define LCD_CURSORCOLOR_HWCA_N(N) ((N)<<24) +#define LCD_CURSORCOLOR_HWCR_N(N) ((N)<<16) +#define LCD_CURSORCOLOR_HWCG_N(N) ((N)<<8) +#define LCD_CURSORCOLOR_HWCB_N(N) ((N)<<0) + +/* lcd_fifoctrl */ +#define LCD_FIFOCTRL_F3IF (1<<29) +#define LCD_FIFOCTRL_F3REQ (0x1F<<24) +#define LCD_FIFOCTRL_F2IF (1<<29) +#define LCD_FIFOCTRL_F2REQ (0x1F<<16) +#define LCD_FIFOCTRL_F1IF (1<<29) +#define LCD_FIFOCTRL_F1REQ (0x1F<<8) +#define LCD_FIFOCTRL_F0IF (1<<29) +#define LCD_FIFOCTRL_F0REQ (0x1F<<0) +#define LCD_FIFOCTRL_F3REQ_N(N) ((N-1)<<24) +#define LCD_FIFOCTRL_F2REQ_N(N) ((N-1)<<16) +#define LCD_FIFOCTRL_F1REQ_N(N) ((N-1)<<8) +#define LCD_FIFOCTRL_F0REQ_N(N) ((N-1)<<0) + +/* lcd_outmask */ +#define LCD_OUTMASK_MASK (0x00FFFFFF) + +/********************************************************************/ +#endif /* _AU1200LCD_H */ +/* + * BRIEF MODULE DESCRIPTION + * Hardware definitions for the Au1200 LCD controller + * + * Copyright 2004 AMD + * Author: AMD + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. + * + * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN + * NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, + * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT + * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF + * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON + * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF + * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + * You should have received a copy of the GNU General Public License along + * with this program; if not, write to the Free Software Foundation, Inc., + * 675 Mass Ave, Cambridge, MA 02139, USA. 
+ */ + +#ifndef _AU1200LCD_H +#define _AU1200LCD_H + +/********************************************************************/ +#define AU1200_LCD_ADDR 0xB5000000 + +#define uint8 unsigned char +#define uint32 unsigned int + +struct au1200_lcd { + volatile uint32 reserved0; + volatile uint32 screen; + volatile uint32 backcolor; + volatile uint32 horztiming; + volatile uint32 verttiming; + volatile uint32 clkcontrol; + volatile uint32 pwmdiv; + volatile uint32 pwmhi; + volatile uint32 reserved1; + volatile uint32 winenable; + volatile uint32 colorkey; + volatile uint32 colorkeymsk; + struct + { + volatile uint32 cursorctrl; + volatile uint32 cursorpos; + volatile uint32 cursorcolor0; + volatile uint32 cursorcolor1; + volatile uint32 cursorcolor2; + uint32 cursorcolor3; + } hwc; + volatile uint32 intstatus; + volatile uint32 intenable; + volatile uint32 outmask; + volatile uint32 fifoctrl; + uint32 reserved2[(0x0100-0x0058)/4]; + struct + { + volatile uint32 winctrl0; + volatile uint32 winctrl1; + volatile uint32 winctrl2; + volatile uint32 winbuf0; + volatile uint32 winbuf1; + volatile uint32 winbufctrl; + uint32 winreserved0; + uint32 winreserved1; + } window[4]; + + uint32 reserved3[(0x0400-0x0180)/4]; + + volatile uint32 palette[(0x0800-0x0400)/4]; + + volatile uint8 cursorpattern[256]; +}; + +/* lcd_screen */ +#define LCD_SCREEN_SEN (1<<31) +#define LCD_SCREEN_SX (0x07FF<<19) +#define LCD_SCREEN_SY (0x07FF<< 8) +#define LCD_SCREEN_SWP (1<<7) +#define LCD_SCREEN_SWD (1<<6) +#define LCD_SCREEN_PT (7<<0) +#define LCD_SCREEN_PT_TFT (0<<0) +#define LCD_SCREEN_SX_N(WIDTH) ((WIDTH-1)<<19) +#define LCD_SCREEN_SY_N(HEIGHT) ((HEIGHT-1)<<8) +#define LCD_SCREEN_PT_CSTN (1<<0) +#define LCD_SCREEN_PT_CDSTN (2<<0) +#define LCD_SCREEN_PT_M8STN (3<<0) +#define LCD_SCREEN_PT_M4STN (4<<0) + +/* lcd_backcolor */ +#define LCD_BACKCOLOR_SBGR (0xFF<<16) +#define LCD_BACKCOLOR_SBGG (0xFF<<8) +#define LCD_BACKCOLOR_SBGB (0xFF<<0) +#define LCD_BACKCOLOR_SBGR_N(N) ((N)<<16) +#define LCD_BACKCOLOR_SBGG_N(N) ((N)<<8) +#define LCD_BACKCOLOR_SBGB_N(N) ((N)<<0) + +/* lcd_winenable */ +#define LCD_WINENABLE_WEN3 (1<<3) +#define LCD_WINENABLE_WEN2 (1<<2) +#define LCD_WINENABLE_WEN1 (1<<1) +#define LCD_WINENABLE_WEN0 (1<<0) + +/* lcd_colorkey */ +#define LCD_COLORKEY_CKR (0xFF<<16) +#define LCD_COLORKEY_CKG (0xFF<<8) +#define LCD_COLORKEY_CKB (0xFF<<0) +#define LCD_COLORKEY_CKR_N(N) ((N)<<16) +#define LCD_COLORKEY_CKG_N(N) ((N)<<8) +#define LCD_COLORKEY_CKB_N(N) ((N)<<0) + +/* lcd_colorkeymsk */ +#define LCD_COLORKEYMSK_CKMR (0xFF<<16) +#define LCD_COLORKEYMSK_CKMG (0xFF<<8) +#define LCD_COLORKEYMSK_CKMB (0xFF<<0) +#define LCD_COLORKEYMSK_CKMR_N(N) ((N)<<16) +#define LCD_COLORKEYMSK_CKMG_N(N) ((N)<<8) +#define LCD_COLORKEYMSK_CKMB_N(N) ((N)<<0) + +/* lcd windows control 0 */ +#define LCD_WINCTRL0_OX (0x07FF<<21) +#define LCD_WINCTRL0_OY (0x07FF<<10) +#define LCD_WINCTRL0_A (0x00FF<<2) +#define LCD_WINCTRL0_AEN (1<<1) +#define LCD_WINCTRL0_OX_N(N) ((N)<<21) +#define LCD_WINCTRL0_OY_N(N) ((N)<<10) +#define LCD_WINCTRL0_A_N(N) ((N)<<2) + +/* lcd windows control 1 */ +#define LCD_WINCTRL1_PRI (3<<30) +#define LCD_WINCTRL1_PIPE (1<<29) +#define LCD_WINCTRL1_FRM (0xF<<25) +#define LCD_WINCTRL1_CCO (1<<24) +#define LCD_WINCTRL1_PO (3<<22) +#define LCD_WINCTRL1_SZX (0x07FF<<11) +#define LCD_WINCTRL1_SZY (0x07FF<<0) +#define LCD_WINCTRL1_FRM_1BPP (0<<25) +#define LCD_WINCTRL1_FRM_2BPP (1<<25) +#define LCD_WINCTRL1_FRM_4BPP (2<<25) +#define LCD_WINCTRL1_FRM_8BPP (3<<25) +#define LCD_WINCTRL1_FRM_12BPP (4<<25) +#define 
LCD_WINCTRL1_FRM_16BPP655 (5<<25) +#define LCD_WINCTRL1_FRM_16BPP565 (6<<25) +#define LCD_WINCTRL1_FRM_16BPP556 (7<<25) +#define LCD_WINCTRL1_FRM_16BPPI1555 (8<<25) +#define LCD_WINCTRL1_FRM_16BPPI5551 (9<<25) +#define LCD_WINCTRL1_FRM_16BPPA1555 (10<<25) +#define LCD_WINCTRL1_FRM_16BPPA5551 (11<<25) +#define LCD_WINCTRL1_FRM_24BPP (12<<25) +#define LCD_WINCTRL1_FRM_32BPP (13<<25) +#define LCD_WINCTRL1_PRI_N(N) ((N)<<30) +#define LCD_WINCTRL1_PO_00 (0<<22) +#define LCD_WINCTRL1_PO_01 (1<<22) +#define LCD_WINCTRL1_PO_10 (2<<22) +#define LCD_WINCTRL1_PO_11 (3<<22) +#define LCD_WINCTRL1_SZX_N(N) ((N-1)<<11) +#define LCD_WINCTRL1_SZY_N(N) ((N-1)<<0) + +/* lcd windows control 2 */ +#define LCD_WINCTRL2_CKMODE (3<<24) +#define LCD_WINCTRL2_DBM (1<<23) +#define LCD_WINCTRL2_RAM (3<<21) +#define LCD_WINCTRL2_BX (0x1FFF<<8) +#define LCD_WINCTRL2_SCX (0xF<<4) +#define LCD_WINCTRL2_SCY (0xF<<0) +#define LCD_WINCTRL2_CKMODE_00 (0<<24) +#define LCD_WINCTRL2_CKMODE_01 (1<<24) +#define LCD_WINCTRL2_CKMODE_10 (2<<24) +#define LCD_WINCTRL2_CKMODE_11 (3<<24) +#define LCD_WINCTRL2_RAM_NONE (0<<21) +#define LCD_WINCTRL2_RAM_PALETTE (1<<21) +#define LCD_WINCTRL2_RAM_GAMMA (2<<21) +#define LCD_WINCTRL2_RAM_BUFFER (3<<21) +#define LCD_WINCTRL2_BX_N(N) ((N)<<8) +#define LCD_WINCTRL2_SCX_1 (0<<4) +#define LCD_WINCTRL2_SCX_2 (1<<4) +#define LCD_WINCTRL2_SCX_4 (2<<4) +#define LCD_WINCTRL2_SCY_1 (0<<0) +#define LCD_WINCTRL2_SCY_2 (1<<0) +#define LCD_WINCTRL2_SCY_4 (2<<0) + +/* lcd windows buffer control */ +#define LCD_WINBUFCTRL_DB (1<<1) +#define LCD_WINBUFCTRL_DBN (1<<0) + +/* lcd_intstatus, lcd_intenable */ +#define LCD_INT_IFO (0xF<<14) +#define LCD_INT_IFU (0xF<<10) +#define LCD_INT_OFO (1<<9) +#define LCD_INT_OFU (1<<8) +#define LCD_INT_WAIT (1<<3) +#define LCD_INT_SD (1<<2) +#define LCD_INT_SA (1<<1) +#define LCD_INT_SS (1<<0) + +/* lcd_horztiming */ +#define LCD_HORZTIMING_HND2 (0x1FF<<18) +#define LCD_HORZTIMING_HND1 (0x1FF<<9) +#define LCD_HORZTIMING_HPW (0x1FF<<0) +#define LCD_HORZTIMING_HND2_N(N)(((N)-1)<<18) +#define LCD_HORZTIMING_HND1_N(N)(((N)-1)<<9) +#define LCD_HORZTIMING_HPW_N(N) (((N)-1)<<0) + +/* lcd_verttiming */ +#define LCD_VERTTIMING_VND2 (0x1FF<<18) +#define LCD_VERTTIMING_VND1 (0x1FF<<9) +#define LCD_VERTTIMING_VPW (0x1FF<<0) +#define LCD_VERTTIMING_VND2_N(N)(((N)-1)<<18) +#define LCD_VERTTIMING_VND1_N(N)(((N)-1)<<9) +#define LCD_VERTTIMING_VPW_N(N) (((N)-1)<<0) + +/* lcd_clkcontrol */ +#define LCD_CLKCONTROL_EXT (1<<22) +#define LCD_CLKCONTROL_DELAY (3<<20) +#define LCD_CLKCONTROL_CDD (1<<19) +#define LCD_CLKCONTROL_IB (1<<18) +#define LCD_CLKCONTROL_IC (1<<17) +#define LCD_CLKCONTROL_IH (1<<16) +#define LCD_CLKCONTROL_IV (1<<15) +#define LCD_CLKCONTROL_BF (0x1F<<10) +#define LCD_CLKCONTROL_PCD (0x3FF<<0) +#define LCD_CLKCONTROL_BF_N(N) (((N)-1)<<10) +#define LCD_CLKCONTROL_PCD_N(N) ((N)<<0) + +/* lcd_pwmdiv */ +#define LCD_PWMDIV_EN (1<<31) +#define LCD_PWMDIV_PWMDIV (0x1FFFF<<0) +#define LCD_PWMDIV_PWMDIV_N(N) ((N)<<0) + +/* lcd_pwmhi */ +#define LCD_PWMHI_PWMHI1 (0xFFFF<<16) +#define LCD_PWMHI_PWMHI0 (0xFFFF<<0) +#define LCD_PWMHI_PWMHI1_N(N) ((N)<<16) +#define LCD_PWMHI_PWMHI0_N(N) ((N)<<0) + +/* lcd_hwccon */ +#define LCD_HWCCON_EN (1<<0) + +/* lcd_cursorpos */ +#define LCD_CURSORPOS_HWCXOFF (0x1F<<27) +#define LCD_CURSORPOS_HWCXPOS (0x07FF<<16) +#define LCD_CURSORPOS_HWCYOFF (0x1F<<11) +#define LCD_CURSORPOS_HWCYPOS (0x07FF<<0) +#define LCD_CURSORPOS_HWCXOFF_N(N) ((N)<<27) +#define LCD_CURSORPOS_HWCXPOS_N(N) ((N)<<16) +#define LCD_CURSORPOS_HWCYOFF_N(N) ((N)<<11) +#define 
LCD_CURSORPOS_HWCYPOS_N(N) ((N)<<0) + +/* lcd_cursorcolor */ +#define LCD_CURSORCOLOR_HWCA (0xFF<<24) +#define LCD_CURSORCOLOR_HWCR (0xFF<<16) +#define LCD_CURSORCOLOR_HWCG (0xFF<<8) +#define LCD_CURSORCOLOR_HWCB (0xFF<<0) +#define LCD_CURSORCOLOR_HWCA_N(N) ((N)<<24) +#define LCD_CURSORCOLOR_HWCR_N(N) ((N)<<16) +#define LCD_CURSORCOLOR_HWCG_N(N) ((N)<<8) +#define LCD_CURSORCOLOR_HWCB_N(N) ((N)<<0) + +/* lcd_fifoctrl */ +#define LCD_FIFOCTRL_F3IF (1<<29) +#define LCD_FIFOCTRL_F3REQ (0x1F<<24) +#define LCD_FIFOCTRL_F2IF (1<<29) +#define LCD_FIFOCTRL_F2REQ (0x1F<<16) +#define LCD_FIFOCTRL_F1IF (1<<29) +#define LCD_FIFOCTRL_F1REQ (0x1F<<8) +#define LCD_FIFOCTRL_F0IF (1<<29) +#define LCD_FIFOCTRL_F0REQ (0x1F<<0) +#define LCD_FIFOCTRL_F3REQ_N(N) ((N-1)<<24) +#define LCD_FIFOCTRL_F2REQ_N(N) ((N-1)<<16) +#define LCD_FIFOCTRL_F1REQ_N(N) ((N-1)<<8) +#define LCD_FIFOCTRL_F0REQ_N(N) ((N-1)<<0) + +/* lcd_outmask */ +#define LCD_OUTMASK_MASK (0x00FFFFFF) + +/********************************************************************/ +#endif /* _AU1200LCD_H */ diff --git a/drivers/video/chipsfb.c b/drivers/video/chipsfb.c index bc061d4ec786..72ff6bf75e5e 100644 --- a/drivers/video/chipsfb.c +++ b/drivers/video/chipsfb.c @@ -178,8 +178,6 @@ struct chips_init_reg { unsigned char data; }; -#define N_ELTS(x) (sizeof(x) / sizeof(x[0])) - static struct chips_init_reg chips_init_sr[] = { { 0x00, 0x03 }, { 0x01, 0x01 }, @@ -287,18 +285,18 @@ static void __init chips_hw_init(void) { int i; - for (i = 0; i < N_ELTS(chips_init_xr); ++i) + for (i = 0; i < ARRAY_SIZE(chips_init_xr); ++i) write_xr(chips_init_xr[i].addr, chips_init_xr[i].data); outb(0x29, 0x3c2); /* set misc output reg */ - for (i = 0; i < N_ELTS(chips_init_sr); ++i) + for (i = 0; i < ARRAY_SIZE(chips_init_sr); ++i) write_sr(chips_init_sr[i].addr, chips_init_sr[i].data); - for (i = 0; i < N_ELTS(chips_init_gr); ++i) + for (i = 0; i < ARRAY_SIZE(chips_init_gr); ++i) write_gr(chips_init_gr[i].addr, chips_init_gr[i].data); - for (i = 0; i < N_ELTS(chips_init_ar); ++i) + for (i = 0; i < ARRAY_SIZE(chips_init_ar); ++i) write_ar(chips_init_ar[i].addr, chips_init_ar[i].data); - for (i = 0; i < N_ELTS(chips_init_cr); ++i) + for (i = 0; i < ARRAY_SIZE(chips_init_cr); ++i) write_cr(chips_init_cr[i].addr, chips_init_cr[i].data); - for (i = 0; i < N_ELTS(chips_init_fr); ++i) + for (i = 0; i < ARRAY_SIZE(chips_init_fr); ++i) write_fr(chips_init_fr[i].addr, chips_init_fr[i].data); } diff --git a/drivers/video/console/Kconfig b/drivers/video/console/Kconfig index 6ee449858a5c..4444bef68fba 100644 --- a/drivers/video/console/Kconfig +++ b/drivers/video/console/Kconfig @@ -26,6 +26,30 @@ config VGA_CONSOLE # fi # fi +config VGACON_SOFT_SCROLLBACK + bool "Enable Scrollback Buffer in System RAM" + depends on VGA_CONSOLE + default n + help + The scrollback buffer of the standard VGA console is located in + the VGA RAM. The size of this RAM is fixed and is quite small. + If you require a larger scrollback buffer, this can be placed in + System RAM which is dynamically allocated during intialization. + Placing the scrollback buffer in System RAM will slightly slow + down the console. + + If you want this feature, say 'Y' here and enter the amount of + RAM to allocate for this buffer. If unsure, say 'N'. + +config VGACON_SOFT_SCROLLBACK_SIZE + int "Scrollback Buffer Size (in KB)" + depends on VGACON_SOFT_SCROLLBACK + default "64" + help + Enter the amount of System RAM to allocate for the scrollback + buffer. 
Each 64KB will give you approximately 16 80x25 + screenfuls of scrollback buffer + config VIDEO_SELECT bool "Video mode selection support" depends on X86 && VGA_CONSOLE diff --git a/drivers/video/console/fonts.c b/drivers/video/console/fonts.c index 4fd07d9eca03..0cc1bfda76a6 100644 --- a/drivers/video/console/fonts.c +++ b/drivers/video/console/fonts.c @@ -66,7 +66,7 @@ static const struct font_desc *fonts[] = { #endif }; -#define num_fonts (sizeof(fonts)/sizeof(*fonts)) +#define num_fonts ARRAY_SIZE(fonts) #ifdef NO_FONTS #error No fonts configured. diff --git a/drivers/video/console/newport_con.c b/drivers/video/console/newport_con.c index 762c7a593141..e99fe30e568c 100644 --- a/drivers/video/console/newport_con.c +++ b/drivers/video/console/newport_con.c @@ -149,7 +149,7 @@ static inline void newport_clear_lines(int ystart, int yend, int ci) newport_clear_screen(0, ystart, 1280 + 63, yend, ci); } -void newport_reset(void) +static void newport_reset(void) { unsigned short treg; int i; @@ -193,7 +193,7 @@ void newport_reset(void) * calculate the actual screen size by reading * the video timing out of the VC2 */ -void newport_get_screensize(void) +static void newport_get_screensize(void) { int i, cols; unsigned short ventry, treg; diff --git a/drivers/video/console/vgacon.c b/drivers/video/console/vgacon.c index 5a86978537d2..d5a04b68c4d4 100644 --- a/drivers/video/console/vgacon.c +++ b/drivers/video/console/vgacon.c @@ -93,7 +93,6 @@ static u8 vgacon_build_attr(struct vc_data *c, u8 color, u8 intensity, static void vgacon_invert_region(struct vc_data *c, u16 * p, int count); static unsigned long vgacon_uni_pagedir[2]; - /* Description of the hardware situation */ static unsigned long vga_vram_base; /* Base of video memory */ static unsigned long vga_vram_end; /* End of video memory */ @@ -161,6 +160,201 @@ static inline void write_vga(unsigned char reg, unsigned int val) spin_unlock_irqrestore(&vga_lock, flags); } +static inline void vga_set_mem_top(struct vc_data *c) +{ + write_vga(12, (c->vc_visible_origin - vga_vram_base) / 2); +} + +#ifdef CONFIG_VGACON_SOFT_SCROLLBACK +#include <linux/bootmem.h> +/* software scrollback */ +static void *vgacon_scrollback; +static int vgacon_scrollback_tail; +static int vgacon_scrollback_size; +static int vgacon_scrollback_rows; +static int vgacon_scrollback_cnt; +static int vgacon_scrollback_cur; +static int vgacon_scrollback_save; +static int vgacon_scrollback_restore; + +static void vgacon_scrollback_init(int pitch) +{ + int rows = CONFIG_VGACON_SOFT_SCROLLBACK_SIZE * 1024/pitch; + + if (vgacon_scrollback) { + vgacon_scrollback_cnt = 0; + vgacon_scrollback_tail = 0; + vgacon_scrollback_cur = 0; + vgacon_scrollback_rows = rows - 1; + vgacon_scrollback_size = rows * pitch; + } +} + +static void __init vgacon_scrollback_startup(void) +{ + vgacon_scrollback = alloc_bootmem(CONFIG_VGACON_SOFT_SCROLLBACK_SIZE + * 1024); + vgacon_scrollback_init(vga_video_num_columns * 2); +} + +static void vgacon_scrollback_update(struct vc_data *c, int t, int count) +{ + void *p; + + if (!vgacon_scrollback_size || c->vc_num != fg_console) + return; + + p = (void *) (c->vc_origin + t * c->vc_size_row); + + while (count--) { + scr_memcpyw(vgacon_scrollback + vgacon_scrollback_tail, + p, c->vc_size_row); + vgacon_scrollback_cnt++; + p += c->vc_size_row; + vgacon_scrollback_tail += c->vc_size_row; + + if (vgacon_scrollback_tail >= vgacon_scrollback_size) + vgacon_scrollback_tail = 0; + + if (vgacon_scrollback_cnt > vgacon_scrollback_rows) + vgacon_scrollback_cnt = 
vgacon_scrollback_rows; + + vgacon_scrollback_cur = vgacon_scrollback_cnt; + } +} + +static void vgacon_restore_screen(struct vc_data *c) +{ + vgacon_scrollback_save = 0; + + if (!vga_is_gfx && !vgacon_scrollback_restore) { + scr_memcpyw((u16 *) c->vc_origin, (u16 *) c->vc_screenbuf, + c->vc_screenbuf_size > vga_vram_size ? + vga_vram_size : c->vc_screenbuf_size); + vgacon_scrollback_restore = 1; + vgacon_scrollback_cur = vgacon_scrollback_cnt; + } +} + +static int vgacon_scrolldelta(struct vc_data *c, int lines) +{ + int start, end, count, soff, diff; + void *d, *s; + + if (!lines) { + c->vc_visible_origin = c->vc_origin; + vga_set_mem_top(c); + return 1; + } + + if (!vgacon_scrollback) + return 1; + + if (!vgacon_scrollback_save) { + vgacon_cursor(c, CM_ERASE); + vgacon_save_screen(c); + vgacon_scrollback_save = 1; + } + + vgacon_scrollback_restore = 0; + start = vgacon_scrollback_cur + lines; + end = start + abs(lines); + + if (start < 0) + start = 0; + + if (start > vgacon_scrollback_cnt) + start = vgacon_scrollback_cnt; + + if (end < 0) + end = 0; + + if (end > vgacon_scrollback_cnt) + end = vgacon_scrollback_cnt; + + vgacon_scrollback_cur = start; + count = end - start; + soff = vgacon_scrollback_tail - ((vgacon_scrollback_cnt - end) * + c->vc_size_row); + soff -= count * c->vc_size_row; + + if (soff < 0) + soff += vgacon_scrollback_size; + + count = vgacon_scrollback_cnt - start; + + if (count > c->vc_rows) + count = c->vc_rows; + + diff = c->vc_rows - count; + + d = (void *) c->vc_origin; + s = (void *) c->vc_screenbuf; + + while (count--) { + scr_memcpyw(d, vgacon_scrollback + soff, c->vc_size_row); + d += c->vc_size_row; + soff += c->vc_size_row; + + if (soff >= vgacon_scrollback_size) + soff = 0; + } + + if (diff == c->vc_rows) { + vgacon_cursor(c, CM_MOVE); + } else { + while (diff--) { + scr_memcpyw(d, s, c->vc_size_row); + d += c->vc_size_row; + s += c->vc_size_row; + } + } + + return 1; +} +#else +#define vgacon_scrollback_startup(...) do { } while (0) +#define vgacon_scrollback_init(...) do { } while (0) +#define vgacon_scrollback_update(...) 
do { } while (0) + +static void vgacon_restore_screen(struct vc_data *c) +{ + if (c->vc_origin != c->vc_visible_origin) + vgacon_scrolldelta(c, 0); +} + +static int vgacon_scrolldelta(struct vc_data *c, int lines) +{ + if (!lines) /* Turn scrollback off */ + c->vc_visible_origin = c->vc_origin; + else { + int margin = c->vc_size_row * 4; + int ul, we, p, st; + + if (vga_rolled_over > + (c->vc_scr_end - vga_vram_base) + margin) { + ul = c->vc_scr_end - vga_vram_base; + we = vga_rolled_over + c->vc_size_row; + } else { + ul = 0; + we = vga_vram_size; + } + p = (c->vc_visible_origin - vga_vram_base - ul + we) % we + + lines * c->vc_size_row; + st = (c->vc_origin - vga_vram_base - ul + we) % we; + if (st < 2 * margin) + margin = 0; + if (p < margin) + p = 0; + if (p > st - margin) + p = st; + c->vc_visible_origin = vga_vram_base + (p + ul) % we; + } + vga_set_mem_top(c); + return 1; +} +#endif /* CONFIG_VGACON_SOFT_SCROLLBACK */ + static const char __init *vgacon_startup(void) { const char *display_desc = NULL; @@ -330,7 +524,7 @@ static const char __init *vgacon_startup(void) vgacon_xres = ORIG_VIDEO_COLS * VGA_FONTWIDTH; vgacon_yres = vga_scan_lines; - + vgacon_scrollback_startup(); return display_desc; } @@ -357,11 +551,6 @@ static void vgacon_init(struct vc_data *c, int init) con_set_default_unimap(c); } -static inline void vga_set_mem_top(struct vc_data *c) -{ - write_vga(12, (c->vc_visible_origin - vga_vram_base) / 2); -} - static void vgacon_deinit(struct vc_data *c) { /* When closing the last console, reset video origin */ @@ -433,29 +622,37 @@ static void vgacon_set_cursor_size(int xpos, int from, int to) cursor_size_lastto = to; spin_lock_irqsave(&vga_lock, flags); - outb_p(0x0a, vga_video_port_reg); /* Cursor start */ - curs = inb_p(vga_video_port_val); - outb_p(0x0b, vga_video_port_reg); /* Cursor end */ - cure = inb_p(vga_video_port_val); + if (vga_video_type >= VIDEO_TYPE_VGAC) { + outb_p(VGA_CRTC_CURSOR_START, vga_video_port_reg); + curs = inb_p(vga_video_port_val); + outb_p(VGA_CRTC_CURSOR_END, vga_video_port_reg); + cure = inb_p(vga_video_port_val); + } else { + curs = 0; + cure = 0; + } curs = (curs & 0xc0) | from; cure = (cure & 0xe0) | to; - outb_p(0x0a, vga_video_port_reg); /* Cursor start */ + outb_p(VGA_CRTC_CURSOR_START, vga_video_port_reg); outb_p(curs, vga_video_port_val); - outb_p(0x0b, vga_video_port_reg); /* Cursor end */ + outb_p(VGA_CRTC_CURSOR_END, vga_video_port_reg); outb_p(cure, vga_video_port_val); spin_unlock_irqrestore(&vga_lock, flags); } static void vgacon_cursor(struct vc_data *c, int mode) { - if (c->vc_origin != c->vc_visible_origin) - vgacon_scrolldelta(c, 0); + vgacon_restore_screen(c); + switch (mode) { case CM_ERASE: write_vga(14, (c->vc_pos - vga_vram_base) / 2); - vgacon_set_cursor_size(c->vc_x, 31, 30); + if (vga_video_type >= VIDEO_TYPE_VGAC) + vgacon_set_cursor_size(c->vc_x, 31, 30); + else + vgacon_set_cursor_size(c->vc_x, 31, 31); break; case CM_MOVE: @@ -493,7 +690,10 @@ static void vgacon_cursor(struct vc_data *c, int mode) 10 ? 
1 : 2)); break; case CUR_NONE: - vgacon_set_cursor_size(c->vc_x, 31, 30); + if (vga_video_type >= VIDEO_TYPE_VGAC) + vgacon_set_cursor_size(c->vc_x, 31, 30); + else + vgacon_set_cursor_size(c->vc_x, 31, 31); break; default: vgacon_set_cursor_size(c->vc_x, 1, @@ -595,6 +795,7 @@ static int vgacon_switch(struct vc_data *c) vgacon_doresize(c, c->vc_cols, c->vc_rows); } + vgacon_scrollback_init(c->vc_size_row); return 0; /* Redrawing not needed */ } @@ -1062,37 +1263,6 @@ static int vgacon_resize(struct vc_data *c, unsigned int width, return 0; } -static int vgacon_scrolldelta(struct vc_data *c, int lines) -{ - if (!lines) /* Turn scrollback off */ - c->vc_visible_origin = c->vc_origin; - else { - int margin = c->vc_size_row * 4; - int ul, we, p, st; - - if (vga_rolled_over > - (c->vc_scr_end - vga_vram_base) + margin) { - ul = c->vc_scr_end - vga_vram_base; - we = vga_rolled_over + c->vc_size_row; - } else { - ul = 0; - we = vga_vram_size; - } - p = (c->vc_visible_origin - vga_vram_base - ul + we) % we + - lines * c->vc_size_row; - st = (c->vc_origin - vga_vram_base - ul + we) % we; - if (st < 2 * margin) - margin = 0; - if (p < margin) - p = 0; - if (p > st - margin) - p = st; - c->vc_visible_origin = vga_vram_base + (p + ul) % we; - } - vga_set_mem_top(c); - return 1; -} - static int vgacon_set_origin(struct vc_data *c) { if (vga_is_gfx || /* We don't play origin tricks in graphic modes */ @@ -1135,15 +1305,14 @@ static int vgacon_scroll(struct vc_data *c, int t, int b, int dir, if (t || b != c->vc_rows || vga_is_gfx) return 0; - if (c->vc_origin != c->vc_visible_origin) - vgacon_scrolldelta(c, 0); - if (!vga_hardscroll_enabled || lines >= c->vc_rows / 2) return 0; + vgacon_restore_screen(c); oldo = c->vc_origin; delta = lines * c->vc_size_row; if (dir == SM_UP) { + vgacon_scrollback_update(c, t, lines); if (c->vc_scr_end + delta >= vga_vram_end) { scr_memcpyw((u16 *) vga_vram_base, (u16 *) (oldo + delta), diff --git a/drivers/video/fbcmap.c b/drivers/video/fbcmap.c index c32a2a50bfa2..1f98392a43b3 100644 --- a/drivers/video/fbcmap.c +++ b/drivers/video/fbcmap.c @@ -85,7 +85,7 @@ static struct fb_cmap default_16_colors = { * Allocates memory for a colormap @cmap. @len is the * number of entries in the palette. * - * Returns -1 errno on error, or zero on success. + * Returns negative errno on error, or zero on success. 
* */ @@ -116,7 +116,7 @@ int fb_alloc_cmap(struct fb_cmap *cmap, int len, int transp) fail: fb_dealloc_cmap(cmap); - return -1; + return -ENOMEM; } /** diff --git a/drivers/video/fbmem.c b/drivers/video/fbmem.c index 07d882b14396..b1a8dca76430 100644 --- a/drivers/video/fbmem.c +++ b/drivers/video/fbmem.c @@ -55,7 +55,7 @@ #define FBPIXMAPSIZE (1024 * 8) -static struct notifier_block *fb_notifier_list; +static BLOCKING_NOTIFIER_HEAD(fb_notifier_list); struct fb_info *registered_fb[FB_MAX]; int num_registered_fb; @@ -784,7 +784,7 @@ fb_set_var(struct fb_info *info, struct fb_var_screeninfo *var) event.info = info; event.data = &mode1; - ret = notifier_call_chain(&fb_notifier_list, + ret = blocking_notifier_call_chain(&fb_notifier_list, FB_EVENT_MODE_DELETE, &event); } @@ -830,8 +830,8 @@ fb_set_var(struct fb_info *info, struct fb_var_screeninfo *var) info->flags &= ~FBINFO_MISC_USEREVENT; event.info = info; - notifier_call_chain(&fb_notifier_list, evnt, - &event); + blocking_notifier_call_chain(&fb_notifier_list, + evnt, &event); } } } @@ -854,7 +854,8 @@ fb_blank(struct fb_info *info, int blank) event.info = info; event.data = ␣ - notifier_call_chain(&fb_notifier_list, FB_EVENT_BLANK, &event); + blocking_notifier_call_chain(&fb_notifier_list, + FB_EVENT_BLANK, &event); } return ret; @@ -925,7 +926,7 @@ fb_ioctl(struct inode *inode, struct file *file, unsigned int cmd, con2fb.framebuffer = -1; event.info = info; event.data = &con2fb; - notifier_call_chain(&fb_notifier_list, + blocking_notifier_call_chain(&fb_notifier_list, FB_EVENT_GET_CONSOLE_MAP, &event); return copy_to_user(argp, &con2fb, sizeof(con2fb)) ? -EFAULT : 0; @@ -944,7 +945,7 @@ fb_ioctl(struct inode *inode, struct file *file, unsigned int cmd, return -EINVAL; event.info = info; event.data = &con2fb; - return notifier_call_chain(&fb_notifier_list, + return blocking_notifier_call_chain(&fb_notifier_list, FB_EVENT_SET_CONSOLE_MAP, &event); case FBIOBLANK: @@ -1324,7 +1325,7 @@ register_framebuffer(struct fb_info *fb_info) devfs_mk_cdev(MKDEV(FB_MAJOR, i), S_IFCHR | S_IRUGO | S_IWUGO, "fb/%d", i); event.info = fb_info; - notifier_call_chain(&fb_notifier_list, + blocking_notifier_call_chain(&fb_notifier_list, FB_EVENT_FB_REGISTERED, &event); return 0; } @@ -1366,7 +1367,7 @@ unregister_framebuffer(struct fb_info *fb_info) */ int fb_register_client(struct notifier_block *nb) { - return notifier_chain_register(&fb_notifier_list, nb); + return blocking_notifier_chain_register(&fb_notifier_list, nb); } /** @@ -1375,7 +1376,7 @@ int fb_register_client(struct notifier_block *nb) */ int fb_unregister_client(struct notifier_block *nb) { - return notifier_chain_unregister(&fb_notifier_list, nb); + return blocking_notifier_chain_unregister(&fb_notifier_list, nb); } /** @@ -1393,11 +1394,13 @@ void fb_set_suspend(struct fb_info *info, int state) event.info = info; if (state) { - notifier_call_chain(&fb_notifier_list, FB_EVENT_SUSPEND, &event); + blocking_notifier_call_chain(&fb_notifier_list, + FB_EVENT_SUSPEND, &event); info->state = FBINFO_STATE_SUSPENDED; } else { info->state = FBINFO_STATE_RUNNING; - notifier_call_chain(&fb_notifier_list, FB_EVENT_RESUME, &event); + blocking_notifier_call_chain(&fb_notifier_list, + FB_EVENT_RESUME, &event); } } @@ -1469,7 +1472,7 @@ int fb_new_modelist(struct fb_info *info) if (!list_empty(&info->modelist)) { event.info = info; - err = notifier_call_chain(&fb_notifier_list, + err = blocking_notifier_call_chain(&fb_notifier_list, FB_EVENT_NEW_MODELIST, &event); } @@ -1495,7 +1498,7 @@ int 
fb_con_duit(struct fb_info *info, int event, void *data) evnt.info = info; evnt.data = data; - return notifier_call_chain(&fb_notifier_list, event, &evnt); + return blocking_notifier_call_chain(&fb_notifier_list, event, &evnt); } EXPORT_SYMBOL(fb_con_duit); diff --git a/drivers/video/fbmon.c b/drivers/video/fbmon.c index 7c74e7325d95..53beeb4a9998 100644 --- a/drivers/video/fbmon.c +++ b/drivers/video/fbmon.c @@ -1281,7 +1281,7 @@ int fb_validate_mode(const struct fb_var_screeninfo *var, struct fb_info *info) -EINVAL : 0; } -#if defined(__i386__) +#if defined(CONFIG_FB_FIRMWARE_EDID) && defined(__i386__) #include <linux/pci.h> /* @@ -1311,11 +1311,11 @@ const unsigned char *fb_firmware_edid(struct device *device) { return NULL; } -#endif /* _i386_ */ +#endif +EXPORT_SYMBOL(fb_firmware_edid); EXPORT_SYMBOL(fb_parse_edid); EXPORT_SYMBOL(fb_edid_to_monspecs); -EXPORT_SYMBOL(fb_firmware_edid); EXPORT_SYMBOL(fb_get_mode); EXPORT_SYMBOL(fb_validate_mode); EXPORT_SYMBOL(fb_destroy_modedb); diff --git a/drivers/video/fbsysfs.c b/drivers/video/fbsysfs.c index 6d26057337e2..b72b05250a9d 100644 --- a/drivers/video/fbsysfs.c +++ b/drivers/video/fbsysfs.c @@ -348,7 +348,7 @@ static ssize_t store_cmap(struct class_device *class_device, const char *buf, fb_copy_cmap(&umap, &fb_info->cmap); fb_dealloc_cmap(&umap); - return rc; + return rc ?: count; } for (i = 0; i < length; i++) { u16 red, blue, green, tsp; @@ -367,7 +367,7 @@ static ssize_t store_cmap(struct class_device *class_device, const char *buf, if (transp) fb_info->cmap.transp[i] = tsp; } - return 0; + return count; } static ssize_t show_cmap(struct class_device *class_device, char *buf) diff --git a/drivers/video/geode/Kconfig b/drivers/video/geode/Kconfig index 42fb9a89a792..4e173ef20a7d 100644 --- a/drivers/video/geode/Kconfig +++ b/drivers/video/geode/Kconfig @@ -8,9 +8,24 @@ config FB_GEODE Say 'Y' here to allow you to select framebuffer drivers for the AMD Geode family of processors. +config FB_GEODE_GX + tristate "AMD Geode GX framebuffer support (EXPERIMENTAL)" + depends on FB && FB_GEODE && EXPERIMENTAL + select FB_CFB_FILLRECT + select FB_CFB_COPYAREA + select FB_CFB_IMAGEBLIT + ---help--- + Framebuffer driver for the display controller integrated into the + AMD Geode GX processors. + + To compile this driver as a module, choose M here: the module will be + called gxfb. + + If unsure, say N. + config FB_GEODE_GX1 tristate "AMD Geode GX1 framebuffer support (EXPERIMENTAL)" - depends on FB_GEODE && EXPERIMENTAL + depends on FB && FB_GEODE && EXPERIMENTAL select FB_CFB_FILLRECT select FB_CFB_COPYAREA select FB_CFB_IMAGEBLIT diff --git a/drivers/video/geode/Makefile b/drivers/video/geode/Makefile index 13ad501ea990..f896565bc312 100644 --- a/drivers/video/geode/Makefile +++ b/drivers/video/geode/Makefile @@ -1,5 +1,7 @@ # Makefile for the Geode family framebuffer drivers obj-$(CONFIG_FB_GEODE_GX1) += gx1fb.o +obj-$(CONFIG_FB_GEODE_GX) += gxfb.o -gx1fb-objs := gx1fb_core.o display_gx1.o video_cs5530.o +gx1fb-objs := gx1fb_core.o display_gx1.o video_cs5530.o +gxfb-objs := gxfb_core.o display_gx.o video_gx.o diff --git a/drivers/video/geode/display_gx.c b/drivers/video/geode/display_gx.c new file mode 100644 index 000000000000..825c3405f5c2 --- /dev/null +++ b/drivers/video/geode/display_gx.c @@ -0,0 +1,156 @@ +/* + * Geode GX display controller. + * + * Copyright (C) 2005 Arcom Control Systems Ltd. + * + * Portions from AMD's original 2.4 driver: + * Copyright (C) 2004 Advanced Micro Devices, Inc. 
+ * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by * the + * Free Software Foundation; either version 2 of the License, or * (at your + * option) any later version. + */ +#include <linux/spinlock.h> +#include <linux/fb.h> +#include <linux/delay.h> +#include <asm/io.h> +#include <asm/div64.h> +#include <asm/delay.h> + +#include "geodefb.h" +#include "display_gx.h" + +int gx_frame_buffer_size(void) +{ + /* Assuming 16 MiB. */ + return 16*1024*1024; +} + +int gx_line_delta(int xres, int bpp) +{ + /* Must be a multiple of 8 bytes. */ + return (xres * (bpp >> 3) + 7) & ~0x7; +} + +static void gx_set_mode(struct fb_info *info) +{ + struct geodefb_par *par = info->par; + u32 gcfg, dcfg; + int hactive, hblankstart, hsyncstart, hsyncend, hblankend, htotal; + int vactive, vblankstart, vsyncstart, vsyncend, vblankend, vtotal; + + /* Unlock the display controller registers. */ + readl(par->dc_regs + DC_UNLOCK); + writel(DC_UNLOCK_CODE, par->dc_regs + DC_UNLOCK); + + gcfg = readl(par->dc_regs + DC_GENERAL_CFG); + dcfg = readl(par->dc_regs + DC_DISPLAY_CFG); + + /* Disable the timing generator. */ + dcfg &= ~(DC_DCFG_TGEN); + writel(dcfg, par->dc_regs + DC_DISPLAY_CFG); + + /* Wait for pending memory requests before disabling the FIFO load. */ + udelay(100); + + /* Disable FIFO load and compression. */ + gcfg &= ~(DC_GCFG_DFLE | DC_GCFG_CMPE | DC_GCFG_DECE); + writel(gcfg, par->dc_regs + DC_GENERAL_CFG); + + /* Setup DCLK and its divisor. */ + par->vid_ops->set_dclk(info); + + /* + * Setup new mode. + */ + + /* Clear all unused feature bits. */ + gcfg &= DC_GCFG_YUVM | DC_GCFG_VDSE; + dcfg = 0; + + /* Set FIFO priority (default 6/5) and enable. */ + /* FIXME: increase fifo priority for 1280x1024 and higher modes? */ + gcfg |= (6 << DC_GCFG_DFHPEL_POS) | (5 << DC_GCFG_DFHPSL_POS) | DC_GCFG_DFLE; + + /* Framebuffer start offset. */ + writel(0, par->dc_regs + DC_FB_ST_OFFSET); + + /* Line delta and line buffer length. */ + writel(info->fix.line_length >> 3, par->dc_regs + DC_GFX_PITCH); + writel(((info->var.xres * info->var.bits_per_pixel/8) >> 3) + 2, + par->dc_regs + DC_LINE_SIZE); + + /* Enable graphics and video data and unmask address lines. */ + dcfg |= DC_DCFG_GDEN | DC_DCFG_VDEN | DC_DCFG_A20M | DC_DCFG_A18M; + + /* Set pixel format. */ + switch (info->var.bits_per_pixel) { + case 8: + dcfg |= DC_DCFG_DISP_MODE_8BPP; + break; + case 16: + dcfg |= DC_DCFG_DISP_MODE_16BPP; + dcfg |= DC_DCFG_16BPP_MODE_565; + break; + case 32: + dcfg |= DC_DCFG_DISP_MODE_24BPP; + dcfg |= DC_DCFG_PALB; + break; + } + + /* Enable timing generator. */ + dcfg |= DC_DCFG_TGEN; + + /* Horizontal and vertical timings. 
*/ + hactive = info->var.xres; + hblankstart = hactive; + hsyncstart = hblankstart + info->var.right_margin; + hsyncend = hsyncstart + info->var.hsync_len; + hblankend = hsyncend + info->var.left_margin; + htotal = hblankend; + + vactive = info->var.yres; + vblankstart = vactive; + vsyncstart = vblankstart + info->var.lower_margin; + vsyncend = vsyncstart + info->var.vsync_len; + vblankend = vsyncend + info->var.upper_margin; + vtotal = vblankend; + + writel((hactive - 1) | ((htotal - 1) << 16), par->dc_regs + DC_H_ACTIVE_TIMING); + writel((hblankstart - 1) | ((hblankend - 1) << 16), par->dc_regs + DC_H_BLANK_TIMING); + writel((hsyncstart - 1) | ((hsyncend - 1) << 16), par->dc_regs + DC_H_SYNC_TIMING); + + writel((vactive - 1) | ((vtotal - 1) << 16), par->dc_regs + DC_V_ACTIVE_TIMING); + writel((vblankstart - 1) | ((vblankend - 1) << 16), par->dc_regs + DC_V_BLANK_TIMING); + writel((vsyncstart - 1) | ((vsyncend - 1) << 16), par->dc_regs + DC_V_SYNC_TIMING); + + /* Write final register values. */ + writel(dcfg, par->dc_regs + DC_DISPLAY_CFG); + writel(gcfg, par->dc_regs + DC_GENERAL_CFG); + + par->vid_ops->configure_display(info); + + /* Relock display controller registers */ + writel(0, par->dc_regs + DC_UNLOCK); +} + +static void gx_set_hw_palette_reg(struct fb_info *info, unsigned regno, + unsigned red, unsigned green, unsigned blue) +{ + struct geodefb_par *par = info->par; + int val; + + /* Hardware palette is in RGB 8-8-8 format. */ + val = (red << 8) & 0xff0000; + val |= (green) & 0x00ff00; + val |= (blue >> 8) & 0x0000ff; + + writel(regno, par->dc_regs + DC_PAL_ADDRESS); + writel(val, par->dc_regs + DC_PAL_DATA); +} + +struct geode_dc_ops gx_dc_ops = { + .set_mode = gx_set_mode, + .set_palette_reg = gx_set_hw_palette_reg, +}; diff --git a/drivers/video/geode/display_gx.h b/drivers/video/geode/display_gx.h new file mode 100644 index 000000000000..86c623361305 --- /dev/null +++ b/drivers/video/geode/display_gx.h @@ -0,0 +1,96 @@ +/* + * Geode GX display controller + * + * Copyright (C) 2006 Arcom Control Systems Ltd. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. 
+ */ +#ifndef __DISPLAY_GX_H__ +#define __DISPLAY_GX_H__ + +int gx_frame_buffer_size(void); +int gx_line_delta(int xres, int bpp); + +extern struct geode_dc_ops gx_dc_ops; + +/* Display controller registers */ + +#define DC_UNLOCK 0x00 +# define DC_UNLOCK_CODE 0x00004758 + +#define DC_GENERAL_CFG 0x04 +# define DC_GCFG_DFLE 0x00000001 +# define DC_GCFG_CURE 0x00000002 +# define DC_GCFG_ICNE 0x00000004 +# define DC_GCFG_VIDE 0x00000008 +# define DC_GCFG_CMPE 0x00000020 +# define DC_GCFG_DECE 0x00000040 +# define DC_GCFG_VGAE 0x00000080 +# define DC_GCFG_DFHPSL_MASK 0x00000F00 +# define DC_GCFG_DFHPSL_POS 8 +# define DC_GCFG_DFHPEL_MASK 0x0000F000 +# define DC_GCFG_DFHPEL_POS 12 +# define DC_GCFG_STFM 0x00010000 +# define DC_GCFG_FDTY 0x00020000 +# define DC_GCFG_VGAFT 0x00040000 +# define DC_GCFG_VDSE 0x00080000 +# define DC_GCFG_YUVM 0x00100000 +# define DC_GCFG_VFSL 0x00800000 +# define DC_GCFG_SIGE 0x01000000 +# define DC_GCFG_SGRE 0x02000000 +# define DC_GCFG_SGFR 0x04000000 +# define DC_GCFG_CRC_MODE 0x08000000 +# define DC_GCFG_DIAG 0x10000000 +# define DC_GCFG_CFRW 0x20000000 + +#define DC_DISPLAY_CFG 0x08 +# define DC_DCFG_TGEN 0x00000001 +# define DC_DCFG_GDEN 0x00000008 +# define DC_DCFG_VDEN 0x00000010 +# define DC_DCFG_TRUP 0x00000040 +# define DC_DCFG_DISP_MODE_MASK 0x00000300 +# define DC_DCFG_DISP_MODE_8BPP 0x00000000 +# define DC_DCFG_DISP_MODE_16BPP 0x00000100 +# define DC_DCFG_DISP_MODE_24BPP 0x00000200 +# define DC_DCFG_16BPP_MODE_MASK 0x00000c00 +# define DC_DCFG_16BPP_MODE_565 0x00000000 +# define DC_DCFG_16BPP_MODE_555 0x00000100 +# define DC_DCFG_16BPP_MODE_444 0x00000200 +# define DC_DCFG_DCEN 0x00080000 +# define DC_DCFG_PALB 0x02000000 +# define DC_DCFG_FRLK 0x04000000 +# define DC_DCFG_VISL 0x08000000 +# define DC_DCFG_FRSL 0x20000000 +# define DC_DCFG_A18M 0x40000000 +# define DC_DCFG_A20M 0x80000000 + +#define DC_FB_ST_OFFSET 0x10 + +#define DC_LINE_SIZE 0x30 +# define DC_LINE_SIZE_FB_LINE_SIZE_MASK 0x000007ff +# define DC_LINE_SIZE_FB_LINE_SIZE_POS 0 +# define DC_LINE_SIZE_CB_LINE_SIZE_MASK 0x007f0000 +# define DC_LINE_SIZE_CB_LINE_SIZE_POS 16 +# define DC_LINE_SIZE_VID_LINE_SIZE_MASK 0xff000000 +# define DC_LINE_SIZE_VID_LINE_SIZE_POS 24 + +#define DC_GFX_PITCH 0x34 +# define DC_GFX_PITCH_FB_PITCH_MASK 0x0000ffff +# define DC_GFX_PITCH_FB_PITCH_POS 0 +# define DC_GFX_PITCH_CB_PITCH_MASK 0xffff0000 +# define DC_GFX_PITCH_CB_PITCH_POS 16 + +#define DC_H_ACTIVE_TIMING 0x40 +#define DC_H_BLANK_TIMING 0x44 +#define DC_H_SYNC_TIMING 0x48 +#define DC_V_ACTIVE_TIMING 0x50 +#define DC_V_BLANK_TIMING 0x54 +#define DC_V_SYNC_TIMING 0x58 + +#define DC_PAL_ADDRESS 0x70 +#define DC_PAL_DATA 0x74 + +#endif /* !__DISPLAY_GX1_H__ */ diff --git a/drivers/video/geode/gxfb_core.c b/drivers/video/geode/gxfb_core.c new file mode 100644 index 000000000000..89c34b15f5d4 --- /dev/null +++ b/drivers/video/geode/gxfb_core.c @@ -0,0 +1,423 @@ +/* + * Geode GX framebuffer driver. + * + * Copyright (C) 2006 Arcom Control Systems Ltd. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. + * + * + * This driver assumes that the BIOS has created a virtual PCI device header + * for the video device. 
The PCI header is assumed to contain the following + * BARs: + * + * BAR0 - framebuffer memory + * BAR1 - graphics processor registers + * BAR2 - display controller registers + * BAR3 - video processor and flat panel control registers. + * + * 16 MiB of framebuffer memory is assumed to be available. + */ +#include <linux/module.h> +#include <linux/kernel.h> +#include <linux/errno.h> +#include <linux/string.h> +#include <linux/mm.h> +#include <linux/tty.h> +#include <linux/slab.h> +#include <linux/delay.h> +#include <linux/fb.h> +#include <linux/init.h> +#include <linux/pci.h> + +#include "geodefb.h" +#include "display_gx.h" +#include "video_gx.h" + +static char mode_option[32] = "640x480-16@60"; + +/* Modes relevant to the GX (taken from modedb.c) */ +static const struct fb_videomode __initdata gx_modedb[] = { + /* 640x480-60 VESA */ + { NULL, 60, 640, 480, 39682, 48, 16, 33, 10, 96, 2, + 0, FB_VMODE_NONINTERLACED, FB_MODE_IS_VESA }, + /* 640x480-75 VESA */ + { NULL, 75, 640, 480, 31746, 120, 16, 16, 01, 64, 3, + 0, FB_VMODE_NONINTERLACED, FB_MODE_IS_VESA }, + /* 640x480-85 VESA */ + { NULL, 85, 640, 480, 27777, 80, 56, 25, 01, 56, 3, + 0, FB_VMODE_NONINTERLACED, FB_MODE_IS_VESA }, + /* 800x600-60 VESA */ + { NULL, 60, 800, 600, 25000, 88, 40, 23, 01, 128, 4, + FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT, + FB_VMODE_NONINTERLACED, FB_MODE_IS_VESA }, + /* 800x600-75 VESA */ + { NULL, 75, 800, 600, 20202, 160, 16, 21, 01, 80, 3, + FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT, + FB_VMODE_NONINTERLACED, FB_MODE_IS_VESA }, + /* 800x600-85 VESA */ + { NULL, 85, 800, 600, 17761, 152, 32, 27, 01, 64, 3, + FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT, + FB_VMODE_NONINTERLACED, FB_MODE_IS_VESA }, + /* 1024x768-60 VESA */ + { NULL, 60, 1024, 768, 15384, 160, 24, 29, 3, 136, 6, + 0, FB_VMODE_NONINTERLACED, FB_MODE_IS_VESA }, + /* 1024x768-75 VESA */ + { NULL, 75, 1024, 768, 12690, 176, 16, 28, 1, 96, 3, + FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT, + FB_VMODE_NONINTERLACED, FB_MODE_IS_VESA }, + /* 1024x768-85 VESA */ + { NULL, 85, 1024, 768, 10582, 208, 48, 36, 1, 96, 3, + FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT, + FB_VMODE_NONINTERLACED, FB_MODE_IS_VESA }, + /* 1280x960-60 VESA */ + { NULL, 60, 1280, 960, 9259, 312, 96, 36, 1, 112, 3, + FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT, + FB_VMODE_NONINTERLACED, FB_MODE_IS_VESA }, + /* 1280x960-85 VESA */ + { NULL, 85, 1280, 960, 6734, 224, 64, 47, 1, 160, 3, + FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT, + FB_VMODE_NONINTERLACED, FB_MODE_IS_VESA }, + /* 1280x1024-60 VESA */ + { NULL, 60, 1280, 1024, 9259, 248, 48, 38, 1, 112, 3, + FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT, + FB_VMODE_NONINTERLACED, FB_MODE_IS_VESA }, + /* 1280x1024-75 VESA */ + { NULL, 75, 1280, 1024, 7407, 248, 16, 38, 1, 144, 3, + FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT, + FB_VMODE_NONINTERLACED, FB_MODE_IS_VESA }, + /* 1280x1024-85 VESA */ + { NULL, 85, 1280, 1024, 6349, 224, 64, 44, 1, 160, 3, + FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT, + FB_VMODE_NONINTERLACED, FB_MODE_IS_VESA }, + /* 1600x1200-60 VESA */ + { NULL, 60, 1600, 1200, 6172, 304, 64, 46, 1, 192, 3, + FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT, + FB_VMODE_NONINTERLACED, FB_MODE_IS_VESA }, + /* 1600x1200-75 VESA */ + { NULL, 75, 1600, 1200, 4938, 304, 64, 46, 1, 192, 3, + FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT, + FB_VMODE_NONINTERLACED, FB_MODE_IS_VESA }, + /* 1600x1200-85 VESA */ + { NULL, 85, 1600, 1200, 4357, 304, 64, 46, 1, 192, 3, + FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT, + 
FB_VMODE_NONINTERLACED, FB_MODE_IS_VESA }, +}; + +static int gxfb_check_var(struct fb_var_screeninfo *var, struct fb_info *info) +{ + if (var->xres > 1600 || var->yres > 1200) + return -EINVAL; + if ((var->xres > 1280 || var->yres > 1024) && var->bits_per_pixel > 16) + return -EINVAL; + + if (var->bits_per_pixel == 32) { + var->red.offset = 16; var->red.length = 8; + var->green.offset = 8; var->green.length = 8; + var->blue.offset = 0; var->blue.length = 8; + } else if (var->bits_per_pixel == 16) { + var->red.offset = 11; var->red.length = 5; + var->green.offset = 5; var->green.length = 6; + var->blue.offset = 0; var->blue.length = 5; + } else if (var->bits_per_pixel == 8) { + var->red.offset = 0; var->red.length = 8; + var->green.offset = 0; var->green.length = 8; + var->blue.offset = 0; var->blue.length = 8; + } else + return -EINVAL; + var->transp.offset = 0; var->transp.length = 0; + + /* Enough video memory? */ + if (gx_line_delta(var->xres, var->bits_per_pixel) * var->yres > info->fix.smem_len) + return -EINVAL; + + /* FIXME: Check timing parameters here? */ + + return 0; +} + +static int gxfb_set_par(struct fb_info *info) +{ + struct geodefb_par *par = info->par; + + if (info->var.bits_per_pixel > 8) { + info->fix.visual = FB_VISUAL_TRUECOLOR; + fb_dealloc_cmap(&info->cmap); + } else { + info->fix.visual = FB_VISUAL_PSEUDOCOLOR; + fb_alloc_cmap(&info->cmap, 1<<info->var.bits_per_pixel, 0); + } + + info->fix.line_length = gx_line_delta(info->var.xres, info->var.bits_per_pixel); + + par->dc_ops->set_mode(info); + + return 0; +} + +static inline u_int chan_to_field(u_int chan, struct fb_bitfield *bf) +{ + chan &= 0xffff; + chan >>= 16 - bf->length; + return chan << bf->offset; +} + +static int gxfb_setcolreg(unsigned regno, unsigned red, unsigned green, + unsigned blue, unsigned transp, + struct fb_info *info) +{ + struct geodefb_par *par = info->par; + + if (info->var.grayscale) { + /* grayscale = 0.30*R + 0.59*G + 0.11*B */ + red = green = blue = (red * 77 + green * 151 + blue * 28) >> 8; + } + + /* Truecolor has hardware independent palette */ + if (info->fix.visual == FB_VISUAL_TRUECOLOR) { + u32 *pal = info->pseudo_palette; + u32 v; + + if (regno >= 16) + return -EINVAL; + + v = chan_to_field(red, &info->var.red); + v |= chan_to_field(green, &info->var.green); + v |= chan_to_field(blue, &info->var.blue); + + pal[regno] = v; + } else { + if (regno >= 256) + return -EINVAL; + + par->dc_ops->set_palette_reg(info, regno, red, green, blue); + } + + return 0; +} + +static int gxfb_blank(int blank_mode, struct fb_info *info) +{ + struct geodefb_par *par = info->par; + + return par->vid_ops->blank_display(info, blank_mode); +} + +static int __init gxfb_map_video_memory(struct fb_info *info, struct pci_dev *dev) +{ + struct geodefb_par *par = info->par; + int fb_len; + int ret; + + ret = pci_enable_device(dev); + if (ret < 0) + return ret; + + ret = pci_request_region(dev, 3, "gxfb (video processor)"); + if (ret < 0) + return ret; + par->vid_regs = ioremap(pci_resource_start(dev, 3), + pci_resource_len(dev, 3)); + if (!par->vid_regs) + return -ENOMEM; + + ret = pci_request_region(dev, 2, "gxfb (display controller)"); + if (ret < 0) + return ret; + par->dc_regs = ioremap(pci_resource_start(dev, 2), pci_resource_len(dev, 2)); + if (!par->dc_regs) + return -ENOMEM; + + ret = pci_request_region(dev, 0, "gxfb (framebuffer)"); + if (ret < 0) + return ret; + if ((fb_len = gx_frame_buffer_size()) < 0) + return -ENOMEM; + info->fix.smem_start = pci_resource_start(dev, 0); + info->fix.smem_len = 
fb_len; + info->screen_base = ioremap(info->fix.smem_start, info->fix.smem_len); + if (!info->screen_base) + return -ENOMEM; + + dev_info(&dev->dev, "%d Kibyte of video memory at 0x%lx\n", + info->fix.smem_len / 1024, info->fix.smem_start); + + return 0; +} + +static struct fb_ops gxfb_ops = { + .owner = THIS_MODULE, + .fb_check_var = gxfb_check_var, + .fb_set_par = gxfb_set_par, + .fb_setcolreg = gxfb_setcolreg, + .fb_blank = gxfb_blank, + /* No HW acceleration for now. */ + .fb_fillrect = cfb_fillrect, + .fb_copyarea = cfb_copyarea, + .fb_imageblit = cfb_imageblit, +}; + +static struct fb_info * __init gxfb_init_fbinfo(struct device *dev) +{ + struct geodefb_par *par; + struct fb_info *info; + + /* Alloc enough space for the pseudo palette. */ + info = framebuffer_alloc(sizeof(struct geodefb_par) + sizeof(u32) * 16, dev); + if (!info) + return NULL; + + par = info->par; + + strcpy(info->fix.id, "Geode GX"); + + info->fix.type = FB_TYPE_PACKED_PIXELS; + info->fix.type_aux = 0; + info->fix.xpanstep = 0; + info->fix.ypanstep = 0; + info->fix.ywrapstep = 0; + info->fix.accel = FB_ACCEL_NONE; + + info->var.nonstd = 0; + info->var.activate = FB_ACTIVATE_NOW; + info->var.height = -1; + info->var.width = -1; + info->var.accel_flags = 0; + info->var.vmode = FB_VMODE_NONINTERLACED; + + info->fbops = &gxfb_ops; + info->flags = FBINFO_DEFAULT; + info->node = -1; + + info->pseudo_palette = (void *)par + sizeof(struct geodefb_par); + + info->var.grayscale = 0; + + return info; +} + +static int __init gxfb_probe(struct pci_dev *pdev, const struct pci_device_id *id) +{ + struct geodefb_par *par; + struct fb_info *info; + int ret; + + info = gxfb_init_fbinfo(&pdev->dev); + if (!info) + return -ENOMEM; + par = info->par; + + /* GX display controller and GX video device. */ + par->dc_ops = &gx_dc_ops; + par->vid_ops = &gx_vid_ops; + + if ((ret = gxfb_map_video_memory(info, pdev)) < 0) { + dev_err(&pdev->dev, "failed to map frame buffer or controller registers\n"); + goto err; + } + + ret = fb_find_mode(&info->var, info, mode_option, + gx_modedb, ARRAY_SIZE(gx_modedb), NULL, 16); + if (ret == 0 || ret == 4) { + dev_err(&pdev->dev, "could not find valid video mode\n"); + ret = -EINVAL; + goto err; + } + + /* Clear the frame buffer of garbage. 
*/ + memset_io(info->screen_base, 0, info->fix.smem_len); + + gxfb_check_var(&info->var, info); + gxfb_set_par(info); + + if (register_framebuffer(info) < 0) { + ret = -EINVAL; + goto err; + } + pci_set_drvdata(pdev, info); + printk(KERN_INFO "fb%d: %s frame buffer device\n", info->node, info->fix.id); + return 0; + + err: + if (info->screen_base) { + iounmap(info->screen_base); + pci_release_region(pdev, 0); + } + if (par->vid_regs) { + iounmap(par->vid_regs); + pci_release_region(pdev, 3); + } + if (par->dc_regs) { + iounmap(par->dc_regs); + pci_release_region(pdev, 2); + } + + pci_disable_device(pdev); + + if (info) + framebuffer_release(info); + return ret; +} + +static void gxfb_remove(struct pci_dev *pdev) +{ + struct fb_info *info = pci_get_drvdata(pdev); + struct geodefb_par *par = info->par; + + unregister_framebuffer(info); + + iounmap((void __iomem *)info->screen_base); + pci_release_region(pdev, 0); + + iounmap(par->vid_regs); + pci_release_region(pdev, 3); + + iounmap(par->dc_regs); + pci_release_region(pdev, 2); + + pci_disable_device(pdev); + pci_set_drvdata(pdev, NULL); + + framebuffer_release(info); +} + +static struct pci_device_id gxfb_id_table[] = { + { PCI_VENDOR_ID_NS, PCI_DEVICE_ID_NS_CS5535_VIDEO, + PCI_ANY_ID, PCI_ANY_ID, PCI_BASE_CLASS_DISPLAY << 16, + 0xff0000, 0 }, + { 0, } +}; + +MODULE_DEVICE_TABLE(pci, gxfb_id_table); + +static struct pci_driver gxfb_driver = { + .name = "gxfb", + .id_table = gxfb_id_table, + .probe = gxfb_probe, + .remove = gxfb_remove, +}; + +static int __init gxfb_init(void) +{ +#ifndef MODULE + if (fb_get_options("gxfb", NULL)) + return -ENODEV; +#endif + return pci_register_driver(&gxfb_driver); +} + +static void __exit gxfb_cleanup(void) +{ + pci_unregister_driver(&gxfb_driver); +} + +module_init(gxfb_init); +module_exit(gxfb_cleanup); + +module_param_string(mode, mode_option, sizeof(mode_option), 0444); +MODULE_PARM_DESC(mode, "video mode (<x>x<y>[-<bpp>][@<refr>])"); + +MODULE_DESCRIPTION("Framebuffer driver for the AMD Geode GX"); +MODULE_LICENSE("GPL"); diff --git a/drivers/video/geode/video_gx.c b/drivers/video/geode/video_gx.c new file mode 100644 index 000000000000..2b2a7880ea75 --- /dev/null +++ b/drivers/video/geode/video_gx.c @@ -0,0 +1,262 @@ +/* + * Geode GX video processor device. + * + * Copyright (C) 2006 Arcom Control Systems Ltd. + * + * Portions from AMD's original 2.4 driver: + * Copyright (C) 2004 Advanced Micro Devices, Inc. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. + */ +#include <linux/fb.h> +#include <linux/delay.h> +#include <asm/io.h> +#include <asm/delay.h> +#include <asm/msr.h> + +#include "geodefb.h" +#include "video_gx.h" + + +/* + * Tables of register settings for various DOTCLKs. 
+ */ +struct gx_pll_entry { + long pixclock; /* ps */ + u32 sys_rstpll_bits; + u32 dotpll_value; +}; + +#define POSTDIV3 ((u32)MSR_GLCP_SYS_RSTPLL_DOTPOSTDIV3) +#define PREMULT2 ((u32)MSR_GLCP_SYS_RSTPLL_DOTPREMULT2) +#define PREDIV2 ((u32)MSR_GLCP_SYS_RSTPLL_DOTPOSTDIV3) + +static const struct gx_pll_entry gx_pll_table_48MHz[] = { + { 40123, POSTDIV3, 0x00000BF2 }, /* 24.9230 */ + { 39721, 0, 0x00000037 }, /* 25.1750 */ + { 35308, POSTDIV3|PREMULT2, 0x00000B1A }, /* 28.3220 */ + { 31746, POSTDIV3, 0x000002D2 }, /* 31.5000 */ + { 27777, POSTDIV3|PREMULT2, 0x00000FE2 }, /* 36.0000 */ + { 26666, POSTDIV3, 0x0000057A }, /* 37.5000 */ + { 25000, POSTDIV3, 0x0000030A }, /* 40.0000 */ + { 22271, 0, 0x00000063 }, /* 44.9000 */ + { 20202, 0, 0x0000054B }, /* 49.5000 */ + { 20000, 0, 0x0000026E }, /* 50.0000 */ + { 19860, PREMULT2, 0x00000037 }, /* 50.3500 */ + { 18518, POSTDIV3|PREMULT2, 0x00000B0D }, /* 54.0000 */ + { 17777, 0, 0x00000577 }, /* 56.2500 */ + { 17733, 0, 0x000007F7 }, /* 56.3916 */ + { 17653, 0, 0x0000057B }, /* 56.6444 */ + { 16949, PREMULT2, 0x00000707 }, /* 59.0000 */ + { 15873, POSTDIV3|PREMULT2, 0x00000B39 }, /* 63.0000 */ + { 15384, POSTDIV3|PREMULT2, 0x00000B45 }, /* 65.0000 */ + { 14814, POSTDIV3|PREMULT2, 0x00000FC1 }, /* 67.5000 */ + { 14124, POSTDIV3, 0x00000561 }, /* 70.8000 */ + { 13888, POSTDIV3, 0x000007E1 }, /* 72.0000 */ + { 13426, PREMULT2, 0x00000F4A }, /* 74.4810 */ + { 13333, 0, 0x00000052 }, /* 75.0000 */ + { 12698, 0, 0x00000056 }, /* 78.7500 */ + { 12500, POSTDIV3|PREMULT2, 0x00000709 }, /* 80.0000 */ + { 11135, PREMULT2, 0x00000262 }, /* 89.8000 */ + { 10582, 0, 0x000002D2 }, /* 94.5000 */ + { 10101, PREMULT2, 0x00000B4A }, /* 99.0000 */ + { 10000, PREMULT2, 0x00000036 }, /* 100.0000 */ + { 9259, 0, 0x000007E2 }, /* 108.0000 */ + { 8888, 0, 0x000007F6 }, /* 112.5000 */ + { 7692, POSTDIV3|PREMULT2, 0x00000FB0 }, /* 130.0000 */ + { 7407, POSTDIV3|PREMULT2, 0x00000B50 }, /* 135.0000 */ + { 6349, 0, 0x00000055 }, /* 157.5000 */ + { 6172, 0, 0x000009C1 }, /* 162.0000 */ + { 5787, PREMULT2, 0x0000002D }, /* 172.798 */ + { 5698, 0, 0x000002C1 }, /* 175.5000 */ + { 5291, 0, 0x000002D1 }, /* 189.0000 */ + { 4938, 0, 0x00000551 }, /* 202.5000 */ + { 4357, 0, 0x0000057D }, /* 229.5000 */ +}; + +static const struct gx_pll_entry gx_pll_table_14MHz[] = { + { 39721, 0, 0x00000037 }, /* 25.1750 */ + { 35308, 0, 0x00000B7B }, /* 28.3220 */ + { 31746, 0, 0x000004D3 }, /* 31.5000 */ + { 27777, 0, 0x00000BE3 }, /* 36.0000 */ + { 26666, 0, 0x0000074F }, /* 37.5000 */ + { 25000, 0, 0x0000050B }, /* 40.0000 */ + { 22271, 0, 0x00000063 }, /* 44.9000 */ + { 20202, 0, 0x0000054B }, /* 49.5000 */ + { 20000, 0, 0x0000026E }, /* 50.0000 */ + { 19860, 0, 0x000007C3 }, /* 50.3500 */ + { 18518, 0, 0x000007E3 }, /* 54.0000 */ + { 17777, 0, 0x00000577 }, /* 56.2500 */ + { 17733, 0, 0x000002FB }, /* 56.3916 */ + { 17653, 0, 0x0000057B }, /* 56.6444 */ + { 16949, 0, 0x0000058B }, /* 59.0000 */ + { 15873, 0, 0x0000095E }, /* 63.0000 */ + { 15384, 0, 0x0000096A }, /* 65.0000 */ + { 14814, 0, 0x00000BC2 }, /* 67.5000 */ + { 14124, 0, 0x0000098A }, /* 70.8000 */ + { 13888, 0, 0x00000BE2 }, /* 72.0000 */ + { 13333, 0, 0x00000052 }, /* 75.0000 */ + { 12698, 0, 0x00000056 }, /* 78.7500 */ + { 12500, 0, 0x0000050A }, /* 80.0000 */ + { 11135, 0, 0x0000078E }, /* 89.8000 */ + { 10582, 0, 0x000002D2 }, /* 94.5000 */ + { 10101, 0, 0x000011F6 }, /* 99.0000 */ + { 10000, 0, 0x0000054E }, /* 100.0000 */ + { 9259, 0, 0x000007E2 }, /* 108.0000 */ + { 8888, 0, 0x000002FA }, /* 112.5000 */ + { 7692, 0, 
0x00000BB1 }, /* 130.0000 */ + { 7407, 0, 0x00000975 }, /* 135.0000 */ + { 6349, 0, 0x00000055 }, /* 157.5000 */ + { 6172, 0, 0x000009C1 }, /* 162.0000 */ + { 5698, 0, 0x000002C1 }, /* 175.5000 */ + { 5291, 0, 0x00000539 }, /* 189.0000 */ + { 4938, 0, 0x00000551 }, /* 202.5000 */ + { 4357, 0, 0x0000057D }, /* 229.5000 */ +}; + +static void gx_set_dclk_frequency(struct fb_info *info) +{ + const struct gx_pll_entry *pll_table; + int pll_table_len; + int i, best_i; + long min, diff; + u64 dotpll, sys_rstpll; + int timeout = 1000; + + /* Rev. 1 Geode GXs use a 14 MHz reference clock instead of 48 MHz. */ + if (cpu_data->x86_mask == 1) { + pll_table = gx_pll_table_14MHz; + pll_table_len = ARRAY_SIZE(gx_pll_table_14MHz); + } else { + pll_table = gx_pll_table_48MHz; + pll_table_len = ARRAY_SIZE(gx_pll_table_48MHz); + } + + /* Search the table for the closest pixclock. */ + best_i = 0; + min = abs(pll_table[0].pixclock - info->var.pixclock); + for (i = 1; i < pll_table_len; i++) { + diff = abs(pll_table[i].pixclock - info->var.pixclock); + if (diff < min) { + min = diff; + best_i = i; + } + } + + rdmsrl(MSR_GLCP_SYS_RSTPLL, sys_rstpll); + rdmsrl(MSR_GLCP_DOTPLL, dotpll); + + /* Program new M, N and P. */ + dotpll &= 0x00000000ffffffffull; + dotpll |= (u64)pll_table[best_i].dotpll_value << 32; + dotpll |= MSR_GLCP_DOTPLL_DOTRESET; + dotpll &= ~MSR_GLCP_DOTPLL_BYPASS; + + wrmsrl(MSR_GLCP_DOTPLL, dotpll); + + /* Program dividers. */ + sys_rstpll &= ~( MSR_GLCP_SYS_RSTPLL_DOTPREDIV2 + | MSR_GLCP_SYS_RSTPLL_DOTPREMULT2 + | MSR_GLCP_SYS_RSTPLL_DOTPOSTDIV3 ); + sys_rstpll |= pll_table[best_i].sys_rstpll_bits; + + wrmsrl(MSR_GLCP_SYS_RSTPLL, sys_rstpll); + + /* Clear reset bit to start PLL. */ + dotpll &= ~(MSR_GLCP_DOTPLL_DOTRESET); + wrmsrl(MSR_GLCP_DOTPLL, dotpll); + + /* Wait for LOCK bit. */ + do { + rdmsrl(MSR_GLCP_DOTPLL, dotpll); + } while (timeout-- && !(dotpll & MSR_GLCP_DOTPLL_LOCK)); +} + +static void gx_configure_display(struct fb_info *info) +{ + struct geodefb_par *par = info->par; + u32 dcfg, fp_pm; + + dcfg = readl(par->vid_regs + GX_DCFG); + + /* Clear bits from existing mode. */ + dcfg &= ~(GX_DCFG_CRT_SYNC_SKW_MASK + | GX_DCFG_CRT_HSYNC_POL | GX_DCFG_CRT_VSYNC_POL + | GX_DCFG_VSYNC_EN | GX_DCFG_HSYNC_EN); + + /* Set default sync skew. */ + dcfg |= GX_DCFG_CRT_SYNC_SKW_DFLT; + + /* Enable hsync and vsync. */ + dcfg |= GX_DCFG_HSYNC_EN | GX_DCFG_VSYNC_EN; + + /* Sync polarities. */ + if (info->var.sync & FB_SYNC_HOR_HIGH_ACT) + dcfg |= GX_DCFG_CRT_HSYNC_POL; + if (info->var.sync & FB_SYNC_VERT_HIGH_ACT) + dcfg |= GX_DCFG_CRT_VSYNC_POL; + + writel(dcfg, par->vid_regs + GX_DCFG); + + /* Power on flat panel. */ + fp_pm = readl(par->vid_regs + GX_FP_PM); + fp_pm |= GX_FP_PM_P; + writel(fp_pm, par->vid_regs + GX_FP_PM); +} + +static int gx_blank_display(struct fb_info *info, int blank_mode) +{ + struct geodefb_par *par = info->par; + u32 dcfg, fp_pm; + int blank, hsync, vsync; + + /* CRT power saving modes. 
*/ + switch (blank_mode) { + case FB_BLANK_UNBLANK: + blank = 0; hsync = 1; vsync = 1; + break; + case FB_BLANK_NORMAL: + blank = 1; hsync = 1; vsync = 1; + break; + case FB_BLANK_VSYNC_SUSPEND: + blank = 1; hsync = 1; vsync = 0; + break; + case FB_BLANK_HSYNC_SUSPEND: + blank = 1; hsync = 0; vsync = 1; + break; + case FB_BLANK_POWERDOWN: + blank = 1; hsync = 0; vsync = 0; + break; + default: + return -EINVAL; + } + dcfg = readl(par->vid_regs + GX_DCFG); + dcfg &= ~(GX_DCFG_DAC_BL_EN + | GX_DCFG_HSYNC_EN | GX_DCFG_VSYNC_EN); + if (!blank) + dcfg |= GX_DCFG_DAC_BL_EN; + if (hsync) + dcfg |= GX_DCFG_HSYNC_EN; + if (vsync) + dcfg |= GX_DCFG_VSYNC_EN; + writel(dcfg, par->vid_regs + GX_DCFG); + + /* Power on/off flat panel. */ + fp_pm = readl(par->vid_regs + GX_FP_PM); + if (blank_mode == FB_BLANK_POWERDOWN) + fp_pm &= ~GX_FP_PM_P; + else + fp_pm |= GX_FP_PM_P; + writel(fp_pm, par->vid_regs + GX_FP_PM); + + return 0; +} + +struct geode_vid_ops gx_vid_ops = { + .set_dclk = gx_set_dclk_frequency, + .configure_display = gx_configure_display, + .blank_display = gx_blank_display, +}; diff --git a/drivers/video/geode/video_gx.h b/drivers/video/geode/video_gx.h new file mode 100644 index 000000000000..2d9211f3ed84 --- /dev/null +++ b/drivers/video/geode/video_gx.h @@ -0,0 +1,47 @@ +/* + * Geode GX video device + * + * Copyright (C) 2006 Arcom Control Systems Ltd. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + */ +#ifndef __VIDEO_GX_H__ +#define __VIDEO_GX_H__ + +extern struct geode_vid_ops gx_vid_ops; + +/* Geode GX video processor registers */ + +#define GX_DCFG 0x0008 +# define GX_DCFG_CRT_EN 0x00000001 +# define GX_DCFG_HSYNC_EN 0x00000002 +# define GX_DCFG_VSYNC_EN 0x00000004 +# define GX_DCFG_DAC_BL_EN 0x00000008 +# define GX_DCFG_CRT_HSYNC_POL 0x00000100 +# define GX_DCFG_CRT_VSYNC_POL 0x00000200 +# define GX_DCFG_CRT_SYNC_SKW_MASK 0x0001C000 +# define GX_DCFG_CRT_SYNC_SKW_DFLT 0x00010000 +# define GX_DCFG_VG_CK 0x00100000 +# define GX_DCFG_GV_GAM 0x00200000 +# define GX_DCFG_DAC_VREF 0x04000000 + +/* Geode GX flat panel display control registers */ +#define GX_FP_PM 0x410 +# define GX_FP_PM_P 0x01000000 + +/* Geode GX clock control MSRs */ + +#define MSR_GLCP_SYS_RSTPLL 0x4c000014 +# define MSR_GLCP_SYS_RSTPLL_DOTPREDIV2 (0x0000000000000002ull) +# define MSR_GLCP_SYS_RSTPLL_DOTPREMULT2 (0x0000000000000004ull) +# define MSR_GLCP_SYS_RSTPLL_DOTPOSTDIV3 (0x0000000000000008ull) + +#define MSR_GLCP_DOTPLL 0x4c000015 +# define MSR_GLCP_DOTPLL_DOTRESET (0x0000000000000001ull) +# define MSR_GLCP_DOTPLL_BYPASS (0x0000000000008000ull) +# define MSR_GLCP_DOTPLL_LOCK (0x0000000002000000ull) + +#endif /* !__VIDEO_GX_H__ */ diff --git a/drivers/video/i810/i810-i2c.c b/drivers/video/i810/i810-i2c.c index e3c8b5f1ca76..3fe3ae1aff12 100644 --- a/drivers/video/i810/i810-i2c.c +++ b/drivers/video/i810/i810-i2c.c @@ -210,8 +210,7 @@ int i810_probe_i2c_connector(struct fb_info *info, u8 **out_edid, int conn) } } - if (out_edid) - *out_edid = edid; + *out_edid = edid; return (edid) ? 
0 : 1; } diff --git a/drivers/video/imsttfb.c b/drivers/video/imsttfb.c index 7db42542eb19..f73c642b50c2 100644 --- a/drivers/video/imsttfb.c +++ b/drivers/video/imsttfb.c @@ -440,9 +440,9 @@ getclkMHz(struct imstt_par *par) static void setclkMHz(struct imstt_par *par, __u32 MHz) { - __u32 clk_m, clk_n, clk_p, x, stage, spilled; + __u32 clk_m, clk_n, x, stage, spilled; - clk_m = clk_n = clk_p = 0; + clk_m = clk_n = 0; stage = spilled = 0; for (;;) { switch (stage) { @@ -453,7 +453,7 @@ setclkMHz(struct imstt_par *par, __u32 MHz) clk_n++; break; } - x = 20 * (clk_m + 1) / ((clk_n + 1) * (clk_p ? 2 * clk_p : 1)); + x = 20 * (clk_m + 1) / (clk_n + 1); if (x == MHz) break; if (x > MHz) { @@ -466,7 +466,7 @@ setclkMHz(struct imstt_par *par, __u32 MHz) par->init.pclk_m = clk_m; par->init.pclk_n = clk_n; - par->init.pclk_p = clk_p; + par->init.pclk_p = 0; } static struct imstt_regvals * @@ -1372,18 +1372,24 @@ init_imstt(struct fb_info *info) write_reg_le32(par->dc_regs, STGCTL, tmp & ~0x1); write_reg_le32(par->dc_regs, SSR, 0); - /* set default values for DAC registers */ + /* set default values for DAC registers */ if (par->ramdac == IBM) { - par->cmap_regs[PPMASK] = 0xff; eieio(); - par->cmap_regs[PIDXHI] = 0; eieio(); - for (i = 0; i < sizeof(ibm_initregs) / sizeof(*ibm_initregs); i++) { - par->cmap_regs[PIDXLO] = ibm_initregs[i].addr; eieio(); - par->cmap_regs[PIDXDATA] = ibm_initregs[i].value; eieio(); + par->cmap_regs[PPMASK] = 0xff; + eieio(); + par->cmap_regs[PIDXHI] = 0; + eieio(); + for (i = 0; i < ARRAY_SIZE(ibm_initregs); i++) { + par->cmap_regs[PIDXLO] = ibm_initregs[i].addr; + eieio(); + par->cmap_regs[PIDXDATA] = ibm_initregs[i].value; + eieio(); } } else { - for (i = 0; i < sizeof(tvp_initregs) / sizeof(*tvp_initregs); i++) { - par->cmap_regs[TVPADDRW] = tvp_initregs[i].addr; eieio(); - par->cmap_regs[TVPIDATA] = tvp_initregs[i].value; eieio(); + for (i = 0; i < ARRAY_SIZE(tvp_initregs); i++) { + par->cmap_regs[TVPADDRW] = tvp_initregs[i].addr; + eieio(); + par->cmap_regs[TVPIDATA] = tvp_initregs[i].value; + eieio(); } } diff --git a/drivers/video/macmodes.c b/drivers/video/macmodes.c index 2fc71081f7e7..c0385c6f7db5 100644 --- a/drivers/video/macmodes.c +++ b/drivers/video/macmodes.c @@ -380,7 +380,7 @@ int __init mac_find_mode(struct fb_var_screeninfo *var, struct fb_info *info, if (mode_option && !strncmp(mode_option, "mac", 3)) { mode_option += 3; db = mac_modedb; - dbsize = sizeof(mac_modedb)/sizeof(*mac_modedb); + dbsize = ARRAY_SIZE(mac_modedb); } return fb_find_mode(var, info, mode_option, db, dbsize, &mac_modedb[DEFAULT_MODEDB_INDEX], default_bpp); diff --git a/drivers/video/matrox/matroxfb_g450.c b/drivers/video/matrox/matroxfb_g450.c index c122d8743dd2..4d610b405d45 100644 --- a/drivers/video/matrox/matroxfb_g450.c +++ b/drivers/video/matrox/matroxfb_g450.c @@ -59,7 +59,7 @@ static const struct mctl g450_controls[] = }, offsetof(struct matrox_fb_info, altout.tvo_params.testout) }, }; -#define G450CTRLS (sizeof(g450_controls)/sizeof(g450_controls[0])) +#define G450CTRLS ARRAY_SIZE(g450_controls) /* Return: positive number: id found -EINVAL: id not found, return failure diff --git a/drivers/video/matrox/matroxfb_maven.c b/drivers/video/matrox/matroxfb_maven.c index 6019710dc298..5d29a26b8cdf 100644 --- a/drivers/video/matrox/matroxfb_maven.c +++ b/drivers/video/matrox/matroxfb_maven.c @@ -89,12 +89,12 @@ static const struct mctl maven_controls[] = }, offsetof(struct matrox_fb_info, altout.tvo_params.hue) }, { { V4L2_CID_GAMMA, V4L2_CTRL_TYPE_INTEGER, "gamma", - 0, 
sizeof(maven_gamma)/sizeof(maven_gamma[0])-1, 1, 3, + 0, ARRAY_SIZE(maven_gamma) - 1, 1, 3, 0, }, offsetof(struct matrox_fb_info, altout.tvo_params.gamma) }, { { MATROXFB_CID_TESTOUT, V4L2_CTRL_TYPE_BOOLEAN, "test output", - 0, 1, 1, 0, + 0, 1, 1, 0, 0, }, offsetof(struct matrox_fb_info, altout.tvo_params.testout) }, { { MATROXFB_CID_DEFLICKER, V4L2_CTRL_TYPE_INTEGER, @@ -105,7 +105,7 @@ static const struct mctl maven_controls[] = }; -#define MAVCTRLS (sizeof(maven_controls)/sizeof(maven_controls[0])) +#define MAVCTRLS ARRAY_SIZE(maven_controls) /* Return: positive number: id found -EINVAL: id not found, return failure @@ -129,7 +129,7 @@ static int get_ctrl_id(__u32 v4l2_id) { struct maven_data { struct matrox_fb_info* primary_head; - struct i2c_client* client; + struct i2c_client client; int version; }; @@ -970,7 +970,7 @@ static inline int maven_compute_timming(struct maven_data* md, static int maven_program_timming(struct maven_data* md, const struct mavenregs* m) { - struct i2c_client* c = md->client; + struct i2c_client* c = &md->client; if (m->mode == MATROXFB_OUTPUT_MODE_MONITOR) { LR(0x80); @@ -1007,7 +1007,7 @@ static int maven_program_timming(struct maven_data* md, } static inline int maven_resync(struct maven_data* md) { - struct i2c_client* c = md->client; + struct i2c_client* c = &md->client; maven_set_reg(c, 0x95, 0x20); /* start whole thing */ return 0; } @@ -1065,48 +1065,48 @@ static int maven_set_control (struct maven_data* md, maven_compute_bwlevel(md, &blacklevel, &whitelevel); blacklevel = (blacklevel >> 2) | ((blacklevel & 3) << 8); whitelevel = (whitelevel >> 2) | ((whitelevel & 3) << 8); - maven_set_reg_pair(md->client, 0x0e, blacklevel); - maven_set_reg_pair(md->client, 0x1e, whitelevel); + maven_set_reg_pair(&md->client, 0x0e, blacklevel); + maven_set_reg_pair(&md->client, 0x1e, whitelevel); } break; case V4L2_CID_SATURATION: { - maven_set_reg(md->client, 0x20, p->value); - maven_set_reg(md->client, 0x22, p->value); + maven_set_reg(&md->client, 0x20, p->value); + maven_set_reg(&md->client, 0x22, p->value); } break; case V4L2_CID_HUE: { - maven_set_reg(md->client, 0x25, p->value); + maven_set_reg(&md->client, 0x25, p->value); } break; case V4L2_CID_GAMMA: { const struct maven_gamma* g; g = maven_compute_gamma(md); - maven_set_reg(md->client, 0x83, g->reg83); - maven_set_reg(md->client, 0x84, g->reg84); - maven_set_reg(md->client, 0x85, g->reg85); - maven_set_reg(md->client, 0x86, g->reg86); - maven_set_reg(md->client, 0x87, g->reg87); - maven_set_reg(md->client, 0x88, g->reg88); - maven_set_reg(md->client, 0x89, g->reg89); - maven_set_reg(md->client, 0x8a, g->reg8a); - maven_set_reg(md->client, 0x8b, g->reg8b); + maven_set_reg(&md->client, 0x83, g->reg83); + maven_set_reg(&md->client, 0x84, g->reg84); + maven_set_reg(&md->client, 0x85, g->reg85); + maven_set_reg(&md->client, 0x86, g->reg86); + maven_set_reg(&md->client, 0x87, g->reg87); + maven_set_reg(&md->client, 0x88, g->reg88); + maven_set_reg(&md->client, 0x89, g->reg89); + maven_set_reg(&md->client, 0x8a, g->reg8a); + maven_set_reg(&md->client, 0x8b, g->reg8b); } break; case MATROXFB_CID_TESTOUT: { unsigned char val - = maven_get_reg (md->client,0x8d); + = maven_get_reg(&md->client,0x8d); if (p->value) val |= 0x10; else val &= ~0x10; - maven_set_reg (md->client, 0x8d, val); + maven_set_reg(&md->client, 0x8d, val); } break; case MATROXFB_CID_DEFLICKER: { - maven_set_reg(md->client, 0x93, maven_compute_deflicker(md)); + maven_set_reg(&md->client, 0x93, maven_compute_deflicker(md)); } break; } @@ -1185,7 
+1185,6 @@ static int maven_init_client(struct i2c_client* clnt) { MINFO_FROM(container_of(clnt->adapter, struct i2c_bit_adapter, adapter)->minfo); md->primary_head = MINFO; - md->client = clnt; down_write(&ACCESS_FBINFO(altout.lock)); ACCESS_FBINFO(outputs[1]).output = &maven_altout; ACCESS_FBINFO(outputs[1]).src = ACCESS_FBINFO(outputs[1]).default_src; @@ -1243,19 +1242,17 @@ static int maven_detect_client(struct i2c_adapter* adapter, int address, int kin I2C_FUNC_SMBUS_BYTE_DATA | I2C_FUNC_PROTOCOL_MANGLING)) goto ERROR0; - if (!(new_client = (struct i2c_client*)kmalloc(sizeof(*new_client) + sizeof(*data), - GFP_KERNEL))) { + if (!(data = kzalloc(sizeof(*data), GFP_KERNEL))) { err = -ENOMEM; goto ERROR0; } - memset(new_client, 0, sizeof(*new_client) + sizeof(*data)); - data = (struct maven_data*)(new_client + 1); + new_client = &data->client; i2c_set_clientdata(new_client, data); new_client->addr = address; new_client->adapter = adapter; new_client->driver = &maven_driver; new_client->flags = 0; - strcpy(new_client->name, "maven client"); + strlcpy(new_client->name, "maven", I2C_NAME_SIZE); if ((err = i2c_attach_client(new_client))) goto ERROR3; err = maven_init_client(new_client); @@ -1279,12 +1276,10 @@ static int maven_attach_adapter(struct i2c_adapter* adapter) { static int maven_detach_client(struct i2c_client* client) { int err; - if ((err = i2c_detach_client(client))) { - printk(KERN_ERR "maven: Cannot deregister client\n"); + if ((err = i2c_detach_client(client))) return err; - } maven_shutdown_client(client); - kfree(client); + kfree(i2c_get_clientdata(client)); return 0; } @@ -1297,20 +1292,13 @@ static struct i2c_driver maven_driver={ .detach_client = maven_detach_client, }; -/* ************************** */ - -static int matroxfb_maven_init(void) { - int err; - - err = i2c_add_driver(&maven_driver); - if (err) { - printk(KERN_ERR "maven: Maven driver failed to register (%d).\n", err); - return err; - } - return 0; +static int __init matroxfb_maven_init(void) +{ + return i2c_add_driver(&maven_driver); } -static void matroxfb_maven_exit(void) { +static void __exit matroxfb_maven_exit(void) +{ i2c_del_driver(&maven_driver); } diff --git a/drivers/video/modedb.c b/drivers/video/modedb.c index 1da2f84bdc25..26a1c618a205 100644 --- a/drivers/video/modedb.c +++ b/drivers/video/modedb.c @@ -183,6 +183,10 @@ static const struct fb_videomode modedb[] = { NULL, 75, 1600, 1200, 4938, 304, 64, 46, 1, 192, 3, FB_SYNC_HOR_HIGH_ACT|FB_SYNC_VERT_HIGH_ACT, FB_VMODE_NONINTERLACED }, { + /* 1680x1050 @ 60 Hz, 65.191 kHz hsync */ + NULL, 60, 1680, 1050, 6848, 280, 104, 30, 3, 176, 6, + FB_SYNC_HOR_HIGH_ACT|FB_SYNC_VERT_HIGH_ACT, FB_VMODE_NONINTERLACED + }, { /* 1600x1200 @ 85 Hz, 105.77 kHz hsync */ NULL, 85, 1600, 1200, 4545, 272, 16, 37, 4, 192, 3, FB_SYNC_HOR_HIGH_ACT|FB_SYNC_VERT_HIGH_ACT, FB_VMODE_NONINTERLACED @@ -496,7 +500,7 @@ int fb_find_mode(struct fb_var_screeninfo *var, /* Set up defaults */ if (!db) { db = modedb; - dbsize = sizeof(modedb)/sizeof(*modedb); + dbsize = ARRAY_SIZE(modedb); } if (!default_mode) default_mode = &modedb[DEFAULT_MODEDB_INDEX]; diff --git a/drivers/video/neofb.c b/drivers/video/neofb.c index b961d5601bd9..24b12f71d5a8 100644 --- a/drivers/video/neofb.c +++ b/drivers/video/neofb.c @@ -165,20 +165,20 @@ static int neoFindMode(int xres, int yres, int depth) switch (depth) { case 8: - size = sizeof(bios8) / sizeof(biosMode); + size = ARRAY_SIZE(bios8); mode = bios8; break; case 16: - size = sizeof(bios16) / sizeof(biosMode); + size = ARRAY_SIZE(bios16); mode = 
bios16; break; case 24: - size = sizeof(bios24) / sizeof(biosMode); + size = ARRAY_SIZE(bios24); mode = bios24; break; #ifdef NO_32BIT_SUPPORT_YET case 32: - size = sizeof(bios32) / sizeof(biosMode); + size = ARRAY_SIZE(bios32); mode = bios32; break; #endif diff --git a/drivers/video/nvidia/nv_accel.c b/drivers/video/nvidia/nv_accel.c index f377a29ec97a..4aefb8f41637 100644 --- a/drivers/video/nvidia/nv_accel.c +++ b/drivers/video/nvidia/nv_accel.c @@ -300,6 +300,9 @@ int nvidiafb_sync(struct fb_info *info) { struct nvidia_par *par = info->par; + if (info->state != FBINFO_STATE_RUNNING) + return 0; + if (!par->lockup) NVFlush(par); @@ -313,6 +316,9 @@ void nvidiafb_copyarea(struct fb_info *info, const struct fb_copyarea *region) { struct nvidia_par *par = info->par; + if (info->state != FBINFO_STATE_RUNNING) + return; + if (par->lockup) return cfb_copyarea(info, region); @@ -329,6 +335,9 @@ void nvidiafb_fillrect(struct fb_info *info, const struct fb_fillrect *rect) struct nvidia_par *par = info->par; u32 color; + if (info->state != FBINFO_STATE_RUNNING) + return; + if (par->lockup) return cfb_fillrect(info, rect); @@ -412,6 +421,9 @@ void nvidiafb_imageblit(struct fb_info *info, const struct fb_image *image) { struct nvidia_par *par = info->par; + if (info->state != FBINFO_STATE_RUNNING) + return; + if (image->depth == 1 && !par->lockup) nvidiafb_mono_color_expand(info, image); else diff --git a/drivers/video/nvidia/nv_i2c.c b/drivers/video/nvidia/nv_i2c.c index bd9eca05e146..1edb1c432b75 100644 --- a/drivers/video/nvidia/nv_i2c.c +++ b/drivers/video/nvidia/nv_i2c.c @@ -218,8 +218,7 @@ int nvidia_probe_i2c_connector(struct fb_info *info, int conn, u8 **out_edid) } } - if (out_edid) - *out_edid = edid; + *out_edid = edid; return (edid) ? 0 : 1; } diff --git a/drivers/video/nvidia/nv_type.h b/drivers/video/nvidia/nv_type.h index e4a5b1da71c4..acdc26693402 100644 --- a/drivers/video/nvidia/nv_type.h +++ b/drivers/video/nvidia/nv_type.h @@ -129,6 +129,7 @@ struct nvidia_par { int fpHeight; int PanelTweak; int paneltweak; + int pm_state; u32 crtcSync_read; u32 fpSyncs; u32 dmaPut; diff --git a/drivers/video/nvidia/nvidia.c b/drivers/video/nvidia/nvidia.c index a7c4e5e8ead6..6d3e4890cb43 100644 --- a/drivers/video/nvidia/nvidia.c +++ b/drivers/video/nvidia/nvidia.c @@ -21,6 +21,7 @@ #include <linux/fb.h> #include <linux/init.h> #include <linux/pci.h> +#include <linux/console.h> #ifdef CONFIG_MTRR #include <asm/mtrr.h> #endif @@ -296,6 +297,8 @@ static struct pci_device_id nvidiafb_pci_tbl[] = { PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, {PCI_VENDOR_ID_NVIDIA, PCIE_DEVICE_ID_NVIDIA_GEFORCE_6800_GT, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, + {PCI_VENDOR_ID_NVIDIA, PCIE_DEVICE_ID_NVIDIA_QUADRO_NVS280, + PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, {PCI_VENDOR_ID_NVIDIA, 0x0252, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, {PCI_VENDOR_ID_NVIDIA, 0x0313, @@ -615,6 +618,30 @@ static int nvidia_panel_tweak(struct nvidia_par *par, return tweak; } +static void nvidia_vga_protect(struct nvidia_par *par, int on) +{ + unsigned char tmp; + + if (on) { + /* + * Turn off screen and disable sequencer. + */ + tmp = NVReadSeq(par, 0x01); + + NVWriteSeq(par, 0x00, 0x01); /* Synchronous Reset */ + NVWriteSeq(par, 0x01, tmp | 0x20); /* disable the display */ + } else { + /* + * Reenable sequencer, then turn on screen. 
+ */ + + tmp = NVReadSeq(par, 0x01); + + NVWriteSeq(par, 0x01, tmp & ~0x20); /* reenable display */ + NVWriteSeq(par, 0x00, 0x03); /* End Reset */ + } +} + static void nvidia_save_vga(struct nvidia_par *par, struct _riva_hw_state *state) { @@ -643,9 +670,9 @@ static void nvidia_save_vga(struct nvidia_par *par, #undef DUMP_REG -static void nvidia_write_regs(struct nvidia_par *par) +static void nvidia_write_regs(struct nvidia_par *par, + struct _riva_hw_state *state) { - struct _riva_hw_state *state = &par->ModeReg; int i; NVTRACE_ENTER(); @@ -694,32 +721,6 @@ static void nvidia_write_regs(struct nvidia_par *par) NVTRACE_LEAVE(); } -static void nvidia_vga_protect(struct nvidia_par *par, int on) -{ - unsigned char tmp; - - if (on) { - /* - * Turn off screen and disable sequencer. - */ - tmp = NVReadSeq(par, 0x01); - - NVWriteSeq(par, 0x00, 0x01); /* Synchronous Reset */ - NVWriteSeq(par, 0x01, tmp | 0x20); /* disable the display */ - } else { - /* - * Reenable sequencer, then turn on screen. - */ - - tmp = NVReadSeq(par, 0x01); - - NVWriteSeq(par, 0x01, tmp & ~0x20); /* reenable display */ - NVWriteSeq(par, 0x00, 0x03); /* End Reset */ - } -} - - - static int nvidia_calc_regs(struct fb_info *info) { struct nvidia_par *par = info->par; @@ -1068,7 +1069,8 @@ static int nvidiafb_set_par(struct fb_info *info) nvidia_vga_protect(par, 1); - nvidia_write_regs(par); + nvidia_write_regs(par, &par->ModeReg); + NVSetStartAddress(par, 0); #if defined (__BIG_ENDIAN) /* turn on LFB swapping */ @@ -1377,6 +1379,57 @@ static struct fb_ops nvidia_fb_ops = { .fb_sync = nvidiafb_sync, }; +#ifdef CONFIG_PM +static int nvidiafb_suspend(struct pci_dev *dev, pm_message_t state) +{ + struct fb_info *info = pci_get_drvdata(dev); + struct nvidia_par *par = info->par; + + acquire_console_sem(); + par->pm_state = state.event; + + if (state.event == PM_EVENT_FREEZE) { + dev->dev.power.power_state = state; + } else { + fb_set_suspend(info, 1); + nvidiafb_blank(FB_BLANK_POWERDOWN, info); + nvidia_write_regs(par, &par->SavedReg); + pci_save_state(dev); + pci_disable_device(dev); + pci_set_power_state(dev, pci_choose_state(dev, state)); + } + + release_console_sem(); + return 0; +} + +static int nvidiafb_resume(struct pci_dev *dev) +{ + struct fb_info *info = pci_get_drvdata(dev); + struct nvidia_par *par = info->par; + + acquire_console_sem(); + pci_set_power_state(dev, PCI_D0); + + if (par->pm_state != PM_EVENT_FREEZE) { + pci_restore_state(dev); + pci_enable_device(dev); + pci_set_master(dev); + } + + par->pm_state = PM_EVENT_ON; + nvidiafb_set_par(info); + fb_set_suspend (info, 0); + nvidiafb_blank(FB_BLANK_UNBLANK, info); + + release_console_sem(); + return 0; +} +#else +#define nvidiafb_suspend NULL +#define nvidiafb_resume NULL +#endif + static int __devinit nvidia_set_fbinfo(struct fb_info *info) { struct fb_monspecs *specs = &info->monspecs; @@ -1720,8 +1773,6 @@ static void __exit nvidiafb_remove(struct pci_dev *pd) struct nvidia_par *par = info->par; NVTRACE_ENTER(); - if (!info) - return; unregister_framebuffer(info); #ifdef CONFIG_MTRR @@ -1798,8 +1849,10 @@ static int __devinit nvidiafb_setup(char *options) static struct pci_driver nvidiafb_driver = { .name = "nvidiafb", .id_table = nvidiafb_pci_tbl, - .probe = nvidiafb_probe, - .remove = __exit_p(nvidiafb_remove), + .probe = nvidiafb_probe, + .suspend = nvidiafb_suspend, + .resume = nvidiafb_resume, + .remove = __exit_p(nvidiafb_remove), }; /* ------------------------------------------------------------------------- * diff --git a/drivers/video/pmagb-b-fb.c 
b/drivers/video/pmagb-b-fb.c index eeeac924b500..73e2d7d16608 100644 --- a/drivers/video/pmagb-b-fb.c +++ b/drivers/video/pmagb-b-fb.c @@ -228,7 +228,7 @@ static void __init pmagbbfb_osc_setup(struct fb_info *info) freq1 = (par->osc0 * count1 + count0 / 2) / count0; par->osc1 = freq1; - for (i = 0; i < sizeof(pmagbbfb_freqs) / sizeof(*pmagbbfb_freqs); i++) + for (i = 0; i < ARRAY_SIZE(pmagbbfb_freqs); i++) if (freq1 >= pmagbbfb_freqs[i] - (pmagbbfb_freqs[i] + 128) / 256 && freq1 <= pmagbbfb_freqs[i] + diff --git a/drivers/video/radeonfb.c b/drivers/video/radeonfb.c index db9fb9074dbc..24982adb3aa2 100644 --- a/drivers/video/radeonfb.c +++ b/drivers/video/radeonfb.c @@ -759,7 +759,7 @@ static void __iomem *radeon_find_rom(struct radeonfb_info *rinfo) rom = rom_base; for (i = 0; (i < 512) && (stage != 4); i++) { - for(j = 0;j < sizeof(radeon_sig)/sizeof(char *);j++) { + for (j = 0; j < ARRAY_SIZE(radeon_sig); j++) { if (radeon_sig[j][0] == *rom) if (strncmp(radeon_sig[j], rom, strlen(radeon_sig[j])) == 0) { diff --git a/drivers/video/riva/fbdev.c b/drivers/video/riva/fbdev.c index 6c19ab6afb01..f841f013b96f 100644 --- a/drivers/video/riva/fbdev.c +++ b/drivers/video/riva/fbdev.c @@ -2072,8 +2072,6 @@ static void __exit rivafb_remove(struct pci_dev *pd) struct riva_par *par = info->par; NVTRACE_ENTER(); - if (!info) - return; #ifdef CONFIG_FB_RIVA_I2C riva_delete_i2c_busses(par); diff --git a/drivers/video/savage/savagefb-i2c.c b/drivers/video/savage/savagefb-i2c.c index 00719a91479f..21debed863ac 100644 --- a/drivers/video/savage/savagefb-i2c.c +++ b/drivers/video/savage/savagefb-i2c.c @@ -273,8 +273,7 @@ int savagefb_probe_i2c_connector(struct fb_info *info, u8 **out_edid) } } - if (out_edid) - *out_edid = edid; + *out_edid = edid; return (edid) ? 0 : 1; } diff --git a/drivers/video/sis/init301.c b/drivers/video/sis/init301.c index 2d88f908170a..c3e070a6effd 100644 --- a/drivers/video/sis/init301.c +++ b/drivers/video/sis/init301.c @@ -8564,11 +8564,9 @@ SiS_ChrontelDoSomething3(struct SiS_Private *SiS_Pr, unsigned short ModeNo) static void SiS_ChrontelDoSomething2(struct SiS_Private *SiS_Pr) { - unsigned short temp,tempcl,tempch; + unsigned short temp; SiS_LongDelay(SiS_Pr, 1); - tempcl = 3; - tempch = 0; do { temp = SiS_GetCH701x(SiS_Pr,0x66); @@ -8582,13 +8580,6 @@ SiS_ChrontelDoSomething2(struct SiS_Private *SiS_Pr) SiS_SetCH701xForLCD(SiS_Pr); - if(tempcl == 0) { - if(tempch == 3) break; - SiS_ChrontelResetDB(SiS_Pr); - tempcl = 3; - tempch++; - } - tempcl--; temp = SiS_GetCH701x(SiS_Pr,0x76); temp &= 0xfb; /* Reset PLL */ SiS_SetCH701x(SiS_Pr,0x76,temp); diff --git a/drivers/video/sstfb.c b/drivers/video/sstfb.c index 8c1a8b5135c6..c44de90ca12e 100644 --- a/drivers/video/sstfb.c +++ b/drivers/video/sstfb.c @@ -1194,10 +1194,11 @@ static struct dac_switch dacs[] __devinitdata = { static int __devinit sst_detect_dactype(struct fb_info *info, struct sstfb_par *par) { int i, ret = 0; - - for (i=0; i<sizeof(dacs)/sizeof(dacs[0]); i++) { + + for (i = 0; i < ARRAY_SIZE(dacs); i++) { ret = dacs[i].detect(info); - if (ret) break; + if (ret) + break; } if (!ret) return 0; @@ -1604,8 +1605,8 @@ static int sstfb_dump_regs(struct fb_info *info) {FBZMODE,"fbzmode"}, }; - const int pci_s = sizeof(pci_regs)/sizeof(pci_regs[0]); - const int sst_s = sizeof(sst_regs)/sizeof(sst_regs[0]); + const int pci_s = ARRAY_SIZE(pci_regs); + const int sst_s = ARRAY_SIZE(sst_regs); struct sstfb_par *par = info->par; struct pci_dev *dev = par->dev; u32 pci_res[pci_s]; diff --git a/drivers/video/virgefb.c 
b/drivers/video/virgefb.c index ed78747487e2..5ea2345dab99 100644 --- a/drivers/video/virgefb.c +++ b/drivers/video/virgefb.c @@ -616,8 +616,7 @@ static struct { #endif }; -#define arraysize(x) (sizeof(x)/sizeof(*(x))) -#define NUM_TOTAL_MODES arraysize(virgefb_predefined) +#define NUM_TOTAL_MODES ARRAY_SIZE(virgefb_predefined) /* * Default to 800x600 for video=virge8:, virge16: or virge32: diff --git a/fs/9p/v9fs_vfs.h b/fs/9p/v9fs_vfs.h index 43c9f7de0314..f867b8d3e973 100644 --- a/fs/9p/v9fs_vfs.h +++ b/fs/9p/v9fs_vfs.h @@ -39,8 +39,8 @@ extern struct file_system_type v9fs_fs_type; extern struct address_space_operations v9fs_addr_operations; -extern struct file_operations v9fs_file_operations; -extern struct file_operations v9fs_dir_operations; +extern const struct file_operations v9fs_file_operations; +extern const struct file_operations v9fs_dir_operations; extern struct dentry_operations v9fs_dentry_operations; struct inode *v9fs_get_inode(struct super_block *sb, int mode); diff --git a/fs/9p/vfs_dir.c b/fs/9p/vfs_dir.c index 766f11f1215c..e32d5971039b 100644 --- a/fs/9p/vfs_dir.c +++ b/fs/9p/vfs_dir.c @@ -204,7 +204,7 @@ int v9fs_dir_release(struct inode *inode, struct file *filp) return 0; } -struct file_operations v9fs_dir_operations = { +const struct file_operations v9fs_dir_operations = { .read = generic_read_dir, .readdir = v9fs_dir_readdir, .open = v9fs_file_open, diff --git a/fs/9p/vfs_file.c b/fs/9p/vfs_file.c index 59e744163407..083dcfcd158e 100644 --- a/fs/9p/vfs_file.c +++ b/fs/9p/vfs_file.c @@ -266,7 +266,7 @@ v9fs_file_write(struct file *filp, const char __user * data, return total; } -struct file_operations v9fs_file_operations = { +const struct file_operations v9fs_file_operations = { .llseek = generic_file_llseek, .read = v9fs_file_read, .write = v9fs_file_write, diff --git a/fs/adfs/adfs.h b/fs/adfs/adfs.h index f6cd01352cc8..29217ff36d44 100644 --- a/fs/adfs/adfs.h +++ b/fs/adfs/adfs.h @@ -85,7 +85,7 @@ void __adfs_error(struct super_block *sb, const char *function, /* dir_*.c */ extern struct inode_operations adfs_dir_inode_operations; -extern struct file_operations adfs_dir_operations; +extern const struct file_operations adfs_dir_operations; extern struct dentry_operations adfs_dentry_operations; extern struct adfs_dir_ops adfs_f_dir_ops; extern struct adfs_dir_ops adfs_fplus_dir_ops; @@ -94,7 +94,7 @@ extern int adfs_dir_update(struct super_block *sb, struct object_info *obj); /* file.c */ extern struct inode_operations adfs_file_inode_operations; -extern struct file_operations adfs_file_operations; +extern const struct file_operations adfs_file_operations; static inline __u32 signed_asl(__u32 val, signed int shift) { diff --git a/fs/adfs/dir.c b/fs/adfs/dir.c index 0b4c3a028076..7b075fc397da 100644 --- a/fs/adfs/dir.c +++ b/fs/adfs/dir.c @@ -196,7 +196,7 @@ out: return ret; } -struct file_operations adfs_dir_operations = { +const struct file_operations adfs_dir_operations = { .read = generic_read_dir, .readdir = adfs_readdir, .fsync = file_fsync, diff --git a/fs/adfs/file.c b/fs/adfs/file.c index 6af10885f9d6..1014b9f2117b 100644 --- a/fs/adfs/file.c +++ b/fs/adfs/file.c @@ -25,7 +25,7 @@ #include "adfs.h" -struct file_operations adfs_file_operations = { +const struct file_operations adfs_file_operations = { .llseek = generic_file_llseek, .read = generic_file_read, .mmap = generic_file_mmap, diff --git a/fs/affs/affs.h b/fs/affs/affs.h index 0c6799f2137a..a43a876742b8 100644 --- a/fs/affs/affs.h +++ b/fs/affs/affs.h @@ -192,9 +192,9 @@ extern void 
affs_dir_truncate(struct inode *); extern struct inode_operations affs_file_inode_operations; extern struct inode_operations affs_dir_inode_operations; extern struct inode_operations affs_symlink_inode_operations; -extern struct file_operations affs_file_operations; -extern struct file_operations affs_file_operations_ofs; -extern struct file_operations affs_dir_operations; +extern const struct file_operations affs_file_operations; +extern const struct file_operations affs_file_operations_ofs; +extern const struct file_operations affs_dir_operations; extern struct address_space_operations affs_symlink_aops; extern struct address_space_operations affs_aops; extern struct address_space_operations affs_aops_ofs; diff --git a/fs/affs/dir.c b/fs/affs/dir.c index 548efd0ee98c..5d9649fa1814 100644 --- a/fs/affs/dir.c +++ b/fs/affs/dir.c @@ -17,7 +17,7 @@ static int affs_readdir(struct file *, void *, filldir_t); -struct file_operations affs_dir_operations = { +const struct file_operations affs_dir_operations = { .read = generic_read_dir, .readdir = affs_readdir, .fsync = file_fsync, diff --git a/fs/affs/file.c b/fs/affs/file.c index f72fb776ecdf..7076262af39b 100644 --- a/fs/affs/file.c +++ b/fs/affs/file.c @@ -25,7 +25,7 @@ static struct buffer_head *affs_get_extblock_slow(struct inode *inode, u32 ext); static int affs_file_open(struct inode *inode, struct file *filp); static int affs_file_release(struct inode *inode, struct file *filp); -struct file_operations affs_file_operations = { +const struct file_operations affs_file_operations = { .llseek = generic_file_llseek, .read = generic_file_read, .write = generic_file_write, diff --git a/fs/afs/dir.c b/fs/afs/dir.c index 5c61c24dab2a..a6dff6a4f204 100644 --- a/fs/afs/dir.c +++ b/fs/afs/dir.c @@ -32,7 +32,7 @@ static int afs_d_delete(struct dentry *dentry); static int afs_dir_lookup_filldir(void *_cookie, const char *name, int nlen, loff_t fpos, ino_t ino, unsigned dtype); -struct file_operations afs_dir_file_operations = { +const struct file_operations afs_dir_file_operations = { .open = afs_dir_open, .readdir = afs_dir_readdir, }; diff --git a/fs/afs/file.c b/fs/afs/file.c index 150b19227922..7bb716887e29 100644 --- a/fs/afs/file.c +++ b/fs/afs/file.c @@ -28,7 +28,7 @@ static int afs_file_release(struct inode *inode, struct file *file); #endif static int afs_file_readpage(struct file *file, struct page *page); -static int afs_file_invalidatepage(struct page *page, unsigned long offset); +static void afs_file_invalidatepage(struct page *page, unsigned long offset); static int afs_file_releasepage(struct page *page, gfp_t gfp_flags); struct inode_operations afs_file_inode_operations = { @@ -212,7 +212,7 @@ int afs_cache_get_page_cookie(struct page *page, /* * invalidate part or all of a page */ -static int afs_file_invalidatepage(struct page *page, unsigned long offset) +static void afs_file_invalidatepage(struct page *page, unsigned long offset) { int ret = 1; @@ -238,11 +238,11 @@ static int afs_file_invalidatepage(struct page *page, unsigned long offset) if (!PageWriteback(page)) ret = page->mapping->a_ops->releasepage(page, 0); + /* possibly should BUG_ON(!ret); - neilb */ } } _leave(" = %d", ret); - return ret; } /* end afs_file_invalidatepage() */ /*****************************************************************************/ diff --git a/fs/afs/internal.h b/fs/afs/internal.h index ab8f87c66319..72febdf9a35a 100644 --- a/fs/afs/internal.h +++ b/fs/afs/internal.h @@ -64,7 +64,7 @@ extern struct cachefs_index_def afs_cache_cell_index_def; * 
dir.c */ extern struct inode_operations afs_dir_inode_operations; -extern struct file_operations afs_dir_file_operations; +extern const struct file_operations afs_dir_file_operations; /* * file.c @@ -105,7 +105,7 @@ extern struct cachefs_netfs afs_cache_netfs; * mntpt.c */ extern struct inode_operations afs_mntpt_inode_operations; -extern struct file_operations afs_mntpt_file_operations; +extern const struct file_operations afs_mntpt_file_operations; extern struct afs_timer afs_mntpt_expiry_timer; extern struct afs_timer_ops afs_mntpt_expiry_timer_ops; extern unsigned long afs_mntpt_expiry_timeout; diff --git a/fs/afs/mntpt.c b/fs/afs/mntpt.c index 31ee06590de5..4e6eeb59b83c 100644 --- a/fs/afs/mntpt.c +++ b/fs/afs/mntpt.c @@ -32,7 +32,7 @@ static struct dentry *afs_mntpt_lookup(struct inode *dir, static int afs_mntpt_open(struct inode *inode, struct file *file); static void *afs_mntpt_follow_link(struct dentry *dentry, struct nameidata *nd); -struct file_operations afs_mntpt_file_operations = { +const struct file_operations afs_mntpt_file_operations = { .open = afs_mntpt_open, }; diff --git a/fs/afs/proc.c b/fs/afs/proc.c index 9c81b8f7eef0..101d21b6c037 100644 --- a/fs/afs/proc.c +++ b/fs/afs/proc.c @@ -37,7 +37,7 @@ static struct seq_operations afs_proc_cells_ops = { .show = afs_proc_cells_show, }; -static struct file_operations afs_proc_cells_fops = { +static const struct file_operations afs_proc_cells_fops = { .open = afs_proc_cells_open, .read = seq_read, .write = afs_proc_cells_write, @@ -53,7 +53,7 @@ static ssize_t afs_proc_rootcell_write(struct file *file, const char __user *buf, size_t size, loff_t *_pos); -static struct file_operations afs_proc_rootcell_fops = { +static const struct file_operations afs_proc_rootcell_fops = { .open = afs_proc_rootcell_open, .read = afs_proc_rootcell_read, .write = afs_proc_rootcell_write, @@ -77,7 +77,7 @@ static struct seq_operations afs_proc_cell_volumes_ops = { .show = afs_proc_cell_volumes_show, }; -static struct file_operations afs_proc_cell_volumes_fops = { +static const struct file_operations afs_proc_cell_volumes_fops = { .open = afs_proc_cell_volumes_open, .read = seq_read, .llseek = seq_lseek, @@ -101,7 +101,7 @@ static struct seq_operations afs_proc_cell_vlservers_ops = { .show = afs_proc_cell_vlservers_show, }; -static struct file_operations afs_proc_cell_vlservers_fops = { +static const struct file_operations afs_proc_cell_vlservers_fops = { .open = afs_proc_cell_vlservers_open, .read = seq_read, .llseek = seq_lseek, @@ -124,7 +124,7 @@ static struct seq_operations afs_proc_cell_servers_ops = { .show = afs_proc_cell_servers_show, }; -static struct file_operations afs_proc_cell_servers_fops = { +static const struct file_operations afs_proc_cell_servers_fops = { .open = afs_proc_cell_servers_open, .read = seq_read, .llseek = seq_lseek, diff --git a/fs/autofs/autofs_i.h b/fs/autofs/autofs_i.h index 990c28da5aec..a62327f1bdff 100644 --- a/fs/autofs/autofs_i.h +++ b/fs/autofs/autofs_i.h @@ -146,7 +146,7 @@ struct autofs_dir_ent *autofs_expire(struct super_block *,struct autofs_sb_info extern struct inode_operations autofs_root_inode_operations; extern struct inode_operations autofs_symlink_inode_operations; -extern struct file_operations autofs_root_operations; +extern const struct file_operations autofs_root_operations; /* Initializing function */ diff --git a/fs/autofs/dirhash.c b/fs/autofs/dirhash.c index 5ccfcf26310d..3fded389d06b 100644 --- a/fs/autofs/dirhash.c +++ b/fs/autofs/dirhash.c @@ -92,7 +92,7 @@ struct autofs_dir_ent 
*autofs_expire(struct super_block *sb, ; dput(dentry); - if ( may_umount(mnt) == 0 ) { + if ( may_umount(mnt) ) { mntput(mnt); DPRINTK(("autofs: signaling expire on %s\n", ent->name)); return ent; /* Expirable! */ diff --git a/fs/autofs/root.c b/fs/autofs/root.c index 870e2cf33016..9cac08d6a873 100644 --- a/fs/autofs/root.c +++ b/fs/autofs/root.c @@ -26,7 +26,7 @@ static int autofs_root_rmdir(struct inode *,struct dentry *); static int autofs_root_mkdir(struct inode *,struct dentry *,int); static int autofs_root_ioctl(struct inode *, struct file *,unsigned int,unsigned long); -struct file_operations autofs_root_operations = { +const struct file_operations autofs_root_operations = { .read = generic_read_dir, .readdir = autofs_root_readdir, .ioctl = autofs_root_ioctl, diff --git a/fs/autofs4/autofs_i.h b/fs/autofs4/autofs_i.h index f54c5b21f876..57c4903614e5 100644 --- a/fs/autofs4/autofs_i.h +++ b/fs/autofs4/autofs_i.h @@ -3,6 +3,7 @@ * linux/fs/autofs/autofs_i.h * * Copyright 1997-1998 Transmeta Corporation - All Rights Reserved + * Copyright 2005-2006 Ian Kent <raven@themaw.net> * * This file is part of the Linux kernel and is made available under * the terms of the GNU General Public License, version 2, or at your @@ -41,14 +42,6 @@ #define AUTOFS_SUPER_MAGIC 0x0187 -/* - * If the daemon returns a negative response (AUTOFS_IOC_FAIL) then the - * kernel will keep the negative response cached for up to the time given - * here, although the time can be shorter if the kernel throws the dcache - * entry away. This probably should be settable from user space. - */ -#define AUTOFS_NEGATIVE_TIMEOUT (60*HZ) /* 1 minute */ - /* Unified info structure. This is pointed to by both the dentry and inode structures. Each file in the filesystem has an instance of this structure. 
It holds a reference to the dentry, so dentries are never @@ -63,6 +56,7 @@ struct autofs_info { struct autofs_sb_info *sbi; unsigned long last_used; + atomic_t count; mode_t mode; size_t size; @@ -83,23 +77,37 @@ struct autofs_wait_queue { int hash; int len; char *name; + u32 dev; + u64 ino; + uid_t uid; + gid_t gid; + pid_t pid; + pid_t tgid; /* This is for status reporting upon return */ int status; - atomic_t notified; + atomic_t notify; atomic_t wait_ctr; }; #define AUTOFS_SBI_MAGIC 0x6d4a556d +#define AUTOFS_TYPE_INDIRECT 0x0001 +#define AUTOFS_TYPE_DIRECT 0x0002 +#define AUTOFS_TYPE_OFFSET 0x0004 + struct autofs_sb_info { u32 magic; struct dentry *root; + int pipefd; struct file *pipe; pid_t oz_pgrp; int catatonic; int version; int sub_version; + int min_proto; + int max_proto; unsigned long exp_timeout; + unsigned int type; int reghost_enabled; int needs_reghost; struct super_block *sb; @@ -166,8 +174,10 @@ int autofs4_expire_multi(struct super_block *, struct vfsmount *, extern struct inode_operations autofs4_symlink_inode_operations; extern struct inode_operations autofs4_dir_inode_operations; extern struct inode_operations autofs4_root_inode_operations; -extern struct file_operations autofs4_dir_operations; -extern struct file_operations autofs4_root_operations; +extern struct inode_operations autofs4_indirect_root_inode_operations; +extern struct inode_operations autofs4_direct_root_inode_operations; +extern const struct file_operations autofs4_dir_operations; +extern const struct file_operations autofs4_root_operations; /* Initializing function */ @@ -176,13 +186,6 @@ struct autofs_info *autofs4_init_ino(struct autofs_info *, struct autofs_sb_info /* Queue management functions */ -enum autofs_notify -{ - NFY_NONE, - NFY_MOUNT, - NFY_EXPIRE -}; - int autofs4_wait(struct autofs_sb_info *,struct dentry *, enum autofs_notify); int autofs4_wait_release(struct autofs_sb_info *,autofs_wqt_t,int); void autofs4_catatonic_mode(struct autofs_sb_info *); @@ -200,12 +203,22 @@ static inline int autofs4_follow_mount(struct vfsmount **mnt, struct dentry **de return res; } +static inline u32 autofs4_get_dev(struct autofs_sb_info *sbi) +{ + return new_encode_dev(sbi->sb->s_dev); +} + +static inline u64 autofs4_get_ino(struct autofs_sb_info *sbi) +{ + return sbi->sb->s_root->d_inode->i_ino; +} + static inline int simple_positive(struct dentry *dentry) { return dentry->d_inode && !d_unhashed(dentry); } -static inline int simple_empty_nolock(struct dentry *dentry) +static inline int __simple_empty(struct dentry *dentry) { struct dentry *child; int ret = 0; @@ -217,3 +230,6 @@ static inline int simple_empty_nolock(struct dentry *dentry) out: return ret; } + +void autofs4_dentry_release(struct dentry *); + diff --git a/fs/autofs4/expire.c b/fs/autofs4/expire.c index dc39589df165..b8ce02607d66 100644 --- a/fs/autofs4/expire.c +++ b/fs/autofs4/expire.c @@ -4,7 +4,7 @@ * * Copyright 1997-1998 Transmeta Corporation -- All Rights Reserved * Copyright 1999-2000 Jeremy Fitzhardinge <jeremy@goop.org> - * Copyright 2001-2003 Ian Kent <raven@themaw.net> + * Copyright 2001-2006 Ian Kent <raven@themaw.net> * * This file is part of the Linux kernel and is made available under * the terms of the GNU General Public License, version 2, or at your @@ -16,7 +16,7 @@ static unsigned long now; -/* Check if a dentry can be expired return 1 if it can else return 0 */ +/* Check if a dentry can be expired */ static inline int autofs4_can_expire(struct dentry *dentry, unsigned long timeout, int do_now) { @@ -41,14 +41,14 
@@ static inline int autofs4_can_expire(struct dentry *dentry, attempts if expire fails the first time */ ino->last_used = now; } - return 1; } -/* Check a mount point for busyness return 1 if not busy, otherwise */ -static int autofs4_check_mount(struct vfsmount *mnt, struct dentry *dentry) +/* Check a mount point for busyness */ +static int autofs4_mount_busy(struct vfsmount *mnt, struct dentry *dentry) { - int status = 0; + struct dentry *top = dentry; + int status = 1; DPRINTK("dentry %p %.*s", dentry, (int)dentry->d_name.len, dentry->d_name.name); @@ -63,9 +63,14 @@ static int autofs4_check_mount(struct vfsmount *mnt, struct dentry *dentry) if (is_autofs4_dentry(dentry)) goto done; - /* The big question */ - if (may_umount_tree(mnt) == 0) - status = 1; + /* Update the expiry counter if fs is busy */ + if (!may_umount_tree(mnt)) { + struct autofs_info *ino = autofs4_dentry_ino(top); + ino->last_used = jiffies; + goto done; + } + + status = 0; done: DPRINTK("returning = %d", status); mntput(mnt); @@ -73,78 +78,124 @@ done: return status; } +/* + * Calculate next entry in top down tree traversal. + * From next_mnt in namespace.c - elegant. + */ +static struct dentry *next_dentry(struct dentry *p, struct dentry *root) +{ + struct list_head *next = p->d_subdirs.next; + + if (next == &p->d_subdirs) { + while (1) { + if (p == root) + return NULL; + next = p->d_u.d_child.next; + if (next != &p->d_parent->d_subdirs) + break; + p = p->d_parent; + } + } + return list_entry(next, struct dentry, d_u.d_child); +} + +/* + * Check a direct mount point for busyness. + * Direct mounts have similar expiry semantics to tree mounts. + * The tree is not busy iff no mountpoints are busy and there are no + * autofs submounts. + */ +static int autofs4_direct_busy(struct vfsmount *mnt, + struct dentry *top, + unsigned long timeout, + int do_now) +{ + DPRINTK("top %p %.*s", + top, (int) top->d_name.len, top->d_name.name); + + /* If it's busy update the expiry counters */ + if (!may_umount_tree(mnt)) { + struct autofs_info *ino = autofs4_dentry_ino(top); + if (ino) + ino->last_used = jiffies; + return 1; + } + + /* Timeout of a direct mount is determined by its top dentry */ + if (!autofs4_can_expire(top, timeout, do_now)) + return 1; + + return 0; +} + /* Check a directory tree of mount points for busyness * The tree is not busy iff no mountpoints are busy - * Return 1 if the tree is busy or 0 otherwise */ -static int autofs4_check_tree(struct vfsmount *mnt, - struct dentry *top, - unsigned long timeout, - int do_now) +static int autofs4_tree_busy(struct vfsmount *mnt, + struct dentry *top, + unsigned long timeout, + int do_now) { - struct dentry *this_parent = top; - struct list_head *next; + struct autofs_info *top_ino = autofs4_dentry_ino(top); + struct dentry *p; - DPRINTK("parent %p %.*s", + DPRINTK("top %p %.*s", top, (int)top->d_name.len, top->d_name.name); /* Negative dentry - give up */ if (!simple_positive(top)) - return 0; - - /* Timeout of a tree mount is determined by its top dentry */ - if (!autofs4_can_expire(top, timeout, do_now)) - return 0; - - /* Is someone visiting anywhere in the tree ? 
*/ - if (may_umount_tree(mnt)) - return 0; + return 1; spin_lock(&dcache_lock); -repeat: - next = this_parent->d_subdirs.next; -resume: - while (next != &this_parent->d_subdirs) { - struct dentry *dentry = list_entry(next, struct dentry, d_u.d_child); - + for (p = top; p; p = next_dentry(p, top)) { /* Negative dentry - give up */ - if (!simple_positive(dentry)) { - next = next->next; + if (!simple_positive(p)) continue; - } DPRINTK("dentry %p %.*s", - dentry, (int)dentry->d_name.len, dentry->d_name.name); - - if (!simple_empty_nolock(dentry)) { - this_parent = dentry; - goto repeat; - } + p, (int) p->d_name.len, p->d_name.name); - dentry = dget(dentry); + p = dget(p); spin_unlock(&dcache_lock); - if (d_mountpoint(dentry)) { - /* First busy => tree busy */ - if (!autofs4_check_mount(mnt, dentry)) { - dput(dentry); - return 0; + /* + * Is someone visiting anywhere in the subtree ? + * If there's no mount we need to check the usage + * count for the autofs dentry. + * If the fs is busy update the expiry counter. + */ + if (d_mountpoint(p)) { + if (autofs4_mount_busy(mnt, p)) { + top_ino->last_used = jiffies; + dput(p); + return 1; + } + } else { + struct autofs_info *ino = autofs4_dentry_ino(p); + unsigned int ino_count = atomic_read(&ino->count); + + /* allow for dget above and top is already dgot */ + if (p == top) + ino_count += 2; + else + ino_count++; + + if (atomic_read(&p->d_count) > ino_count) { + top_ino->last_used = jiffies; + dput(p); + return 1; } } - - dput(dentry); + dput(p); spin_lock(&dcache_lock); - next = next->next; - } - - if (this_parent != top) { - next = this_parent->d_u.d_child.next; - this_parent = this_parent->d_parent; - goto resume; } spin_unlock(&dcache_lock); - return 1; + /* Timeout of a tree mount is ultimately determined by its top dentry */ + if (!autofs4_can_expire(top, timeout, do_now)) + return 1; + + return 0; } static struct dentry *autofs4_check_leaves(struct vfsmount *mnt, @@ -152,58 +203,68 @@ static struct dentry *autofs4_check_leaves(struct vfsmount *mnt, unsigned long timeout, int do_now) { - struct dentry *this_parent = parent; - struct list_head *next; + struct dentry *p; DPRINTK("parent %p %.*s", parent, (int)parent->d_name.len, parent->d_name.name); spin_lock(&dcache_lock); -repeat: - next = this_parent->d_subdirs.next; -resume: - while (next != &this_parent->d_subdirs) { - struct dentry *dentry = list_entry(next, struct dentry, d_u.d_child); - + for (p = parent; p; p = next_dentry(p, parent)) { /* Negative dentry - give up */ - if (!simple_positive(dentry)) { - next = next->next; + if (!simple_positive(p)) continue; - } DPRINTK("dentry %p %.*s", - dentry, (int)dentry->d_name.len, dentry->d_name.name); - - if (!list_empty(&dentry->d_subdirs)) { - this_parent = dentry; - goto repeat; - } + p, (int) p->d_name.len, p->d_name.name); - dentry = dget(dentry); + p = dget(p); spin_unlock(&dcache_lock); - if (d_mountpoint(dentry)) { - /* Can we expire this guy */ - if (!autofs4_can_expire(dentry, timeout, do_now)) - goto cont; - + if (d_mountpoint(p)) { /* Can we umount this guy */ - if (autofs4_check_mount(mnt, dentry)) - return dentry; + if (autofs4_mount_busy(mnt, p)) + goto cont; + /* Can we expire this guy */ + if (autofs4_can_expire(p, timeout, do_now)) + return p; } cont: - dput(dentry); + dput(p); spin_lock(&dcache_lock); - next = next->next; } + spin_unlock(&dcache_lock); + return NULL; +} + +/* Check if we can expire a direct mount (possibly a tree) */ +static struct dentry *autofs4_expire_direct(struct super_block *sb, + struct vfsmount *mnt, 
+ struct autofs_sb_info *sbi, + int how) +{ + unsigned long timeout; + struct dentry *root = dget(sb->s_root); + int do_now = how & AUTOFS_EXP_IMMEDIATE; + + if (!sbi->exp_timeout || !root) + return NULL; + + now = jiffies; + timeout = sbi->exp_timeout; + + /* Lock the tree as we must expire as a whole */ + spin_lock(&sbi->fs_lock); + if (!autofs4_direct_busy(mnt, root, timeout, do_now)) { + struct autofs_info *ino = autofs4_dentry_ino(root); - if (this_parent != parent) { - next = this_parent->d_u.d_child.next; - this_parent = this_parent->d_parent; - goto resume; + /* Set this flag early to catch sys_chdir and the like */ + ino->flags |= AUTOFS_INF_EXPIRING; + spin_unlock(&sbi->fs_lock); + return root; } - spin_unlock(&dcache_lock); + spin_unlock(&sbi->fs_lock); + dput(root); return NULL; } @@ -214,10 +275,10 @@ cont: * - it is unused by any user process * - it has been unused for exp_timeout time */ -static struct dentry *autofs4_expire(struct super_block *sb, - struct vfsmount *mnt, - struct autofs_sb_info *sbi, - int how) +static struct dentry *autofs4_expire_indirect(struct super_block *sb, + struct vfsmount *mnt, + struct autofs_sb_info *sbi, + int how) { unsigned long timeout; struct dentry *root = sb->s_root; @@ -241,7 +302,7 @@ static struct dentry *autofs4_expire(struct super_block *sb, struct dentry *dentry = list_entry(next, struct dentry, d_u.d_child); /* Negative dentry - give up */ - if ( !simple_positive(dentry) ) { + if (!simple_positive(dentry)) { next = next->next; continue; } @@ -249,31 +310,36 @@ static struct dentry *autofs4_expire(struct super_block *sb, dentry = dget(dentry); spin_unlock(&dcache_lock); - /* Case 1: indirect mount or top level direct mount */ + /* + * Case 1: (i) indirect mount or top level pseudo direct mount + * (autofs-4.1). + * (ii) indirect mount with offset mount, check the "/" + * offset (autofs-5.0+). + */ if (d_mountpoint(dentry)) { DPRINTK("checking mountpoint %p %.*s", dentry, (int)dentry->d_name.len, dentry->d_name.name); - /* Can we expire this guy */ - if (!autofs4_can_expire(dentry, timeout, do_now)) + /* Can we umount this guy */ + if (autofs4_mount_busy(mnt, dentry)) goto next; - /* Can we umount this guy */ - if (autofs4_check_mount(mnt, dentry)) { + /* Can we expire this guy */ + if (autofs4_can_expire(dentry, timeout, do_now)) { expired = dentry; break; } goto next; } - if ( simple_empty(dentry) ) + if (simple_empty(dentry)) goto next; /* Case 2: tree mount, expire iff entire tree is not busy */ if (!exp_leaves) { /* Lock the tree as we must expire as a whole */ spin_lock(&sbi->fs_lock); - if (autofs4_check_tree(mnt, dentry, timeout, do_now)) { + if (!autofs4_tree_busy(mnt, dentry, timeout, do_now)) { struct autofs_info *inf = autofs4_dentry_ino(dentry); /* Set this flag early to catch sys_chdir and the like */ @@ -283,7 +349,10 @@ static struct dentry *autofs4_expire(struct super_block *sb, break; } spin_unlock(&sbi->fs_lock); - /* Case 3: direct mount, expire individual leaves */ + /* + * Case 3: pseudo direct mount, expire individual leaves + * (autofs-4.1). 
+ */ } else { expired = autofs4_check_leaves(mnt, dentry, timeout, do_now); if (expired) { @@ -297,7 +366,7 @@ next: next = next->next; } - if ( expired ) { + if (expired) { DPRINTK("returning %p %.*s", expired, (int)expired->d_name.len, expired->d_name.name); spin_lock(&dcache_lock); @@ -325,7 +394,7 @@ int autofs4_expire_run(struct super_block *sb, pkt.hdr.proto_version = sbi->version; pkt.hdr.type = autofs_ptype_expire; - if ((dentry = autofs4_expire(sb, mnt, sbi, 0)) == NULL) + if ((dentry = autofs4_expire_indirect(sb, mnt, sbi, 0)) == NULL) return -EAGAIN; pkt.len = dentry->d_name.len; @@ -351,17 +420,22 @@ int autofs4_expire_multi(struct super_block *sb, struct vfsmount *mnt, if (arg && get_user(do_now, arg)) return -EFAULT; - if ((dentry = autofs4_expire(sb, mnt, sbi, do_now)) != NULL) { - struct autofs_info *de_info = autofs4_dentry_ino(dentry); + if (sbi->type & AUTOFS_TYPE_DIRECT) + dentry = autofs4_expire_direct(sb, mnt, sbi, do_now); + else + dentry = autofs4_expire_indirect(sb, mnt, sbi, do_now); + + if (dentry) { + struct autofs_info *ino = autofs4_dentry_ino(dentry); /* This is synchronous because it makes the daemon a little easier */ - de_info->flags |= AUTOFS_INF_EXPIRING; + ino->flags |= AUTOFS_INF_EXPIRING; ret = autofs4_wait(sbi, dentry, NFY_EXPIRE); - de_info->flags &= ~AUTOFS_INF_EXPIRING; + ino->flags &= ~AUTOFS_INF_EXPIRING; dput(dentry); } - + return ret; } diff --git a/fs/autofs4/inode.c b/fs/autofs4/inode.c index 1ad98d48e550..fde78b110ddd 100644 --- a/fs/autofs4/inode.c +++ b/fs/autofs4/inode.c @@ -3,6 +3,7 @@ * linux/fs/autofs/inode.c * * Copyright 1997-1998 Transmeta Corporation -- All Rights Reserved + * Copyright 2005-2006 Ian Kent <raven@themaw.net> * * This file is part of the Linux kernel and is made available under * the terms of the GNU General Public License, version 2, or at your @@ -13,6 +14,7 @@ #include <linux/kernel.h> #include <linux/slab.h> #include <linux/file.h> +#include <linux/seq_file.h> #include <linux/pagemap.h> #include <linux/parser.h> #include <linux/bitops.h> @@ -46,6 +48,7 @@ struct autofs_info *autofs4_init_ino(struct autofs_info *ino, ino->size = 0; ino->last_used = jiffies; + atomic_set(&ino->count, 0); ino->sbi = sbi; @@ -64,10 +67,19 @@ struct autofs_info *autofs4_init_ino(struct autofs_info *ino, void autofs4_free_ino(struct autofs_info *ino) { + struct autofs_info *p_ino; + if (ino->dentry) { ino->dentry->d_fsdata = NULL; - if (ino->dentry->d_inode) + if (ino->dentry->d_inode) { + struct dentry *parent = ino->dentry->d_parent; + if (atomic_dec_and_test(&ino->count)) { + p_ino = autofs4_dentry_ino(parent); + if (p_ino && parent != ino->dentry) + atomic_dec(&p_ino->count); + } dput(ino->dentry); + } ino->dentry = NULL; } if (ino->free) @@ -145,20 +157,44 @@ static void autofs4_put_super(struct super_block *sb) autofs4_catatonic_mode(sbi); /* Free wait queues, close pipe */ /* Clean up and release dangling references */ - if (sbi) - autofs4_force_release(sbi); + autofs4_force_release(sbi); kfree(sbi); DPRINTK("shutting down"); } +static int autofs4_show_options(struct seq_file *m, struct vfsmount *mnt) +{ + struct autofs_sb_info *sbi = autofs4_sbi(mnt->mnt_sb); + + if (!sbi) + return 0; + + seq_printf(m, ",fd=%d", sbi->pipefd); + seq_printf(m, ",pgrp=%d", sbi->oz_pgrp); + seq_printf(m, ",timeout=%lu", sbi->exp_timeout/HZ); + seq_printf(m, ",minproto=%d", sbi->min_proto); + seq_printf(m, ",maxproto=%d", sbi->max_proto); + + if (sbi->type & AUTOFS_TYPE_OFFSET) + seq_printf(m, ",offset"); + else if (sbi->type & AUTOFS_TYPE_DIRECT) 
+ seq_printf(m, ",direct"); + else + seq_printf(m, ",indirect"); + + return 0; +} + static struct super_operations autofs4_sops = { .put_super = autofs4_put_super, .statfs = simple_statfs, + .show_options = autofs4_show_options, }; -enum {Opt_err, Opt_fd, Opt_uid, Opt_gid, Opt_pgrp, Opt_minproto, Opt_maxproto}; +enum {Opt_err, Opt_fd, Opt_uid, Opt_gid, Opt_pgrp, Opt_minproto, Opt_maxproto, + Opt_indirect, Opt_direct, Opt_offset}; static match_table_t tokens = { {Opt_fd, "fd=%u"}, @@ -167,11 +203,15 @@ static match_table_t tokens = { {Opt_pgrp, "pgrp=%u"}, {Opt_minproto, "minproto=%u"}, {Opt_maxproto, "maxproto=%u"}, + {Opt_indirect, "indirect"}, + {Opt_direct, "direct"}, + {Opt_offset, "offset"}, {Opt_err, NULL} }; static int parse_options(char *options, int *pipefd, uid_t *uid, gid_t *gid, - pid_t *pgrp, int *minproto, int *maxproto) + pid_t *pgrp, unsigned int *type, + int *minproto, int *maxproto) { char *p; substring_t args[MAX_OPT_ARGS]; @@ -225,6 +265,15 @@ static int parse_options(char *options, int *pipefd, uid_t *uid, gid_t *gid, return 1; *maxproto = option; break; + case Opt_indirect: + *type = AUTOFS_TYPE_INDIRECT; + break; + case Opt_direct: + *type = AUTOFS_TYPE_DIRECT; + break; + case Opt_offset: + *type = AUTOFS_TYPE_DIRECT | AUTOFS_TYPE_OFFSET; + break; default: return 1; } @@ -243,6 +292,10 @@ static struct autofs_info *autofs4_mkroot(struct autofs_sb_info *sbi) return ino; } +static struct dentry_operations autofs4_sb_dentry_operations = { + .d_release = autofs4_dentry_release, +}; + int autofs4_fill_super(struct super_block *s, void *data, int silent) { struct inode * root_inode; @@ -251,7 +304,6 @@ int autofs4_fill_super(struct super_block *s, void *data, int silent) int pipefd; struct autofs_sb_info *sbi; struct autofs_info *ino; - int minproto, maxproto; sbi = (struct autofs_sb_info *) kmalloc(sizeof(*sbi), GFP_KERNEL); if ( !sbi ) @@ -263,12 +315,16 @@ int autofs4_fill_super(struct super_block *s, void *data, int silent) s->s_fs_info = sbi; sbi->magic = AUTOFS_SBI_MAGIC; sbi->root = NULL; + sbi->pipefd = -1; sbi->catatonic = 0; sbi->exp_timeout = 0; sbi->oz_pgrp = process_group(current); sbi->sb = s; sbi->version = 0; sbi->sub_version = 0; + sbi->type = 0; + sbi->min_proto = 0; + sbi->max_proto = 0; mutex_init(&sbi->wq_mutex); spin_lock_init(&sbi->fs_lock); sbi->queues = NULL; @@ -285,38 +341,46 @@ int autofs4_fill_super(struct super_block *s, void *data, int silent) if (!ino) goto fail_free; root_inode = autofs4_get_inode(s, ino); - kfree(ino); if (!root_inode) - goto fail_free; + goto fail_ino; - root_inode->i_op = &autofs4_root_inode_operations; - root_inode->i_fop = &autofs4_root_operations; root = d_alloc_root(root_inode); - pipe = NULL; - if (!root) goto fail_iput; + pipe = NULL; + + root->d_op = &autofs4_sb_dentry_operations; + root->d_fsdata = ino; /* Can this call block? */ if (parse_options(data, &pipefd, &root_inode->i_uid, &root_inode->i_gid, - &sbi->oz_pgrp, - &minproto, &maxproto)) { + &sbi->oz_pgrp, &sbi->type, + &sbi->min_proto, &sbi->max_proto)) { printk("autofs: called with bogus options\n"); goto fail_dput; } + root_inode->i_fop = &autofs4_root_operations; + root_inode->i_op = sbi->type & AUTOFS_TYPE_DIRECT ? + &autofs4_direct_root_inode_operations : + &autofs4_indirect_root_inode_operations; + /* Couldn't this be tested earlier? 
*/ - if (maxproto < AUTOFS_MIN_PROTO_VERSION || - minproto > AUTOFS_MAX_PROTO_VERSION) { + if (sbi->max_proto < AUTOFS_MIN_PROTO_VERSION || + sbi->min_proto > AUTOFS_MAX_PROTO_VERSION) { printk("autofs: kernel does not match daemon version " "daemon (%d, %d) kernel (%d, %d)\n", - minproto, maxproto, + sbi->min_proto, sbi->max_proto, AUTOFS_MIN_PROTO_VERSION, AUTOFS_MAX_PROTO_VERSION); goto fail_dput; } - sbi->version = maxproto > AUTOFS_MAX_PROTO_VERSION ? AUTOFS_MAX_PROTO_VERSION : maxproto; + /* Establish highest kernel protocol version */ + if (sbi->max_proto > AUTOFS_MAX_PROTO_VERSION) + sbi->version = AUTOFS_MAX_PROTO_VERSION; + else + sbi->version = sbi->max_proto; sbi->sub_version = AUTOFS_PROTO_SUBVERSION; DPRINTK("pipe fd = %d, pgrp = %u", pipefd, sbi->oz_pgrp); @@ -329,6 +393,7 @@ int autofs4_fill_super(struct super_block *s, void *data, int silent) if ( !pipe->f_op || !pipe->f_op->write ) goto fail_fput; sbi->pipe = pipe; + sbi->pipefd = pipefd; /* * Take a reference to the root dentry so we get a chance to @@ -356,6 +421,8 @@ fail_dput: fail_iput: printk("autofs: get root dentry failed\n"); iput(root_inode); +fail_ino: + kfree(ino); fail_free: kfree(sbi); fail_unlock: diff --git a/fs/autofs4/root.c b/fs/autofs4/root.c index 62d8d4acb8bb..84e030c8ddd0 100644 --- a/fs/autofs4/root.c +++ b/fs/autofs4/root.c @@ -4,7 +4,7 @@ * * Copyright 1997-1998 Transmeta Corporation -- All Rights Reserved * Copyright 1999-2000 Jeremy Fitzhardinge <jeremy@goop.org> - * Copyright 2001-2003 Ian Kent <raven@themaw.net> + * Copyright 2001-2006 Ian Kent <raven@themaw.net> * * This file is part of the Linux kernel and is made available under * the terms of the GNU General Public License, version 2, or at your @@ -30,9 +30,9 @@ static int autofs4_dir_close(struct inode *inode, struct file *file); static int autofs4_dir_readdir(struct file * filp, void * dirent, filldir_t filldir); static int autofs4_root_readdir(struct file * filp, void * dirent, filldir_t filldir); static struct dentry *autofs4_lookup(struct inode *,struct dentry *, struct nameidata *); -static int autofs4_dcache_readdir(struct file *, void *, filldir_t); +static void *autofs4_follow_link(struct dentry *, struct nameidata *); -struct file_operations autofs4_root_operations = { +const struct file_operations autofs4_root_operations = { .open = dcache_dir_open, .release = dcache_dir_close, .read = generic_read_dir, @@ -40,14 +40,14 @@ struct file_operations autofs4_root_operations = { .ioctl = autofs4_root_ioctl, }; -struct file_operations autofs4_dir_operations = { +const struct file_operations autofs4_dir_operations = { .open = autofs4_dir_open, .release = autofs4_dir_close, .read = generic_read_dir, .readdir = autofs4_dir_readdir, }; -struct inode_operations autofs4_root_inode_operations = { +struct inode_operations autofs4_indirect_root_inode_operations = { .lookup = autofs4_lookup, .unlink = autofs4_dir_unlink, .symlink = autofs4_dir_symlink, @@ -55,6 +55,14 @@ struct inode_operations autofs4_root_inode_operations = { .rmdir = autofs4_dir_rmdir, }; +struct inode_operations autofs4_direct_root_inode_operations = { + .lookup = autofs4_lookup, + .unlink = autofs4_dir_unlink, + .mkdir = autofs4_dir_mkdir, + .rmdir = autofs4_dir_rmdir, + .follow_link = autofs4_follow_link, +}; + struct inode_operations autofs4_dir_inode_operations = { .lookup = autofs4_lookup, .unlink = autofs4_dir_unlink, @@ -82,87 +90,7 @@ static int autofs4_root_readdir(struct file *file, void *dirent, DPRINTK("needs_reghost = %d", sbi->needs_reghost); - return 
autofs4_dcache_readdir(file, dirent, filldir); -} - -/* Update usage from here to top of tree, so that scan of - top-level directories will give a useful result */ -static void autofs4_update_usage(struct vfsmount *mnt, struct dentry *dentry) -{ - struct dentry *top = dentry->d_sb->s_root; - - spin_lock(&dcache_lock); - for(; dentry != top; dentry = dentry->d_parent) { - struct autofs_info *ino = autofs4_dentry_ino(dentry); - - if (ino) { - touch_atime(mnt, dentry); - ino->last_used = jiffies; - } - } - spin_unlock(&dcache_lock); -} - -/* - * From 2.4 kernel readdir.c - */ -static int autofs4_dcache_readdir(struct file * filp, void * dirent, filldir_t filldir) -{ - int i; - struct dentry *dentry = filp->f_dentry; - - i = filp->f_pos; - switch (i) { - case 0: - if (filldir(dirent, ".", 1, i, dentry->d_inode->i_ino, DT_DIR) < 0) - break; - i++; - filp->f_pos++; - /* fallthrough */ - case 1: - if (filldir(dirent, "..", 2, i, dentry->d_parent->d_inode->i_ino, DT_DIR) < 0) - break; - i++; - filp->f_pos++; - /* fallthrough */ - default: { - struct list_head *list; - int j = i-2; - - spin_lock(&dcache_lock); - list = dentry->d_subdirs.next; - - for (;;) { - if (list == &dentry->d_subdirs) { - spin_unlock(&dcache_lock); - return 0; - } - if (!j) - break; - j--; - list = list->next; - } - - while(1) { - struct dentry *de = list_entry(list, - struct dentry, d_u.d_child); - - if (!d_unhashed(de) && de->d_inode) { - spin_unlock(&dcache_lock); - if (filldir(dirent, de->d_name.name, de->d_name.len, filp->f_pos, de->d_inode->i_ino, DT_UNKNOWN) < 0) - break; - spin_lock(&dcache_lock); - } - filp->f_pos++; - list = list->next; - if (list != &dentry->d_subdirs) - continue; - spin_unlock(&dcache_lock); - break; - } - } - } - return 0; + return dcache_readdir(file, dirent, filldir); } static int autofs4_dir_open(struct inode *inode, struct file *file) @@ -170,8 +98,16 @@ static int autofs4_dir_open(struct inode *inode, struct file *file) struct dentry *dentry = file->f_dentry; struct vfsmount *mnt = file->f_vfsmnt; struct autofs_sb_info *sbi = autofs4_sbi(dentry->d_sb); + struct dentry *cursor; int status; + status = dcache_dir_open(inode, file); + if (status) + goto out; + + cursor = file->private_data; + cursor->d_fsdata = NULL; + DPRINTK("file=%p dentry=%p %.*s", file, dentry, dentry->d_name.len, dentry->d_name.name); @@ -180,12 +116,15 @@ static int autofs4_dir_open(struct inode *inode, struct file *file) if (autofs4_ispending(dentry)) { DPRINTK("dentry busy"); - return -EBUSY; + dcache_dir_close(inode, file); + status = -EBUSY; + goto out; } + status = -ENOENT; if (!d_mountpoint(dentry) && dentry->d_op && dentry->d_op->d_revalidate) { struct nameidata nd; - int empty; + int empty, ret; /* In case there are stale directory dentrys from a failed mount */ spin_lock(&dcache_lock); @@ -195,13 +134,13 @@ static int autofs4_dir_open(struct inode *inode, struct file *file) if (!empty) d_invalidate(dentry); - nd.dentry = dentry; - nd.mnt = mnt; nd.flags = LOOKUP_DIRECTORY; - status = (dentry->d_op->d_revalidate)(dentry, &nd); + ret = (dentry->d_op->d_revalidate)(dentry, &nd); - if (!status) - return -ENOENT; + if (!ret) { + dcache_dir_close(inode, file); + goto out; + } } if (d_mountpoint(dentry)) { @@ -212,25 +151,29 @@ static int autofs4_dir_open(struct inode *inode, struct file *file) if (!autofs4_follow_mount(&fp_mnt, &fp_dentry)) { dput(fp_dentry); mntput(fp_mnt); - return -ENOENT; + dcache_dir_close(inode, file); + goto out; } fp = dentry_open(fp_dentry, fp_mnt, file->f_flags); status = PTR_ERR(fp); if 
(IS_ERR(fp)) { - file->private_data = NULL; - return status; + dcache_dir_close(inode, file); + goto out; } - file->private_data = fp; + cursor->d_fsdata = fp; } -out: return 0; +out: + return status; } static int autofs4_dir_close(struct inode *inode, struct file *file) { struct dentry *dentry = file->f_dentry; struct autofs_sb_info *sbi = autofs4_sbi(dentry->d_sb); + struct dentry *cursor = file->private_data; + int status = 0; DPRINTK("file=%p dentry=%p %.*s", file, dentry, dentry->d_name.len, dentry->d_name.name); @@ -240,26 +183,28 @@ static int autofs4_dir_close(struct inode *inode, struct file *file) if (autofs4_ispending(dentry)) { DPRINTK("dentry busy"); - return -EBUSY; + status = -EBUSY; + goto out; } if (d_mountpoint(dentry)) { - struct file *fp = file->private_data; - - if (!fp) - return -ENOENT; - + struct file *fp = cursor->d_fsdata; + if (!fp) { + status = -ENOENT; + goto out; + } filp_close(fp, current->files); - file->private_data = NULL; } out: - return 0; + dcache_dir_close(inode, file); + return status; } static int autofs4_dir_readdir(struct file *file, void *dirent, filldir_t filldir) { struct dentry *dentry = file->f_dentry; struct autofs_sb_info *sbi = autofs4_sbi(dentry->d_sb); + struct dentry *cursor = file->private_data; int status; DPRINTK("file=%p dentry=%p %.*s", @@ -274,7 +219,7 @@ static int autofs4_dir_readdir(struct file *file, void *dirent, filldir_t filldi } if (d_mountpoint(dentry)) { - struct file *fp = file->private_data; + struct file *fp = cursor->d_fsdata; if (!fp) return -ENOENT; @@ -289,27 +234,26 @@ static int autofs4_dir_readdir(struct file *file, void *dirent, filldir_t filldi return status; } out: - return autofs4_dcache_readdir(file, dirent, filldir); + return dcache_readdir(file, dirent, filldir); } -static int try_to_fill_dentry(struct vfsmount *mnt, struct dentry *dentry, int flags) +static int try_to_fill_dentry(struct dentry *dentry, int flags) { - struct super_block *sb = mnt->mnt_sb; - struct autofs_sb_info *sbi = autofs4_sbi(sb); - struct autofs_info *de_info = autofs4_dentry_ino(dentry); + struct autofs_sb_info *sbi = autofs4_sbi(dentry->d_sb); + struct autofs_info *ino = autofs4_dentry_ino(dentry); int status = 0; /* Block on any pending expiry here; invalidate the dentry when expiration is done to trigger mount request with a new dentry */ - if (de_info && (de_info->flags & AUTOFS_INF_EXPIRING)) { + if (ino && (ino->flags & AUTOFS_INF_EXPIRING)) { DPRINTK("waiting for expire %p name=%.*s", dentry, dentry->d_name.len, dentry->d_name.name); status = autofs4_wait(sbi, dentry, NFY_NONE); - + DPRINTK("expire done status=%d", status); - + /* * If the directory still exists the mount request must * continue otherwise it can't be followed at the right @@ -317,34 +261,36 @@ static int try_to_fill_dentry(struct vfsmount *mnt, struct dentry *dentry, int f */ status = d_invalidate(dentry); if (status != -EBUSY) - return 0; + return -ENOENT; } DPRINTK("dentry=%p %.*s ino=%p", dentry, dentry->d_name.len, dentry->d_name.name, dentry->d_inode); - /* Wait for a pending mount, triggering one if there isn't one already */ + /* + * Wait for a pending mount, triggering one if there + * isn't one already + */ if (dentry->d_inode == NULL) { DPRINTK("waiting for mount name=%.*s", dentry->d_name.len, dentry->d_name.name); status = autofs4_wait(sbi, dentry, NFY_MOUNT); - + DPRINTK("mount done status=%d", status); if (status && dentry->d_inode) - return 0; /* Try to get the kernel to invalidate this dentry */ - + return status; /* Try to get the kernel to 
invalidate this dentry */ + /* Turn this into a real negative dentry? */ if (status == -ENOENT) { - dentry->d_time = jiffies + AUTOFS_NEGATIVE_TIMEOUT; spin_lock(&dentry->d_lock); dentry->d_flags &= ~DCACHE_AUTOFS_PENDING; spin_unlock(&dentry->d_lock); - return 1; + return status; } else if (status) { /* Return a negative dentry, but leave it "pending" */ - return 1; + return status; } /* Trigger mount for path component or follow link */ } else if (flags & (LOOKUP_CONTINUE | LOOKUP_DIRECTORY) || @@ -363,19 +309,87 @@ static int try_to_fill_dentry(struct vfsmount *mnt, struct dentry *dentry, int f spin_lock(&dentry->d_lock); dentry->d_flags &= ~DCACHE_AUTOFS_PENDING; spin_unlock(&dentry->d_lock); - return 0; + return status; } } - /* We don't update the usages for the autofs daemon itself, this - is necessary for recursive autofs mounts */ - if (!autofs4_oz_mode(sbi)) - autofs4_update_usage(mnt, dentry); + /* Initialize expiry counter after successful mount */ + if (ino) + ino->last_used = jiffies; spin_lock(&dentry->d_lock); dentry->d_flags &= ~DCACHE_AUTOFS_PENDING; spin_unlock(&dentry->d_lock); - return 1; + return status; +} + +/* For autofs direct mounts the follow link triggers the mount */ +static void *autofs4_follow_link(struct dentry *dentry, struct nameidata *nd) +{ + struct autofs_sb_info *sbi = autofs4_sbi(dentry->d_sb); + int oz_mode = autofs4_oz_mode(sbi); + unsigned int lookup_type; + int status; + + DPRINTK("dentry=%p %.*s oz_mode=%d nd->flags=%d", + dentry, dentry->d_name.len, dentry->d_name.name, oz_mode, + nd->flags); + + /* If it's our master or we shouldn't trigger a mount we're done */ + lookup_type = nd->flags & (LOOKUP_CONTINUE | LOOKUP_DIRECTORY); + if (oz_mode || !lookup_type) + goto done; + + /* + * If a request is pending wait for it. + * If it's a mount then it won't be expired till at least + * a liitle later and if it's an expire then we might need + * to mount it again. + */ + if (autofs4_ispending(dentry)) { + DPRINTK("waiting for active request %p name=%.*s", + dentry, dentry->d_name.len, dentry->d_name.name); + + status = autofs4_wait(sbi, dentry, NFY_NONE); + + DPRINTK("request done status=%d", status); + } + + /* + * If the dentry contains directories then it is an + * autofs multi-mount with no root mount offset. So + * don't try to mount it again. + */ + spin_lock(&dcache_lock); + if (!d_mountpoint(dentry) && list_empty(&dentry->d_subdirs)) { + spin_unlock(&dcache_lock); + + status = try_to_fill_dentry(dentry, 0); + if (status) + goto out_error; + + /* + * The mount succeeded but if there is no root mount + * it must be an autofs multi-mount with no root offset + * so we don't need to follow the mount. + */ + if (d_mountpoint(dentry)) { + if (!autofs4_follow_mount(&nd->mnt, &nd->dentry)) { + status = -ENOENT; + goto out_error; + } + } + + goto done; + } + spin_unlock(&dcache_lock); + +done: + return NULL; + +out_error: + path_release(nd); + return ERR_PTR(status); } /* @@ -384,47 +398,43 @@ static int try_to_fill_dentry(struct vfsmount *mnt, struct dentry *dentry, int f * yet completely filled in, and revalidate has to delay such * lookups.. */ -static int autofs4_revalidate(struct dentry * dentry, struct nameidata *nd) +static int autofs4_revalidate(struct dentry *dentry, struct nameidata *nd) { - struct inode * dir = dentry->d_parent->d_inode; + struct inode *dir = dentry->d_parent->d_inode; struct autofs_sb_info *sbi = autofs4_sbi(dir->i_sb); int oz_mode = autofs4_oz_mode(sbi); int flags = nd ? 
nd->flags : 0; - int status = 1; + int status = 0; /* Pending dentry */ if (autofs4_ispending(dentry)) { if (!oz_mode) - status = try_to_fill_dentry(nd->mnt, dentry, flags); - return status; + status = try_to_fill_dentry(dentry, flags); + return !status; } /* Negative dentry.. invalidate if "old" */ if (dentry->d_inode == NULL) - return (dentry->d_time - jiffies <= AUTOFS_NEGATIVE_TIMEOUT); + return 0; /* Check for a non-mountpoint directory with no contents */ spin_lock(&dcache_lock); if (S_ISDIR(dentry->d_inode->i_mode) && !d_mountpoint(dentry) && - list_empty(&dentry->d_subdirs)) { + __simple_empty(dentry)) { DPRINTK("dentry=%p %.*s, emptydir", dentry, dentry->d_name.len, dentry->d_name.name); spin_unlock(&dcache_lock); if (!oz_mode) - status = try_to_fill_dentry(nd->mnt, dentry, flags); - return status; + status = try_to_fill_dentry(dentry, flags); + return !status; } spin_unlock(&dcache_lock); - /* Update the usage list */ - if (!oz_mode) - autofs4_update_usage(nd->mnt, dentry); - return 1; } -static void autofs4_dentry_release(struct dentry *de) +void autofs4_dentry_release(struct dentry *de) { struct autofs_info *inf; @@ -462,12 +472,13 @@ static struct dentry *autofs4_lookup(struct inode *dir, struct dentry *dentry, s DPRINTK("name = %.*s", dentry->d_name.len, dentry->d_name.name); + /* File name too long to exist */ if (dentry->d_name.len > NAME_MAX) - return ERR_PTR(-ENAMETOOLONG);/* File name too long to exist */ + return ERR_PTR(-ENAMETOOLONG); sbi = autofs4_sbi(dir->i_sb); - oz_mode = autofs4_oz_mode(sbi); + DPRINTK("pid = %u, pgrp = %u, catatonic = %d, oz_mode = %d", current->pid, process_group(current), sbi->catatonic, oz_mode); @@ -519,7 +530,7 @@ static struct dentry *autofs4_lookup(struct inode *dir, struct dentry *dentry, s * doesn't do the right thing for all system calls, but it should * be OK for the operations we permit from an autofs. 
*/ - if ( dentry->d_inode && d_unhashed(dentry) ) + if (dentry->d_inode && d_unhashed(dentry)) return ERR_PTR(-ENOENT); return NULL; @@ -531,6 +542,7 @@ static int autofs4_dir_symlink(struct inode *dir, { struct autofs_sb_info *sbi = autofs4_sbi(dir->i_sb); struct autofs_info *ino = autofs4_dentry_ino(dentry); + struct autofs_info *p_ino; struct inode *inode; char *cp; @@ -564,6 +576,10 @@ static int autofs4_dir_symlink(struct inode *dir, dentry->d_fsdata = ino; ino->dentry = dget(dentry); + atomic_inc(&ino->count); + p_ino = autofs4_dentry_ino(dentry->d_parent); + if (p_ino && dentry->d_parent != dentry) + atomic_inc(&p_ino->count); ino->inode = inode; dir->i_mtime = CURRENT_TIME; @@ -590,11 +606,17 @@ static int autofs4_dir_unlink(struct inode *dir, struct dentry *dentry) { struct autofs_sb_info *sbi = autofs4_sbi(dir->i_sb); struct autofs_info *ino = autofs4_dentry_ino(dentry); + struct autofs_info *p_ino; /* This allows root to remove symlinks */ if ( !autofs4_oz_mode(sbi) && !capable(CAP_SYS_ADMIN) ) return -EACCES; + if (atomic_dec_and_test(&ino->count)) { + p_ino = autofs4_dentry_ino(dentry->d_parent); + if (p_ino && dentry->d_parent != dentry) + atomic_dec(&p_ino->count); + } dput(ino->dentry); dentry->d_inode->i_size = 0; @@ -611,6 +633,7 @@ static int autofs4_dir_rmdir(struct inode *dir, struct dentry *dentry) { struct autofs_sb_info *sbi = autofs4_sbi(dir->i_sb); struct autofs_info *ino = autofs4_dentry_ino(dentry); + struct autofs_info *p_ino; if (!autofs4_oz_mode(sbi)) return -EACCES; @@ -625,8 +648,12 @@ static int autofs4_dir_rmdir(struct inode *dir, struct dentry *dentry) spin_unlock(&dentry->d_lock); spin_unlock(&dcache_lock); + if (atomic_dec_and_test(&ino->count)) { + p_ino = autofs4_dentry_ino(dentry->d_parent); + if (p_ino && dentry->d_parent != dentry) + atomic_dec(&p_ino->count); + } dput(ino->dentry); - dentry->d_inode->i_size = 0; dentry->d_inode->i_nlink = 0; @@ -640,6 +667,7 @@ static int autofs4_dir_mkdir(struct inode *dir, struct dentry *dentry, int mode) { struct autofs_sb_info *sbi = autofs4_sbi(dir->i_sb); struct autofs_info *ino = autofs4_dentry_ino(dentry); + struct autofs_info *p_ino; struct inode *inode; if ( !autofs4_oz_mode(sbi) ) @@ -662,6 +690,10 @@ static int autofs4_dir_mkdir(struct inode *dir, struct dentry *dentry, int mode) dentry->d_fsdata = ino; ino->dentry = dget(dentry); + atomic_inc(&ino->count); + p_ino = autofs4_dentry_ino(dentry->d_parent); + if (p_ino && dentry->d_parent != dentry) + atomic_inc(&p_ino->count); ino->inode = inode; dir->i_nlink++; dir->i_mtime = CURRENT_TIME; @@ -745,7 +777,7 @@ static inline int autofs4_ask_umount(struct vfsmount *mnt, int __user *p) { int status = 0; - if (may_umount(mnt) == 0) + if (may_umount(mnt)) status = 1; DPRINTK("returning %d", status); diff --git a/fs/autofs4/waitq.c b/fs/autofs4/waitq.c index be78e9378c03..142ab6aa2aa1 100644 --- a/fs/autofs4/waitq.c +++ b/fs/autofs4/waitq.c @@ -3,7 +3,7 @@ * linux/fs/autofs/waitq.c * * Copyright 1997-1998 Transmeta Corporation -- All Rights Reserved - * Copyright 2001-2003 Ian Kent <raven@themaw.net> + * Copyright 2001-2006 Ian Kent <raven@themaw.net> * * This file is part of the Linux kernel and is made available under * the terms of the GNU General Public License, version 2, or at your @@ -33,7 +33,7 @@ void autofs4_catatonic_mode(struct autofs_sb_info *sbi) sbi->catatonic = 1; wq = sbi->queues; sbi->queues = NULL; /* Erase all wait queues */ - while ( wq ) { + while (wq) { nwq = wq->next; wq->status = -ENOENT; /* Magic is gone - report failure */ 
kfree(wq->name); @@ -45,7 +45,6 @@ void autofs4_catatonic_mode(struct autofs_sb_info *sbi) fput(sbi->pipe); /* Close the pipe */ sbi->pipe = NULL; } - shrink_dcache_sb(sbi->sb); } @@ -98,7 +97,10 @@ static void autofs4_notify_daemon(struct autofs_sb_info *sbi, pkt.hdr.proto_version = sbi->version; pkt.hdr.type = type; - if (type == autofs_ptype_missing) { + switch (type) { + /* Kernel protocol v4 missing and expire packets */ + case autofs_ptype_missing: + { struct autofs_packet_missing *mp = &pkt.missing; pktsz = sizeof(*mp); @@ -107,7 +109,10 @@ static void autofs4_notify_daemon(struct autofs_sb_info *sbi, mp->len = wq->len; memcpy(mp->name, wq->name, wq->len); mp->name[wq->len] = '\0'; - } else if (type == autofs_ptype_expire_multi) { + break; + } + case autofs_ptype_expire_multi: + { struct autofs_packet_expire_multi *ep = &pkt.expire_multi; pktsz = sizeof(*ep); @@ -116,7 +121,34 @@ static void autofs4_notify_daemon(struct autofs_sb_info *sbi, ep->len = wq->len; memcpy(ep->name, wq->name, wq->len); ep->name[wq->len] = '\0'; - } else { + break; + } + /* + * Kernel protocol v5 packet for handling indirect and direct + * mount missing and expire requests + */ + case autofs_ptype_missing_indirect: + case autofs_ptype_expire_indirect: + case autofs_ptype_missing_direct: + case autofs_ptype_expire_direct: + { + struct autofs_v5_packet *packet = &pkt.v5_packet; + + pktsz = sizeof(*packet); + + packet->wait_queue_token = wq->wait_queue_token; + packet->len = wq->len; + memcpy(packet->name, wq->name, wq->len); + packet->name[wq->len] = '\0'; + packet->dev = wq->dev; + packet->ino = wq->ino; + packet->uid = wq->uid; + packet->gid = wq->gid; + packet->pid = wq->pid; + packet->tgid = wq->tgid; + break; + } + default: printk("autofs4_notify_daemon: bad type %d!\n", type); return; } @@ -162,21 +194,29 @@ int autofs4_wait(struct autofs_sb_info *sbi, struct dentry *dentry, { struct autofs_wait_queue *wq; char *name; - int len, status; + unsigned int len = 0; + unsigned int hash = 0; + int status; /* In catatonic mode, we don't wait for nobody */ - if ( sbi->catatonic ) + if (sbi->catatonic) return -ENOENT; name = kmalloc(NAME_MAX + 1, GFP_KERNEL); if (!name) return -ENOMEM; - len = autofs4_getpath(sbi, dentry, &name); - if (!len) { - kfree(name); - return -ENOENT; + /* If this is a direct mount request create a dummy name */ + if (IS_ROOT(dentry) && (sbi->type & AUTOFS_TYPE_DIRECT)) + len = sprintf(name, "%p", dentry); + else { + len = autofs4_getpath(sbi, dentry, &name); + if (!len) { + kfree(name); + return -ENOENT; + } } + hash = full_name_hash(name, len); if (mutex_lock_interruptible(&sbi->wq_mutex)) { kfree(name); @@ -190,7 +230,7 @@ int autofs4_wait(struct autofs_sb_info *sbi, struct dentry *dentry, break; } - if ( !wq ) { + if (!wq) { /* Can't wait for an expire if there's no mount */ if (notify == NFY_NONE && !d_mountpoint(dentry)) { kfree(name); @@ -200,7 +240,7 @@ int autofs4_wait(struct autofs_sb_info *sbi, struct dentry *dentry, /* Create a new wait queue */ wq = kmalloc(sizeof(struct autofs_wait_queue),GFP_KERNEL); - if ( !wq ) { + if (!wq) { kfree(name); mutex_unlock(&sbi->wq_mutex); return -ENOMEM; @@ -212,12 +252,18 @@ int autofs4_wait(struct autofs_sb_info *sbi, struct dentry *dentry, wq->next = sbi->queues; sbi->queues = wq; init_waitqueue_head(&wq->queue); - wq->hash = dentry->d_name.hash; + wq->hash = hash; wq->name = name; wq->len = len; + wq->dev = autofs4_get_dev(sbi); + wq->ino = autofs4_get_ino(sbi); + wq->uid = current->uid; + wq->gid = current->gid; + wq->pid = current->pid; 
+ wq->tgid = current->tgid; wq->status = -EINTR; /* Status return if interrupted */ atomic_set(&wq->wait_ctr, 2); - atomic_set(&wq->notified, 1); + atomic_set(&wq->notify, 1); mutex_unlock(&sbi->wq_mutex); } else { atomic_inc(&wq->wait_ctr); @@ -227,9 +273,26 @@ int autofs4_wait(struct autofs_sb_info *sbi, struct dentry *dentry, (unsigned long) wq->wait_queue_token, wq->len, wq->name, notify); } - if (notify != NFY_NONE && atomic_dec_and_test(&wq->notified)) { - int type = (notify == NFY_MOUNT ? - autofs_ptype_missing : autofs_ptype_expire_multi); + if (notify != NFY_NONE && atomic_read(&wq->notify)) { + int type; + + atomic_dec(&wq->notify); + + if (sbi->version < 5) { + if (notify == NFY_MOUNT) + type = autofs_ptype_missing; + else + type = autofs_ptype_expire_multi; + } else { + if (notify == NFY_MOUNT) + type = (sbi->type & AUTOFS_TYPE_DIRECT) ? + autofs_ptype_missing_direct : + autofs_ptype_missing_indirect; + else + type = (sbi->type & AUTOFS_TYPE_DIRECT) ? + autofs_ptype_expire_direct : + autofs_ptype_expire_indirect; + } DPRINTK("new wait id = 0x%08lx, name = %.*s, nfy=%d\n", (unsigned long) wq->wait_queue_token, wq->len, wq->name, notify); @@ -240,14 +303,14 @@ int autofs4_wait(struct autofs_sb_info *sbi, struct dentry *dentry, /* wq->name is NULL if and only if the lock is already released */ - if ( sbi->catatonic ) { + if (sbi->catatonic) { /* We might have slept, so check again for catatonic mode */ wq->status = -ENOENT; kfree(wq->name); wq->name = NULL; } - if ( wq->name ) { + if (wq->name) { /* Block all but "shutdown" signals while waiting */ sigset_t oldset; unsigned long irqflags; @@ -283,12 +346,12 @@ int autofs4_wait_release(struct autofs_sb_info *sbi, autofs_wqt_t wait_queue_tok struct autofs_wait_queue *wq, **wql; mutex_lock(&sbi->wq_mutex); - for ( wql = &sbi->queues ; (wq = *wql) != 0 ; wql = &wq->next ) { - if ( wq->wait_queue_token == wait_queue_token ) + for (wql = &sbi->queues ; (wq = *wql) != 0 ; wql = &wq->next) { + if (wq->wait_queue_token == wait_queue_token) break; } - if ( !wq ) { + if (!wq) { mutex_unlock(&sbi->wq_mutex); return -EINVAL; } diff --git a/fs/bad_inode.c b/fs/bad_inode.c index e172180a1d8c..80599ae33966 100644 --- a/fs/bad_inode.c +++ b/fs/bad_inode.c @@ -22,7 +22,7 @@ static int return_EIO(void) #define EIO_ERROR ((void *) (return_EIO)) -static struct file_operations bad_file_ops = +static const struct file_operations bad_file_ops = { .llseek = EIO_ERROR, .aio_read = EIO_ERROR, diff --git a/fs/befs/linuxvfs.c b/fs/befs/linuxvfs.c index 044a59587829..68ebd10f345d 100644 --- a/fs/befs/linuxvfs.c +++ b/fs/befs/linuxvfs.c @@ -64,7 +64,7 @@ static const struct super_operations befs_sops = { /* slab cache for befs_inode_info objects */ static kmem_cache_t *befs_inode_cachep; -static struct file_operations befs_dir_operations = { +static const struct file_operations befs_dir_operations = { .read = generic_read_dir, .readdir = befs_readdir, }; diff --git a/fs/bfs/bfs.h b/fs/bfs/bfs.h index 1fbc53f14aba..9d791004b21c 100644 --- a/fs/bfs/bfs.h +++ b/fs/bfs/bfs.h @@ -49,11 +49,11 @@ static inline struct bfs_inode_info *BFS_I(struct inode *inode) /* file.c */ extern struct inode_operations bfs_file_inops; -extern struct file_operations bfs_file_operations; +extern const struct file_operations bfs_file_operations; extern struct address_space_operations bfs_aops; /* dir.c */ extern struct inode_operations bfs_dir_inops; -extern struct file_operations bfs_dir_operations; +extern const struct file_operations bfs_dir_operations; #endif /* _FS_BFS_BFS_H */ 
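
The change that dominates the hunks above is mechanical: every file_operations table that is fully known at compile time is declared const, so it lands in read-only data and matches the const-qualified pointers the VFS accepts after this series. A minimal sketch of the resulting pattern, using a hypothetical examplefs (the identifiers below are illustrative and not taken from this diff):

#include <linux/fs.h>

/* Hypothetical examplefs: the method table is filled in entirely at
 * compile time, so it is defined const and ends up in .rodata. */
static int examplefs_open(struct inode *inode, struct file *filp)
{
	return 0;		/* nothing to set up in this sketch */
}

const struct file_operations examplefs_file_operations = {
	.llseek	= generic_file_llseek,
	.read	= generic_file_read,	/* 2.6.16-era read helper, as in the hunks above */
	.open	= examplefs_open,
};

/* The matching header declaration must carry the same qualifier,
 * mirroring the fs/ * / *.h hunks in this diff:
 *
 *	extern const struct file_operations examplefs_file_operations;
 *
 * Without the const on the extern declaration the definition above
 * would not compile, which is why the headers change in lockstep. */
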
diff --git a/fs/bfs/dir.c b/fs/bfs/dir.c index 5af928fa0449..26fad9621738 100644 --- a/fs/bfs/dir.c +++ b/fs/bfs/dir.c @@ -70,7 +70,7 @@ static int bfs_readdir(struct file * f, void * dirent, filldir_t filldir) return 0; } -struct file_operations bfs_dir_operations = { +const struct file_operations bfs_dir_operations = { .read = generic_read_dir, .readdir = bfs_readdir, .fsync = file_fsync, diff --git a/fs/bfs/file.c b/fs/bfs/file.c index 807723b65daf..d83cd74a2e4e 100644 --- a/fs/bfs/file.c +++ b/fs/bfs/file.c @@ -17,7 +17,7 @@ #define dprintf(x...) #endif -struct file_operations bfs_file_operations = { +const struct file_operations bfs_file_operations = { .llseek = generic_file_llseek, .read = generic_file_read, .write = generic_file_write, diff --git a/fs/binfmt_misc.c b/fs/binfmt_misc.c index 6a7b730c206b..d73d75591a39 100644 --- a/fs/binfmt_misc.c +++ b/fs/binfmt_misc.c @@ -600,7 +600,7 @@ static ssize_t bm_entry_write(struct file *file, const char __user *buffer, return count; } -static struct file_operations bm_entry_operations = { +static const struct file_operations bm_entry_operations = { .read = bm_entry_read, .write = bm_entry_write, }; @@ -668,7 +668,7 @@ out: return count; } -static struct file_operations bm_register_operations = { +static const struct file_operations bm_register_operations = { .write = bm_register_write, }; @@ -715,7 +715,7 @@ static ssize_t bm_status_write(struct file * file, const char __user * buffer, return count; } -static struct file_operations bm_status_operations = { +static const struct file_operations bm_status_operations = { .read = bm_status_read, .write = bm_status_write, }; @@ -30,7 +30,7 @@ #define BIO_POOL_SIZE 256 -static kmem_cache_t *bio_slab; +static kmem_cache_t *bio_slab __read_mostly; #define BIOVEC_NR_POOLS 6 @@ -39,7 +39,7 @@ static kmem_cache_t *bio_slab; * basically we just need to survive */ #define BIO_SPLIT_ENTRIES 8 -mempool_t *bio_split_pool; +mempool_t *bio_split_pool __read_mostly; struct biovec_slab { int nr_vecs; @@ -1125,16 +1125,6 @@ struct bio_pair *bio_split(struct bio *bi, mempool_t *pool, int first_sectors) return bp; } -static void *bio_pair_alloc(gfp_t gfp_flags, void *data) -{ - return kmalloc(sizeof(struct bio_pair), gfp_flags); -} - -static void bio_pair_free(void *bp, void *data) -{ - kfree(bp); -} - /* * create memory pools for biovec's in a bio_set. 
@@ -1151,8 +1141,7 @@ static int biovec_create_pools(struct bio_set *bs, int pool_entries, int scale) if (i >= scale) pool_entries >>= 1; - *bvp = mempool_create(pool_entries, mempool_alloc_slab, - mempool_free_slab, bp->slab); + *bvp = mempool_create_slab_pool(pool_entries, bp->slab); if (!*bvp) return -ENOMEM; } @@ -1189,9 +1178,7 @@ struct bio_set *bioset_create(int bio_pool_size, int bvec_pool_size, int scale) if (!bs) return NULL; - bs->bio_pool = mempool_create(bio_pool_size, mempool_alloc_slab, - mempool_free_slab, bio_slab); - + bs->bio_pool = mempool_create_slab_pool(bio_pool_size, bio_slab); if (!bs->bio_pool) goto bad; @@ -1254,8 +1241,8 @@ static int __init init_bio(void) if (!fs_bio_set) panic("bio: can't allocate bios\n"); - bio_split_pool = mempool_create(BIO_SPLIT_ENTRIES, - bio_pair_alloc, bio_pair_free, NULL); + bio_split_pool = mempool_create_kmalloc_pool(BIO_SPLIT_ENTRIES, + sizeof(struct bio_pair)); if (!bio_split_pool) panic("bio: can't create split pool\n"); diff --git a/fs/block_dev.c b/fs/block_dev.c index 573fc8e0b67a..af88c43043d5 100644 --- a/fs/block_dev.c +++ b/fs/block_dev.c @@ -131,9 +131,10 @@ blkdev_get_block(struct inode *inode, sector_t iblock, static int blkdev_get_blocks(struct inode *inode, sector_t iblock, - unsigned long max_blocks, struct buffer_head *bh, int create) + struct buffer_head *bh, int create) { sector_t end_block = max_block(I_BDEV(inode)); + unsigned long max_blocks = bh->b_size >> inode->i_blkbits; if ((iblock + max_blocks) > end_block) { max_blocks = end_block - iblock; @@ -234,7 +235,7 @@ static int block_fsync(struct file *filp, struct dentry *dentry, int datasync) */ static __cacheline_aligned_in_smp DEFINE_SPINLOCK(bdev_lock); -static kmem_cache_t * bdev_cachep; +static kmem_cache_t * bdev_cachep __read_mostly; static struct inode *bdev_alloc_inode(struct super_block *sb) { @@ -265,6 +266,9 @@ static void init_once(void * foo, kmem_cache_t * cachep, unsigned long flags) mutex_init(&bdev->bd_mount_mutex); INIT_LIST_HEAD(&bdev->bd_inodes); INIT_LIST_HEAD(&bdev->bd_list); +#ifdef CONFIG_SYSFS + INIT_LIST_HEAD(&bdev->bd_holder_list); +#endif inode_init_once(&ei->vfs_inode); } } @@ -308,7 +312,7 @@ static struct file_system_type bd_type = { .kill_sb = kill_anon_super, }; -static struct vfsmount *bd_mnt; +static struct vfsmount *bd_mnt __read_mostly; struct super_block *blockdev_superblock; void __init bdev_cache_init(void) @@ -489,6 +493,300 @@ void bd_release(struct block_device *bdev) EXPORT_SYMBOL(bd_release); +#ifdef CONFIG_SYSFS +/* + * Functions for bd_claim_by_kobject / bd_release_from_kobject + * + * If a kobject is passed to bd_claim_by_kobject() + * and the kobject has a parent directory, + * following symlinks are created: + * o from the kobject to the claimed bdev + * o from "holders" directory of the bdev to the parent of the kobject + * bd_release_from_kobject() removes these symlinks. 
+ * + * Example: + * If /dev/dm-0 maps to /dev/sda, kobject corresponding to + * /sys/block/dm-0/slaves is passed to bd_claim_by_kobject(), then: + * /sys/block/dm-0/slaves/sda --> /sys/block/sda + * /sys/block/sda/holders/dm-0 --> /sys/block/dm-0 + */ + +static struct kobject *bdev_get_kobj(struct block_device *bdev) +{ + if (bdev->bd_contains != bdev) + return kobject_get(&bdev->bd_part->kobj); + else + return kobject_get(&bdev->bd_disk->kobj); +} + +static struct kobject *bdev_get_holder(struct block_device *bdev) +{ + if (bdev->bd_contains != bdev) + return kobject_get(bdev->bd_part->holder_dir); + else + return kobject_get(bdev->bd_disk->holder_dir); +} + +static void add_symlink(struct kobject *from, struct kobject *to) +{ + if (!from || !to) + return; + sysfs_create_link(from, to, kobject_name(to)); +} + +static void del_symlink(struct kobject *from, struct kobject *to) +{ + if (!from || !to) + return; + sysfs_remove_link(from, kobject_name(to)); +} + +/* + * 'struct bd_holder' contains pointers to kobjects symlinked by + * bd_claim_by_kobject. + * It's connected to bd_holder_list which is protected by bdev->bd_sem. + */ +struct bd_holder { + struct list_head list; /* chain of holders of the bdev */ + int count; /* references from the holder */ + struct kobject *sdir; /* holder object, e.g. "/block/dm-0/slaves" */ + struct kobject *hdev; /* e.g. "/block/dm-0" */ + struct kobject *hdir; /* e.g. "/block/sda/holders" */ + struct kobject *sdev; /* e.g. "/block/sda" */ +}; + +/* + * Get references of related kobjects at once. + * Returns 1 on success. 0 on failure. + * + * Should call bd_holder_release_dirs() after successful use. + */ +static int bd_holder_grab_dirs(struct block_device *bdev, + struct bd_holder *bo) +{ + if (!bdev || !bo) + return 0; + + bo->sdir = kobject_get(bo->sdir); + if (!bo->sdir) + return 0; + + bo->hdev = kobject_get(bo->sdir->parent); + if (!bo->hdev) + goto fail_put_sdir; + + bo->sdev = bdev_get_kobj(bdev); + if (!bo->sdev) + goto fail_put_hdev; + + bo->hdir = bdev_get_holder(bdev); + if (!bo->hdir) + goto fail_put_sdev; + + return 1; + +fail_put_sdev: + kobject_put(bo->sdev); +fail_put_hdev: + kobject_put(bo->hdev); +fail_put_sdir: + kobject_put(bo->sdir); + + return 0; +} + +/* Put references of related kobjects at once. */ +static void bd_holder_release_dirs(struct bd_holder *bo) +{ + kobject_put(bo->hdir); + kobject_put(bo->sdev); + kobject_put(bo->hdev); + kobject_put(bo->sdir); +} + +static struct bd_holder *alloc_bd_holder(struct kobject *kobj) +{ + struct bd_holder *bo; + + bo = kzalloc(sizeof(*bo), GFP_KERNEL); + if (!bo) + return NULL; + + bo->count = 1; + bo->sdir = kobj; + + return bo; +} + +static void free_bd_holder(struct bd_holder *bo) +{ + kfree(bo); +} + +/** + * add_bd_holder - create sysfs symlinks for bd_claim() relationship + * + * @bdev: block device to be bd_claimed + * @bo: preallocated and initialized by alloc_bd_holder() + * + * If there is no matching entry with @bo in @bdev->bd_holder_list, + * add @bo to the list, create symlinks. + * + * Returns 1 if @bo was added to the list. + * Returns 0 if @bo wasn't used by any reason and should be freed. 
+ */ +static int add_bd_holder(struct block_device *bdev, struct bd_holder *bo) +{ + struct bd_holder *tmp; + + if (!bo) + return 0; + + list_for_each_entry(tmp, &bdev->bd_holder_list, list) { + if (tmp->sdir == bo->sdir) { + tmp->count++; + return 0; + } + } + + if (!bd_holder_grab_dirs(bdev, bo)) + return 0; + + add_symlink(bo->sdir, bo->sdev); + add_symlink(bo->hdir, bo->hdev); + list_add_tail(&bo->list, &bdev->bd_holder_list); + return 1; +} + +/** + * del_bd_holder - delete sysfs symlinks for bd_claim() relationship + * + * @bdev: block device to be bd_claimed + * @kobj: holder's kobject + * + * If there is matching entry with @kobj in @bdev->bd_holder_list + * and no other bd_claim() from the same kobject, + * remove the struct bd_holder from the list, delete symlinks for it. + * + * Returns a pointer to the struct bd_holder when it's removed from the list + * and ready to be freed. + * Returns NULL if matching claim isn't found or there is other bd_claim() + * by the same kobject. + */ +static struct bd_holder *del_bd_holder(struct block_device *bdev, + struct kobject *kobj) +{ + struct bd_holder *bo; + + list_for_each_entry(bo, &bdev->bd_holder_list, list) { + if (bo->sdir == kobj) { + bo->count--; + BUG_ON(bo->count < 0); + if (!bo->count) { + list_del(&bo->list); + del_symlink(bo->sdir, bo->sdev); + del_symlink(bo->hdir, bo->hdev); + bd_holder_release_dirs(bo); + return bo; + } + break; + } + } + + return NULL; +} + +/** + * bd_claim_by_kobject - bd_claim() with additional kobject signature + * + * @bdev: block device to be claimed + * @holder: holder's signature + * @kobj: holder's kobject + * + * Do bd_claim() and if it succeeds, create sysfs symlinks between + * the bdev and the holder's kobject. + * Use bd_release_from_kobject() when relesing the claimed bdev. + * + * Returns 0 on success. (same as bd_claim()) + * Returns errno on failure. + */ +static int bd_claim_by_kobject(struct block_device *bdev, void *holder, + struct kobject *kobj) +{ + int res; + struct bd_holder *bo; + + if (!kobj) + return -EINVAL; + + bo = alloc_bd_holder(kobj); + if (!bo) + return -ENOMEM; + + mutex_lock(&bdev->bd_mutex); + res = bd_claim(bdev, holder); + if (res || !add_bd_holder(bdev, bo)) + free_bd_holder(bo); + mutex_unlock(&bdev->bd_mutex); + + return res; +} + +/** + * bd_release_from_kobject - bd_release() with additional kobject signature + * + * @bdev: block device to be released + * @kobj: holder's kobject + * + * Do bd_release() and remove sysfs symlinks created by bd_claim_by_kobject(). + */ +static void bd_release_from_kobject(struct block_device *bdev, + struct kobject *kobj) +{ + struct bd_holder *bo; + + if (!kobj) + return; + + mutex_lock(&bdev->bd_mutex); + bd_release(bdev); + if ((bo = del_bd_holder(bdev, kobj))) + free_bd_holder(bo); + mutex_unlock(&bdev->bd_mutex); +} + +/** + * bd_claim_by_disk - wrapper function for bd_claim_by_kobject() + * + * @bdev: block device to be claimed + * @holder: holder's signature + * @disk: holder's gendisk + * + * Call bd_claim_by_kobject() with getting @disk->slave_dir. + */ +int bd_claim_by_disk(struct block_device *bdev, void *holder, + struct gendisk *disk) +{ + return bd_claim_by_kobject(bdev, holder, kobject_get(disk->slave_dir)); +} +EXPORT_SYMBOL_GPL(bd_claim_by_disk); + +/** + * bd_release_from_disk - wrapper function for bd_release_from_kobject() + * + * @bdev: block device to be claimed + * @disk: holder's gendisk + * + * Call bd_release_from_kobject() and put @disk->slave_dir. 
+ */ +void bd_release_from_disk(struct block_device *bdev, struct gendisk *disk) +{ + bd_release_from_kobject(bdev, disk->slave_dir); + kobject_put(disk->slave_dir); +} +EXPORT_SYMBOL_GPL(bd_release_from_disk); +#endif + /* * Tries to open block device by device number. Use it ONLY if you * really do not have anything better - i.e. when you are behind a @@ -789,7 +1087,7 @@ struct address_space_operations def_blk_aops = { .direct_IO = blkdev_direct_IO, }; -struct file_operations def_blk_fops = { +const struct file_operations def_blk_fops = { .open = blkdev_open, .release = blkdev_close, .llseek = block_llseek, diff --git a/fs/buffer.c b/fs/buffer.c index 3b3ab5281920..23f1f3a68077 100644 --- a/fs/buffer.c +++ b/fs/buffer.c @@ -426,8 +426,10 @@ __find_get_block_slow(struct block_device *bdev, sector_t block) if (all_mapped) { printk("__find_get_block_slow() failed. " "block=%llu, b_blocknr=%llu\n", - (unsigned long long)block, (unsigned long long)bh->b_blocknr); - printk("b_state=0x%08lx, b_size=%u\n", bh->b_state, bh->b_size); + (unsigned long long)block, + (unsigned long long)bh->b_blocknr); + printk("b_state=0x%08lx, b_size=%zu\n", + bh->b_state, bh->b_size); printk("device blocksize: %d\n", 1 << bd_inode->i_blkbits); } out_unlock: @@ -491,7 +493,7 @@ static void free_more_memory(void) wakeup_pdflush(1024); yield(); - for_each_pgdat(pgdat) { + for_each_online_pgdat(pgdat) { zones = pgdat->node_zonelists[gfp_zone(GFP_NOFS)].zones; if (*zones) try_to_free_pages(zones, GFP_NOFS); @@ -796,8 +798,7 @@ void mark_buffer_dirty_inode(struct buffer_head *bh, struct inode *inode) if (!mapping->assoc_mapping) { mapping->assoc_mapping = buffer_mapping; } else { - if (mapping->assoc_mapping != buffer_mapping) - BUG(); + BUG_ON(mapping->assoc_mapping != buffer_mapping); } if (list_empty(&bh->b_assoc_buffers)) { spin_lock(&buffer_mapping->private_lock); @@ -1114,8 +1115,7 @@ grow_dev_page(struct block_device *bdev, sector_t block, if (!page) return NULL; - if (!PageLocked(page)) - BUG(); + BUG_ON(!PageLocked(page)); if (page_has_buffers(page)) { bh = page_buffers(page); @@ -1522,8 +1522,7 @@ void set_bh_page(struct buffer_head *bh, struct page *page, unsigned long offset) { bh->b_page = page; - if (offset >= PAGE_SIZE) - BUG(); + BUG_ON(offset >= PAGE_SIZE); if (PageHighMem(page)) /* * This catches illegal uses and preserves the offset: @@ -1593,11 +1592,10 @@ EXPORT_SYMBOL(try_to_release_page); * point. Because the caller is about to free (and possibly reuse) those * blocks on-disk. */ -int block_invalidatepage(struct page *page, unsigned long offset) +void block_invalidatepage(struct page *page, unsigned long offset) { struct buffer_head *head, *bh, *next; unsigned int curr_off = 0; - int ret = 1; BUG_ON(!PageLocked(page)); if (!page_has_buffers(page)) @@ -1624,19 +1622,18 @@ int block_invalidatepage(struct page *page, unsigned long offset) * so real IO is not possible anymore. */ if (offset == 0) - ret = try_to_release_page(page, 0); + try_to_release_page(page, 0); out: - return ret; + return; } EXPORT_SYMBOL(block_invalidatepage); -int do_invalidatepage(struct page *page, unsigned long offset) +void do_invalidatepage(struct page *page, unsigned long offset) { - int (*invalidatepage)(struct page *, unsigned long); - invalidatepage = page->mapping->a_ops->invalidatepage; - if (invalidatepage == NULL) - invalidatepage = block_invalidatepage; - return (*invalidatepage)(page, offset); + void (*invalidatepage)(struct page *, unsigned long); + invalidatepage = page->mapping->a_ops->invalidatepage ? 
: + block_invalidatepage; + (*invalidatepage)(page, offset); } /* @@ -1738,6 +1735,7 @@ static int __block_write_full_page(struct inode *inode, struct page *page, sector_t block; sector_t last_block; struct buffer_head *bh, *head; + const unsigned blocksize = 1 << inode->i_blkbits; int nr_underway = 0; BUG_ON(!PageLocked(page)); @@ -1745,7 +1743,7 @@ static int __block_write_full_page(struct inode *inode, struct page *page, last_block = (i_size_read(inode) - 1) >> inode->i_blkbits; if (!page_has_buffers(page)) { - create_empty_buffers(page, 1 << inode->i_blkbits, + create_empty_buffers(page, blocksize, (1 << BH_Dirty)|(1 << BH_Uptodate)); } @@ -1780,6 +1778,7 @@ static int __block_write_full_page(struct inode *inode, struct page *page, clear_buffer_dirty(bh); set_buffer_uptodate(bh); } else if (!buffer_mapped(bh) && buffer_dirty(bh)) { + WARN_ON(bh->b_size != blocksize); err = get_block(inode, block, bh, 1); if (err) goto recover; @@ -1933,6 +1932,7 @@ static int __block_prepare_write(struct inode *inode, struct page *page, if (buffer_new(bh)) clear_buffer_new(bh); if (!buffer_mapped(bh)) { + WARN_ON(bh->b_size != blocksize); err = get_block(inode, block, bh, 1); if (err) break; @@ -2088,6 +2088,7 @@ int block_read_full_page(struct page *page, get_block_t *get_block) fully_mapped = 0; if (iblock < lblock) { + WARN_ON(bh->b_size != blocksize); err = get_block(inode, iblock, bh, 0); if (err) SetPageError(page); @@ -2409,6 +2410,7 @@ int nobh_prepare_write(struct page *page, unsigned from, unsigned to, create = 1; if (block_start >= to) create = 0; + map_bh.b_size = blocksize; ret = get_block(inode, block_in_file + block_in_page, &map_bh, create); if (ret) @@ -2669,6 +2671,7 @@ int block_truncate_page(struct address_space *mapping, err = 0; if (!buffer_mapped(bh)) { + WARN_ON(bh->b_size != blocksize); err = get_block(inode, iblock, bh, 0); if (err) goto unlock; @@ -2755,6 +2758,7 @@ sector_t generic_block_bmap(struct address_space *mapping, sector_t block, struct inode *inode = mapping->host; tmp.b_state = 0; tmp.b_blocknr = 0; + tmp.b_size = 1 << inode->i_blkbits; get_block(inode, block, &tmp, 0); return tmp.b_blocknr; } @@ -3007,7 +3011,7 @@ out: } EXPORT_SYMBOL(try_to_free_buffers); -int block_sync_page(struct page *page) +void block_sync_page(struct page *page) { struct address_space *mapping; @@ -3015,7 +3019,6 @@ int block_sync_page(struct page *page) mapping = page_mapping(page); if (mapping) blk_run_backing_dev(mapping->backing_dev_info, page); - return 0; } /* diff --git a/fs/char_dev.c b/fs/char_dev.c index 8c6eb04d31e2..4e1b849f912f 100644 --- a/fs/char_dev.c +++ b/fs/char_dev.c @@ -250,7 +250,7 @@ int alloc_chrdev_region(dev_t *dev, unsigned baseminor, unsigned count, } int register_chrdev(unsigned int major, const char *name, - struct file_operations *fops) + const struct file_operations *fops) { struct char_device_struct *cd; struct cdev *cdev; @@ -406,7 +406,7 @@ static void cdev_purge(struct cdev *cdev) * is contain the open that then fills in the correct operations * depending on the special file... 
*/ -struct file_operations def_chr_fops = { +const struct file_operations def_chr_fops = { .open = chrdev_open, }; @@ -473,7 +473,7 @@ struct cdev *cdev_alloc(void) return p; } -void cdev_init(struct cdev *cdev, struct file_operations *fops) +void cdev_init(struct cdev *cdev, const struct file_operations *fops) { memset(cdev, 0, sizeof *cdev); INIT_LIST_HEAD(&cdev->list); diff --git a/fs/cifs/cifsfs.c b/fs/cifs/cifsfs.c index 221b3334b737..4bbc544857bc 100644 --- a/fs/cifs/cifsfs.c +++ b/fs/cifs/cifsfs.c @@ -583,7 +583,7 @@ struct inode_operations cifs_symlink_inode_ops = { #endif }; -struct file_operations cifs_file_ops = { +const struct file_operations cifs_file_ops = { .read = do_sync_read, .write = do_sync_write, .readv = generic_file_readv, @@ -607,7 +607,7 @@ struct file_operations cifs_file_ops = { #endif /* CONFIG_CIFS_EXPERIMENTAL */ }; -struct file_operations cifs_file_direct_ops = { +const struct file_operations cifs_file_direct_ops = { /* no mmap, no aio, no readv - BB reevaluate whether they can be done with directio, no cache */ .read = cifs_user_read, @@ -626,7 +626,7 @@ struct file_operations cifs_file_direct_ops = { .dir_notify = cifs_dir_notify, #endif /* CONFIG_CIFS_EXPERIMENTAL */ }; -struct file_operations cifs_file_nobrl_ops = { +const struct file_operations cifs_file_nobrl_ops = { .read = do_sync_read, .write = do_sync_write, .readv = generic_file_readv, @@ -649,7 +649,7 @@ struct file_operations cifs_file_nobrl_ops = { #endif /* CONFIG_CIFS_EXPERIMENTAL */ }; -struct file_operations cifs_file_direct_nobrl_ops = { +const struct file_operations cifs_file_direct_nobrl_ops = { /* no mmap, no aio, no readv - BB reevaluate whether they can be done with directio, no cache */ .read = cifs_user_read, @@ -668,7 +668,7 @@ struct file_operations cifs_file_direct_nobrl_ops = { #endif /* CONFIG_CIFS_EXPERIMENTAL */ }; -struct file_operations cifs_dir_ops = { +const struct file_operations cifs_dir_ops = { .readdir = cifs_readdir, .release = cifs_closedir, .read = generic_read_dir, @@ -738,10 +738,8 @@ cifs_init_request_bufs(void) cERROR(1,("cifs_min_rcv set to maximum (64)")); } - cifs_req_poolp = mempool_create(cifs_min_rcv, - mempool_alloc_slab, - mempool_free_slab, - cifs_req_cachep); + cifs_req_poolp = mempool_create_slab_pool(cifs_min_rcv, + cifs_req_cachep); if(cifs_req_poolp == NULL) { kmem_cache_destroy(cifs_req_cachep); @@ -771,10 +769,8 @@ cifs_init_request_bufs(void) cFYI(1,("cifs_min_small set to maximum (256)")); } - cifs_sm_req_poolp = mempool_create(cifs_min_small, - mempool_alloc_slab, - mempool_free_slab, - cifs_sm_req_cachep); + cifs_sm_req_poolp = mempool_create_slab_pool(cifs_min_small, + cifs_sm_req_cachep); if(cifs_sm_req_poolp == NULL) { mempool_destroy(cifs_req_poolp); @@ -808,10 +804,8 @@ cifs_init_mids(void) if (cifs_mid_cachep == NULL) return -ENOMEM; - cifs_mid_poolp = mempool_create(3 /* a reasonable min simultan opers */, - mempool_alloc_slab, - mempool_free_slab, - cifs_mid_cachep); + /* 3 is a reasonable minimum number of simultaneous operations */ + cifs_mid_poolp = mempool_create_slab_pool(3, cifs_mid_cachep); if(cifs_mid_poolp == NULL) { kmem_cache_destroy(cifs_mid_cachep); return -ENOMEM; diff --git a/fs/cifs/cifsfs.h b/fs/cifs/cifsfs.h index 821a8eb22559..74f405ae4da3 100644 --- a/fs/cifs/cifsfs.h +++ b/fs/cifs/cifsfs.h @@ -61,10 +61,10 @@ extern struct inode_operations cifs_file_inode_ops; extern struct inode_operations cifs_symlink_inode_ops; /* Functions related to files and directories */ -extern struct file_operations cifs_file_ops; 
-extern struct file_operations cifs_file_direct_ops; /* if directio mount */ -extern struct file_operations cifs_file_nobrl_ops; -extern struct file_operations cifs_file_direct_nobrl_ops; /* if directio mount */ +extern const struct file_operations cifs_file_ops; +extern const struct file_operations cifs_file_direct_ops; /* if directio mount */ +extern const struct file_operations cifs_file_nobrl_ops; +extern const struct file_operations cifs_file_direct_nobrl_ops; /* if directio mount */ extern int cifs_open(struct inode *inode, struct file *file); extern int cifs_close(struct inode *inode, struct file *file); extern int cifs_closedir(struct inode *inode, struct file *file); @@ -76,7 +76,7 @@ extern int cifs_lock(struct file *, int, struct file_lock *); extern int cifs_fsync(struct file *, struct dentry *, int); extern int cifs_flush(struct file *); extern int cifs_file_mmap(struct file * , struct vm_area_struct *); -extern struct file_operations cifs_dir_ops; +extern const struct file_operations cifs_dir_ops; extern int cifs_dir_open(struct inode *inode, struct file *file); extern int cifs_readdir(struct file *file, void *direntry, filldir_t filldir); extern int cifs_dir_notify(struct file *, unsigned long arg); diff --git a/fs/cifs/file.c b/fs/cifs/file.c index 165d67426381..fb49aef1f2ec 100644 --- a/fs/cifs/file.c +++ b/fs/cifs/file.c @@ -1339,7 +1339,7 @@ int cifs_fsync(struct file *file, struct dentry *dentry, int datasync) return rc; } -/* static int cifs_sync_page(struct page *page) +/* static void cifs_sync_page(struct page *page) { struct address_space *mapping; struct inode *inode; @@ -1353,16 +1353,18 @@ int cifs_fsync(struct file *file, struct dentry *dentry, int datasync) return 0; inode = mapping->host; if (!inode) - return 0; */ + return; */ /* fill in rpages then result = cifs_pagein_inode(inode, index, rpages); */ /* BB finish */ /* cFYI(1, ("rpages is %d for sync page of Index %ld ", rpages, index)); +#if 0 if (rc < 0) return rc; return 0; +#endif } */ /* diff --git a/fs/cifs/inode.c b/fs/cifs/inode.c index ff93a9f81d1c..598eec9778f6 100644 --- a/fs/cifs/inode.c +++ b/fs/cifs/inode.c @@ -163,9 +163,9 @@ int cifs_get_inode_info_unix(struct inode **pinode, if (num_of_bytes < end_of_file) cFYI(1, ("allocation size less than end of file")); - cFYI(1, - ("Size %ld and blocks %ld", - (unsigned long) inode->i_size, inode->i_blocks)); + cFYI(1, ("Size %ld and blocks %llu", + (unsigned long) inode->i_size, + (unsigned long long)inode->i_blocks)); if (S_ISREG(inode->i_mode)) { cFYI(1, ("File inode")); inode->i_op = &cifs_file_inode_ops; diff --git a/fs/cifs/readdir.c b/fs/cifs/readdir.c index edb3b6eb34bc..488bd0d81dcf 100644 --- a/fs/cifs/readdir.c +++ b/fs/cifs/readdir.c @@ -197,10 +197,10 @@ static void fill_in_inode(struct inode *tmp_inode, if (allocation_size < end_of_file) cFYI(1, ("May be sparse file, allocation less than file size")); - cFYI(1, - ("File Size %ld and blocks %ld and blocksize %ld", - (unsigned long)tmp_inode->i_size, tmp_inode->i_blocks, - tmp_inode->i_blksize)); + cFYI(1, ("File Size %ld and blocks %llu and blocksize %ld", + (unsigned long)tmp_inode->i_size, + (unsigned long long)tmp_inode->i_blocks, + tmp_inode->i_blksize)); if (S_ISREG(tmp_inode->i_mode)) { cFYI(1, ("File inode")); tmp_inode->i_op = &cifs_file_inode_ops; diff --git a/fs/coda/dir.c b/fs/coda/dir.c index 54f76de8a686..71f2ea632e53 100644 --- a/fs/coda/dir.c +++ b/fs/coda/dir.c @@ -82,7 +82,7 @@ struct inode_operations coda_dir_inode_operations = .setattr = coda_setattr, }; -struct 
file_operations coda_dir_operations = { +const struct file_operations coda_dir_operations = { .llseek = generic_file_llseek, .read = generic_read_dir, .readdir = coda_readdir, diff --git a/fs/coda/file.c b/fs/coda/file.c index 146a991d6eb5..7c2642431fa5 100644 --- a/fs/coda/file.c +++ b/fs/coda/file.c @@ -288,7 +288,7 @@ int coda_fsync(struct file *coda_file, struct dentry *coda_dentry, int datasync) return err; } -struct file_operations coda_file_operations = { +const struct file_operations coda_file_operations = { .llseek = generic_file_llseek, .read = coda_file_read, .write = coda_file_write, diff --git a/fs/coda/pioctl.c b/fs/coda/pioctl.c index 127714936c66..214822be87bd 100644 --- a/fs/coda/pioctl.c +++ b/fs/coda/pioctl.c @@ -36,7 +36,7 @@ struct inode_operations coda_ioctl_inode_operations = .setattr = coda_setattr, }; -struct file_operations coda_ioctl_operations = { +const struct file_operations coda_ioctl_operations = { .owner = THIS_MODULE, .ioctl = coda_pioctl, }; diff --git a/fs/coda/psdev.c b/fs/coda/psdev.c index 98c74fe2e139..6c6771db36da 100644 --- a/fs/coda/psdev.c +++ b/fs/coda/psdev.c @@ -342,7 +342,7 @@ static int coda_psdev_release(struct inode * inode, struct file * file) } -static struct file_operations coda_psdev_fops = { +static const struct file_operations coda_psdev_fops = { .owner = THIS_MODULE, .read = coda_psdev_read, .write = coda_psdev_write, diff --git a/fs/compat.c b/fs/compat.c index ef5a0771592d..7f8e26ea427c 100644 --- a/fs/compat.c +++ b/fs/compat.c @@ -1639,15 +1639,6 @@ void compat_set_fd_set(unsigned long nr, compat_ulong_t __user *ufdset, * This is a virtual copy of sys_select from fs/select.c and probably * should be compared to it from time to time */ -static void *select_bits_alloc(int size) -{ - return kmalloc(6 * size, GFP_KERNEL); -} - -static void select_bits_free(void *bits, int size) -{ - kfree(bits); -} /* * We can actually return ERESTARTSYS instead of EINTR, but I'd @@ -1686,7 +1677,7 @@ int compat_core_sys_select(int n, compat_ulong_t __user *inp, */ ret = -ENOMEM; size = FDS_BYTES(n); - bits = select_bits_alloc(size); + bits = kmalloc(6 * size, GFP_KERNEL); if (!bits) goto out_nofds; fds.in = (unsigned long *) bits; @@ -1720,7 +1711,7 @@ int compat_core_sys_select(int n, compat_ulong_t __user *inp, compat_set_fd_set(n, exp, fds.res_ex); out: - select_bits_free(bits, size); + kfree(bits); out_nofds: return ret; } diff --git a/fs/configfs/configfs_internal.h b/fs/configfs/configfs_internal.h index f70e46951b37..3f4ff7a242b9 100644 --- a/fs/configfs/configfs_internal.h +++ b/fs/configfs/configfs_internal.h @@ -72,9 +72,9 @@ extern void configfs_release_fs(void); extern struct rw_semaphore configfs_rename_sem; extern struct super_block * configfs_sb; -extern struct file_operations configfs_dir_operations; -extern struct file_operations configfs_file_operations; -extern struct file_operations bin_fops; +extern const struct file_operations configfs_dir_operations; +extern const struct file_operations configfs_file_operations; +extern const struct file_operations bin_fops; extern struct inode_operations configfs_dir_inode_operations; extern struct inode_operations configfs_symlink_inode_operations; diff --git a/fs/configfs/dir.c b/fs/configfs/dir.c index ca60e3abef45..8ed9b06a9828 100644 --- a/fs/configfs/dir.c +++ b/fs/configfs/dir.c @@ -1027,7 +1027,7 @@ static loff_t configfs_dir_lseek(struct file * file, loff_t offset, int origin) return offset; } -struct file_operations configfs_dir_operations = { +const struct file_operations 
configfs_dir_operations = { .open = configfs_dir_open, .release = configfs_dir_close, .llseek = configfs_dir_lseek, diff --git a/fs/configfs/file.c b/fs/configfs/file.c index 3921920d8716..f499803743e0 100644 --- a/fs/configfs/file.c +++ b/fs/configfs/file.c @@ -322,7 +322,7 @@ static int configfs_release(struct inode * inode, struct file * filp) return 0; } -struct file_operations configfs_file_operations = { +const struct file_operations configfs_file_operations = { .read = configfs_read_file, .write = configfs_write_file, .llseek = generic_file_llseek, diff --git a/fs/cramfs/inode.c b/fs/cramfs/inode.c index 8ad52f5bf255..9efcc3a164e8 100644 --- a/fs/cramfs/inode.c +++ b/fs/cramfs/inode.c @@ -22,16 +22,17 @@ #include <linux/cramfs_fs_sb.h> #include <linux/buffer_head.h> #include <linux/vfs.h> +#include <linux/mutex.h> #include <asm/semaphore.h> #include <asm/uaccess.h> static struct super_operations cramfs_ops; static struct inode_operations cramfs_dir_inode_operations; -static struct file_operations cramfs_directory_operations; +static const struct file_operations cramfs_directory_operations; static struct address_space_operations cramfs_aops; -static DECLARE_MUTEX(read_mutex); +static DEFINE_MUTEX(read_mutex); /* These two macros may change in future, to provide better st_ino @@ -250,20 +251,20 @@ static int cramfs_fill_super(struct super_block *sb, void *data, int silent) memset(sbi, 0, sizeof(struct cramfs_sb_info)); /* Invalidate the read buffers on mount: think disk change.. */ - down(&read_mutex); + mutex_lock(&read_mutex); for (i = 0; i < READ_BUFFERS; i++) buffer_blocknr[i] = -1; /* Read the first block and get the superblock from it */ memcpy(&super, cramfs_read(sb, 0, sizeof(super)), sizeof(super)); - up(&read_mutex); + mutex_unlock(&read_mutex); /* Do sanity checks on the superblock */ if (super.magic != CRAMFS_MAGIC) { /* check at 512 byte offset */ - down(&read_mutex); + mutex_lock(&read_mutex); memcpy(&super, cramfs_read(sb, 512, sizeof(super)), sizeof(super)); - up(&read_mutex); + mutex_unlock(&read_mutex); if (super.magic != CRAMFS_MAGIC) { if (!silent) printk(KERN_ERR "cramfs: wrong magic\n"); @@ -366,7 +367,7 @@ static int cramfs_readdir(struct file *filp, void *dirent, filldir_t filldir) mode_t mode; int namelen, error; - down(&read_mutex); + mutex_lock(&read_mutex); de = cramfs_read(sb, OFFSET(inode) + offset, sizeof(*de)+256); name = (char *)(de+1); @@ -379,7 +380,7 @@ static int cramfs_readdir(struct file *filp, void *dirent, filldir_t filldir) memcpy(buf, name, namelen); ino = CRAMINO(de); mode = de->mode; - up(&read_mutex); + mutex_unlock(&read_mutex); nextoffset = offset + sizeof(*de) + namelen; for (;;) { if (!namelen) { @@ -410,7 +411,7 @@ static struct dentry * cramfs_lookup(struct inode *dir, struct dentry *dentry, s unsigned int offset = 0; int sorted; - down(&read_mutex); + mutex_lock(&read_mutex); sorted = CRAMFS_SB(dir->i_sb)->flags & CRAMFS_FLAG_SORTED_DIRS; while (offset < dir->i_size) { struct cramfs_inode *de; @@ -433,7 +434,7 @@ static struct dentry * cramfs_lookup(struct inode *dir, struct dentry *dentry, s for (;;) { if (!namelen) { - up(&read_mutex); + mutex_unlock(&read_mutex); return ERR_PTR(-EIO); } if (name[namelen-1]) @@ -447,7 +448,7 @@ static struct dentry * cramfs_lookup(struct inode *dir, struct dentry *dentry, s continue; if (!retval) { struct cramfs_inode entry = *de; - up(&read_mutex); + mutex_unlock(&read_mutex); d_add(dentry, get_cramfs_inode(dir->i_sb, &entry)); return NULL; } @@ -455,7 +456,7 @@ static struct dentry * 
cramfs_lookup(struct inode *dir, struct dentry *dentry, s if (sorted) break; } - up(&read_mutex); + mutex_unlock(&read_mutex); d_add(dentry, NULL); return NULL; } @@ -474,21 +475,21 @@ static int cramfs_readpage(struct file *file, struct page * page) u32 start_offset, compr_len; start_offset = OFFSET(inode) + maxblock*4; - down(&read_mutex); + mutex_lock(&read_mutex); if (page->index) start_offset = *(u32 *) cramfs_read(sb, blkptr_offset-4, 4); compr_len = (*(u32 *) cramfs_read(sb, blkptr_offset, 4) - start_offset); - up(&read_mutex); + mutex_unlock(&read_mutex); pgdata = kmap(page); if (compr_len == 0) ; /* hole */ else { - down(&read_mutex); + mutex_lock(&read_mutex); bytes_filled = cramfs_uncompress_block(pgdata, PAGE_CACHE_SIZE, cramfs_read(sb, start_offset, compr_len), compr_len); - up(&read_mutex); + mutex_unlock(&read_mutex); } } else pgdata = kmap(page); @@ -511,7 +512,7 @@ static struct address_space_operations cramfs_aops = { /* * A directory can only readdir */ -static struct file_operations cramfs_directory_operations = { +static const struct file_operations cramfs_directory_operations = { .llseek = generic_file_llseek, .read = generic_read_dir, .readdir = cramfs_readdir, diff --git a/fs/dcache.c b/fs/dcache.c index 939584648504..19458d399502 100644 --- a/fs/dcache.c +++ b/fs/dcache.c @@ -34,9 +34,8 @@ #include <linux/swap.h> #include <linux/bootmem.h> -/* #define DCACHE_DEBUG 1 */ -int sysctl_vfs_cache_pressure = 100; +int sysctl_vfs_cache_pressure __read_mostly = 100; EXPORT_SYMBOL_GPL(sysctl_vfs_cache_pressure); __cacheline_aligned_in_smp DEFINE_SPINLOCK(dcache_lock); @@ -44,7 +43,7 @@ static seqlock_t rename_lock __cacheline_aligned_in_smp = SEQLOCK_UNLOCKED; EXPORT_SYMBOL(dcache_lock); -static kmem_cache_t *dentry_cache; +static kmem_cache_t *dentry_cache __read_mostly; #define DNAME_INLINE_LEN (sizeof(struct dentry)-offsetof(struct dentry,d_iname)) @@ -59,9 +58,9 @@ static kmem_cache_t *dentry_cache; #define D_HASHBITS d_hash_shift #define D_HASHMASK d_hash_mask -static unsigned int d_hash_mask; -static unsigned int d_hash_shift; -static struct hlist_head *dentry_hashtable; +static unsigned int d_hash_mask __read_mostly; +static unsigned int d_hash_shift __read_mostly; +static struct hlist_head *dentry_hashtable __read_mostly; static LIST_HEAD(dentry_unused); /* Statistics gathering. 
*/ @@ -603,10 +602,6 @@ resume: */ if (!list_empty(&dentry->d_subdirs)) { this_parent = dentry; -#ifdef DCACHE_DEBUG -printk(KERN_DEBUG "select_parent: descending to %s/%s, found=%d\n", -dentry->d_parent->d_name.name, dentry->d_name.name, found); -#endif goto repeat; } } @@ -616,10 +611,6 @@ dentry->d_parent->d_name.name, dentry->d_name.name, found); if (this_parent != parent) { next = this_parent->d_u.d_child.next; this_parent = this_parent->d_parent; -#ifdef DCACHE_DEBUG -printk(KERN_DEBUG "select_parent: ascending to %s/%s, found=%d\n", -this_parent->d_parent->d_name.name, this_parent->d_name.name, found); -#endif goto resume; } out: @@ -798,7 +789,7 @@ struct dentry *d_alloc_name(struct dentry *parent, const char *name) void d_instantiate(struct dentry *entry, struct inode * inode) { - if (!list_empty(&entry->d_alias)) BUG(); + BUG_ON(!list_empty(&entry->d_alias)); spin_lock(&dcache_lock); if (inode) list_add(&entry->d_alias, &inode->i_dentry); @@ -1719,10 +1710,10 @@ static void __init dcache_init(unsigned long mempages) } /* SLAB cache for __getname() consumers */ -kmem_cache_t *names_cachep; +kmem_cache_t *names_cachep __read_mostly; /* SLAB cache for file structures */ -kmem_cache_t *filp_cachep; +kmem_cache_t *filp_cachep __read_mostly; EXPORT_SYMBOL(d_genocide); diff --git a/fs/dcookies.c b/fs/dcookies.c index f8274a8f83bd..8749339bf4f6 100644 --- a/fs/dcookies.c +++ b/fs/dcookies.c @@ -23,6 +23,7 @@ #include <linux/mm.h> #include <linux/errno.h> #include <linux/dcookies.h> +#include <linux/mutex.h> #include <asm/uaccess.h> /* The dcookies are allocated from a kmem_cache and @@ -36,10 +37,10 @@ struct dcookie_struct { }; static LIST_HEAD(dcookie_users); -static DECLARE_MUTEX(dcookie_sem); -static kmem_cache_t * dcookie_cache; -static struct list_head * dcookie_hashtable; -static size_t hash_size; +static DEFINE_MUTEX(dcookie_mutex); +static kmem_cache_t *dcookie_cache __read_mostly; +static struct list_head *dcookie_hashtable __read_mostly; +static size_t hash_size __read_mostly; static inline int is_live(void) { @@ -114,7 +115,7 @@ int get_dcookie(struct dentry * dentry, struct vfsmount * vfsmnt, int err = 0; struct dcookie_struct * dcs; - down(&dcookie_sem); + mutex_lock(&dcookie_mutex); if (!is_live()) { err = -EINVAL; @@ -134,7 +135,7 @@ int get_dcookie(struct dentry * dentry, struct vfsmount * vfsmnt, *cookie = dcookie_value(dcs); out: - up(&dcookie_sem); + mutex_unlock(&dcookie_mutex); return err; } @@ -157,7 +158,7 @@ asmlinkage long sys_lookup_dcookie(u64 cookie64, char __user * buf, size_t len) if (!capable(CAP_SYS_ADMIN)) return -EPERM; - down(&dcookie_sem); + mutex_lock(&dcookie_mutex); if (!is_live()) { err = -EINVAL; @@ -192,7 +193,7 @@ asmlinkage long sys_lookup_dcookie(u64 cookie64, char __user * buf, size_t len) out_free: kfree(kbuf); out: - up(&dcookie_sem); + mutex_unlock(&dcookie_mutex); return err; } @@ -290,7 +291,7 @@ struct dcookie_user * dcookie_register(void) { struct dcookie_user * user; - down(&dcookie_sem); + mutex_lock(&dcookie_mutex); user = kmalloc(sizeof(struct dcookie_user), GFP_KERNEL); if (!user) @@ -302,7 +303,7 @@ struct dcookie_user * dcookie_register(void) list_add(&user->next, &dcookie_users); out: - up(&dcookie_sem); + mutex_unlock(&dcookie_mutex); return user; out_free: kfree(user); @@ -313,7 +314,7 @@ out_free: void dcookie_unregister(struct dcookie_user * user) { - down(&dcookie_sem); + mutex_lock(&dcookie_mutex); list_del(&user->next); kfree(user); @@ -321,7 +322,7 @@ void dcookie_unregister(struct dcookie_user * user) if (!is_live()) 
dcookie_exit(); - up(&dcookie_sem); + mutex_unlock(&dcookie_mutex); } EXPORT_SYMBOL_GPL(dcookie_register); diff --git a/fs/debugfs/file.c b/fs/debugfs/file.c index 40c4fc973fad..66a505422e5c 100644 --- a/fs/debugfs/file.c +++ b/fs/debugfs/file.c @@ -39,7 +39,7 @@ static int default_open(struct inode *inode, struct file *file) return 0; } -struct file_operations debugfs_file_operations = { +const struct file_operations debugfs_file_operations = { .read = default_read_file, .write = default_write_file, .open = default_open, @@ -213,7 +213,7 @@ static ssize_t write_file_bool(struct file *file, const char __user *user_buf, return count; } -static struct file_operations fops_bool = { +static const struct file_operations fops_bool = { .read = read_file_bool, .write = write_file_bool, .open = default_open, diff --git a/fs/debugfs/inode.c b/fs/debugfs/inode.c index d4f1a2cddd47..85d166cdcae4 100644 --- a/fs/debugfs/inode.c +++ b/fs/debugfs/inode.c @@ -191,7 +191,7 @@ static int debugfs_create_by_name(const char *name, mode_t mode, */ struct dentry *debugfs_create_file(const char *name, mode_t mode, struct dentry *parent, void *data, - struct file_operations *fops) + const struct file_operations *fops) { struct dentry *dentry = NULL; int error; diff --git a/fs/devfs/base.c b/fs/devfs/base.c index b621521e09d4..52f5059c4f31 100644 --- a/fs/devfs/base.c +++ b/fs/devfs/base.c @@ -856,14 +856,14 @@ static int devfsd_close(struct inode *inode, struct file *file); #ifdef CONFIG_DEVFS_DEBUG static ssize_t stat_read(struct file *file, char __user *buf, size_t len, loff_t * ppos); -static struct file_operations stat_fops = { +static const struct file_operations stat_fops = { .open = nonseekable_open, .read = stat_read, }; #endif /* Devfs daemon file operations */ -static struct file_operations devfsd_fops = { +static const struct file_operations devfsd_fops = { .open = nonseekable_open, .read = devfsd_read, .ioctl = devfsd_ioctl, @@ -1842,8 +1842,8 @@ static int try_modload(struct devfs_entry *parent, struct fs_info *fs_info, static struct inode_operations devfs_iops; static struct inode_operations devfs_dir_iops; -static struct file_operations devfs_fops; -static struct file_operations devfs_dir_fops; +static const struct file_operations devfs_fops; +static const struct file_operations devfs_dir_fops; static struct inode_operations devfs_symlink_iops; static int devfs_notify_change(struct dentry *dentry, struct iattr *iattr) @@ -2061,11 +2061,11 @@ static int devfs_open(struct inode *inode, struct file *file) return err; } /* End Function devfs_open */ -static struct file_operations devfs_fops = { +static const struct file_operations devfs_fops = { .open = devfs_open, }; -static struct file_operations devfs_dir_fops = { +static const struct file_operations devfs_dir_fops = { .read = generic_read_dir, .readdir = devfs_readdir, }; diff --git a/fs/direct-io.c b/fs/direct-io.c index 235ed8d1f11e..9d1d2aa73e42 100644 --- a/fs/direct-io.c +++ b/fs/direct-io.c @@ -86,12 +86,12 @@ struct dio { unsigned first_block_in_page; /* doesn't change, Used only once */ int boundary; /* prev block is at a boundary */ int reap_counter; /* rate limit reaping */ - get_blocks_t *get_blocks; /* block mapping function */ + get_block_t *get_block; /* block mapping function */ dio_iodone_t *end_io; /* IO completion function */ sector_t final_block_in_bio; /* current final block in bio + 1 */ sector_t next_block_for_io; /* next block to be put under IO, in dio_blocks units */ - struct buffer_head map_bh; /* last get_blocks() result 
*/ + struct buffer_head map_bh; /* last get_block() result */ /* * Deferred addition of a page to the dio. These variables are @@ -211,9 +211,9 @@ static struct page *dio_get_page(struct dio *dio) /* * Called when all DIO BIO I/O has been completed - let the filesystem - * know, if it registered an interest earlier via get_blocks. Pass the + * know, if it registered an interest earlier via get_block. Pass the * private field of the map buffer_head so that filesystems can use it - * to hold additional state between get_blocks calls and dio_complete. + * to hold additional state between get_block calls and dio_complete. */ static void dio_complete(struct dio *dio, loff_t offset, ssize_t bytes) { @@ -493,7 +493,7 @@ static int dio_bio_reap(struct dio *dio) * The fs is allowed to map lots of blocks at once. If it wants to do that, * it uses the passed inode-relative block number as the file offset, as usual. * - * get_blocks() is passed the number of i_blkbits-sized blocks which direct_io + * get_block() is passed the number of i_blkbits-sized blocks which direct_io * has remaining to do. The fs should not map more than this number of blocks. * * If the fs has mapped a lot of blocks, it should populate bh->b_size to @@ -506,7 +506,7 @@ static int dio_bio_reap(struct dio *dio) * In the case of filesystem holes: the fs may return an arbitrarily-large * hole by returning an appropriate value in b_size and by clearing * buffer_mapped(). However the direct-io code will only process holes one - * block at a time - it will repeatedly call get_blocks() as it walks the hole. + * block at a time - it will repeatedly call get_block() as it walks the hole. */ static int get_more_blocks(struct dio *dio) { @@ -548,7 +548,8 @@ static int get_more_blocks(struct dio *dio) * at a higher level for inside-i_size block-instantiating * writes. */ - ret = (*dio->get_blocks)(dio->inode, fs_startblk, fs_count, + map_bh->b_size = fs_count << dio->blkbits; + ret = (*dio->get_block)(dio->inode, fs_startblk, map_bh, create); } return ret; @@ -783,11 +784,11 @@ static void dio_zero_block(struct dio *dio, int end) * happily perform page-sized but 512-byte aligned IOs. It is important that * blockdev IO be able to have fine alignment and large sizes. * - * So what we do is to permit the ->get_blocks function to populate bh.b_size + * So what we do is to permit the ->get_block function to populate bh.b_size * with the size of IO which is permitted at this offset and this i_blkbits. * * For best results, the blockdev should be set up with 512-byte i_blkbits and - * it should set b_size to PAGE_SIZE or more inside get_blocks(). This gives + * it should set b_size to PAGE_SIZE or more inside get_block(). This gives * fine alignment but still allows this function to work in PAGE_SIZE units. 
*/ static int do_direct_IO(struct dio *dio) @@ -947,7 +948,7 @@ out: static ssize_t direct_io_worker(int rw, struct kiocb *iocb, struct inode *inode, const struct iovec *iov, loff_t offset, unsigned long nr_segs, - unsigned blkbits, get_blocks_t get_blocks, dio_iodone_t end_io, + unsigned blkbits, get_block_t get_block, dio_iodone_t end_io, struct dio *dio) { unsigned long user_addr; @@ -969,7 +970,7 @@ direct_io_worker(int rw, struct kiocb *iocb, struct inode *inode, dio->boundary = 0; dio->reap_counter = 0; - dio->get_blocks = get_blocks; + dio->get_block = get_block; dio->end_io = end_io; dio->map_bh.b_private = NULL; dio->final_block_in_bio = -1; @@ -1177,7 +1178,7 @@ direct_io_worker(int rw, struct kiocb *iocb, struct inode *inode, ssize_t __blockdev_direct_IO(int rw, struct kiocb *iocb, struct inode *inode, struct block_device *bdev, const struct iovec *iov, loff_t offset, - unsigned long nr_segs, get_blocks_t get_blocks, dio_iodone_t end_io, + unsigned long nr_segs, get_block_t get_block, dio_iodone_t end_io, int dio_lock_type) { int seg; @@ -1273,7 +1274,7 @@ __blockdev_direct_IO(int rw, struct kiocb *iocb, struct inode *inode, (end > i_size_read(inode))); retval = direct_io_worker(rw, iocb, inode, iov, offset, - nr_segs, blkbits, get_blocks, end_io, dio); + nr_segs, blkbits, get_block, end_io, dio); if (rw == READ && dio_lock_type == DIO_LOCKING) release_i_mutex = 0; diff --git a/fs/dnotify.c b/fs/dnotify.c index f3b540dd5d11..f932591df5a4 100644 --- a/fs/dnotify.c +++ b/fs/dnotify.c @@ -21,9 +21,9 @@ #include <linux/spinlock.h> #include <linux/slab.h> -int dir_notify_enable = 1; +int dir_notify_enable __read_mostly = 1; -static kmem_cache_t *dn_cache; +static kmem_cache_t *dn_cache __read_mostly; static void redo_inode_mask(struct inode *inode) { diff --git a/fs/efs/dir.c b/fs/efs/dir.c index 777c614ff360..17f5b2d3c16a 100644 --- a/fs/efs/dir.c +++ b/fs/efs/dir.c @@ -10,7 +10,7 @@ static int efs_readdir(struct file *, void *, filldir_t); -struct file_operations efs_dir_operations = { +const struct file_operations efs_dir_operations = { .read = generic_read_dir, .readdir = efs_readdir, }; diff --git a/fs/eventpoll.c b/fs/eventpoll.c index a0f682cdd03e..242fe1a66ce5 100644 --- a/fs/eventpoll.c +++ b/fs/eventpoll.c @@ -281,16 +281,16 @@ static struct mutex epmutex; static struct poll_safewake psw; /* Slab cache used to allocate "struct epitem" */ -static kmem_cache_t *epi_cache; +static kmem_cache_t *epi_cache __read_mostly; /* Slab cache used to allocate "struct eppoll_entry" */ -static kmem_cache_t *pwq_cache; +static kmem_cache_t *pwq_cache __read_mostly; /* Virtual fs used to allocate inodes for eventpoll files */ -static struct vfsmount *eventpoll_mnt; +static struct vfsmount *eventpoll_mnt __read_mostly; /* File callbacks that implement the eventpoll file behaviour */ -static struct file_operations eventpoll_fops = { +static const struct file_operations eventpoll_fops = { .release = ep_eventpoll_close, .poll = ep_eventpoll_poll }; diff --git a/fs/exec.c b/fs/exec.c index 995cba3c62b8..c7397c46ad6d 100644 --- a/fs/exec.c +++ b/fs/exec.c @@ -632,7 +632,7 @@ static int de_thread(struct task_struct *tsk) * synchronize with any firing (by calling del_timer_sync) * before we can safely let the old group leader die. 
*/ - sig->real_timer.data = current; + sig->tsk = current; spin_unlock_irq(lock); if (hrtimer_cancel(&sig->real_timer)) hrtimer_restart(&sig->real_timer); diff --git a/fs/ext2/dir.c b/fs/ext2/dir.c index b3dbd716cd3a..d672aa9f4061 100644 --- a/fs/ext2/dir.c +++ b/fs/ext2/dir.c @@ -416,8 +416,7 @@ void ext2_set_link(struct inode *dir, struct ext2_dir_entry_2 *de, lock_page(page); err = page->mapping->a_ops->prepare_write(NULL, page, from, to); - if (err) - BUG(); + BUG_ON(err); de->inode = cpu_to_le32(inode->i_ino); ext2_set_de_type (de, inode); err = ext2_commit_chunk(page, from, to); @@ -554,8 +553,7 @@ int ext2_delete_entry (struct ext2_dir_entry_2 * dir, struct page * page ) from = (char*)pde - (char*)page_address(page); lock_page(page); err = mapping->a_ops->prepare_write(NULL, page, from, to); - if (err) - BUG(); + BUG_ON(err); if (pde) pde->rec_len = cpu_to_le16(to-from); dir->inode = 0; @@ -660,7 +658,7 @@ not_empty: return 0; } -struct file_operations ext2_dir_operations = { +const struct file_operations ext2_dir_operations = { .llseek = generic_file_llseek, .read = generic_read_dir, .readdir = ext2_readdir, diff --git a/fs/ext2/ext2.h b/fs/ext2/ext2.h index 11035ac7986f..9f74a62be555 100644 --- a/fs/ext2/ext2.h +++ b/fs/ext2/ext2.h @@ -154,12 +154,12 @@ extern void ext2_write_super (struct super_block *); */ /* dir.c */ -extern struct file_operations ext2_dir_operations; +extern const struct file_operations ext2_dir_operations; /* file.c */ extern struct inode_operations ext2_file_inode_operations; -extern struct file_operations ext2_file_operations; -extern struct file_operations ext2_xip_file_operations; +extern const struct file_operations ext2_file_operations; +extern const struct file_operations ext2_xip_file_operations; /* inode.c */ extern struct address_space_operations ext2_aops; diff --git a/fs/ext2/file.c b/fs/ext2/file.c index a484412fc782..509cceca04db 100644 --- a/fs/ext2/file.c +++ b/fs/ext2/file.c @@ -39,7 +39,7 @@ static int ext2_release_file (struct inode * inode, struct file * filp) * We have mostly NULL's here: the current defaults are ok for * the ext2 filesystem. 
*/ -struct file_operations ext2_file_operations = { +const struct file_operations ext2_file_operations = { .llseek = generic_file_llseek, .read = generic_file_read, .write = generic_file_write, @@ -56,7 +56,7 @@ struct file_operations ext2_file_operations = { }; #ifdef CONFIG_EXT2_FS_XIP -struct file_operations ext2_xip_file_operations = { +const struct file_operations ext2_xip_file_operations = { .llseek = generic_file_llseek, .read = xip_file_read, .write = xip_file_write, diff --git a/fs/ext2/inode.c b/fs/ext2/inode.c index a717837f272e..04af9c45dce2 100644 --- a/fs/ext2/inode.c +++ b/fs/ext2/inode.c @@ -667,18 +667,6 @@ static sector_t ext2_bmap(struct address_space *mapping, sector_t block) return generic_block_bmap(mapping,block,ext2_get_block); } -static int -ext2_get_blocks(struct inode *inode, sector_t iblock, unsigned long max_blocks, - struct buffer_head *bh_result, int create) -{ - int ret; - - ret = ext2_get_block(inode, iblock, bh_result, create); - if (ret == 0) - bh_result->b_size = (1 << inode->i_blkbits); - return ret; -} - static ssize_t ext2_direct_IO(int rw, struct kiocb *iocb, const struct iovec *iov, loff_t offset, unsigned long nr_segs) @@ -687,7 +675,7 @@ ext2_direct_IO(int rw, struct kiocb *iocb, const struct iovec *iov, struct inode *inode = file->f_mapping->host; return blockdev_direct_IO(rw, iocb, inode, inode->i_sb->s_bdev, iov, - offset, nr_segs, ext2_get_blocks, NULL); + offset, nr_segs, ext2_get_block, NULL); } static int diff --git a/fs/ext3/balloc.c b/fs/ext3/balloc.c index 46623f77666b..77927d6938f6 100644 --- a/fs/ext3/balloc.c +++ b/fs/ext3/balloc.c @@ -653,9 +653,11 @@ claim_block(spinlock_t *lock, int block, struct buffer_head *bh) */ static int ext3_try_to_allocate(struct super_block *sb, handle_t *handle, int group, - struct buffer_head *bitmap_bh, int goal, struct ext3_reserve_window *my_rsv) + struct buffer_head *bitmap_bh, int goal, + unsigned long *count, struct ext3_reserve_window *my_rsv) { int group_first_block, start, end; + unsigned long num = 0; /* we do allocation within the reservation window if we have a window */ if (my_rsv) { @@ -713,8 +715,18 @@ repeat: goto fail_access; goto repeat; } - return goal; + num++; + goal++; + while (num < *count && goal < end + && ext3_test_allocatable(goal, bitmap_bh) + && claim_block(sb_bgl_lock(EXT3_SB(sb), group), goal, bitmap_bh)) { + num++; + goal++; + } + *count = num; + return goal - num; fail_access: + *count = num; return -1; } @@ -999,6 +1011,31 @@ retry: goto retry; } +static void try_to_extend_reservation(struct ext3_reserve_window_node *my_rsv, + struct super_block *sb, int size) +{ + struct ext3_reserve_window_node *next_rsv; + struct rb_node *next; + spinlock_t *rsv_lock = &EXT3_SB(sb)->s_rsv_window_lock; + + if (!spin_trylock(rsv_lock)) + return; + + next = rb_next(&my_rsv->rsv_node); + + if (!next) + my_rsv->rsv_end += size; + else { + next_rsv = list_entry(next, struct ext3_reserve_window_node, rsv_node); + + if ((next_rsv->rsv_start - my_rsv->rsv_end - 1) >= size) + my_rsv->rsv_end += size; + else + my_rsv->rsv_end = next_rsv->rsv_start - 1; + } + spin_unlock(rsv_lock); +} + /* * This is the main function used to allocate a new block and its reservation * window. 
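The fs/ext3/balloc.c changes in the surrounding hunks extend the allocator to hand out a run of contiguous blocks per call: ext3_new_blocks() takes an in/out *count and returns the first block of the run (0 on failure, with *errp set), and the old ext3_new_block() becomes a count==1 wrapper, as shown a few hunks below. A minimal sketch of a hypothetical caller (my_alloc_extent is not in the patch) using the new contract:

static unsigned long my_alloc_extent(handle_t *handle, struct inode *inode,
				     unsigned long goal, int *errp)
{
	unsigned long count = 8;	/* ask for up to 8 contiguous blocks */
	unsigned long first;

	first = ext3_new_blocks(handle, inode, goal, &count, errp);
	if (*errp)
		return 0;

	/*
	 * Blocks first .. first + count - 1 are now allocated; count may
	 * have been trimmed, so a caller needing the full run must loop.
	 */
	return first;
}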
@@ -1024,11 +1061,12 @@ static int ext3_try_to_allocate_with_rsv(struct super_block *sb, handle_t *handle, unsigned int group, struct buffer_head *bitmap_bh, int goal, struct ext3_reserve_window_node * my_rsv, - int *errp) + unsigned long *count, int *errp) { unsigned long group_first_block; int ret = 0; int fatal; + unsigned long num = *count; *errp = 0; @@ -1051,7 +1089,8 @@ ext3_try_to_allocate_with_rsv(struct super_block *sb, handle_t *handle, * or last attempt to allocate a block with reservation turned on failed */ if (my_rsv == NULL ) { - ret = ext3_try_to_allocate(sb, handle, group, bitmap_bh, goal, NULL); + ret = ext3_try_to_allocate(sb, handle, group, bitmap_bh, + goal, count, NULL); goto out; } /* @@ -1081,6 +1120,8 @@ ext3_try_to_allocate_with_rsv(struct super_block *sb, handle_t *handle, while (1) { if (rsv_is_empty(&my_rsv->rsv_window) || (ret < 0) || !goal_in_my_reservation(&my_rsv->rsv_window, goal, group, sb)) { + if (my_rsv->rsv_goal_size < *count) + my_rsv->rsv_goal_size = *count; ret = alloc_new_reservation(my_rsv, goal, sb, group, bitmap_bh); if (ret < 0) @@ -1088,16 +1129,21 @@ ext3_try_to_allocate_with_rsv(struct super_block *sb, handle_t *handle, if (!goal_in_my_reservation(&my_rsv->rsv_window, goal, group, sb)) goal = -1; - } + } else if (goal > 0 && (my_rsv->rsv_end-goal+1) < *count) + try_to_extend_reservation(my_rsv, sb, + *count-my_rsv->rsv_end + goal - 1); + if ((my_rsv->rsv_start >= group_first_block + EXT3_BLOCKS_PER_GROUP(sb)) || (my_rsv->rsv_end < group_first_block)) BUG(); ret = ext3_try_to_allocate(sb, handle, group, bitmap_bh, goal, - &my_rsv->rsv_window); + &num, &my_rsv->rsv_window); if (ret >= 0) { - my_rsv->rsv_alloc_hit++; + my_rsv->rsv_alloc_hit += num; + *count = num; break; /* succeed */ } + num = *count; } out: if (ret >= 0) { @@ -1154,8 +1200,8 @@ int ext3_should_retry_alloc(struct super_block *sb, int *retries) * bitmap, and then for any free bit if that fails. * This function also updates quota and i_blocks field. */ -int ext3_new_block(handle_t *handle, struct inode *inode, - unsigned long goal, int *errp) +int ext3_new_blocks(handle_t *handle, struct inode *inode, + unsigned long goal, unsigned long *count, int *errp) { struct buffer_head *bitmap_bh = NULL; struct buffer_head *gdp_bh; @@ -1178,6 +1224,7 @@ int ext3_new_block(handle_t *handle, struct inode *inode, static int goal_hits, goal_attempts; #endif unsigned long ngroups; + unsigned long num = *count; *errp = -ENOSPC; sb = inode->i_sb; @@ -1189,7 +1236,7 @@ int ext3_new_block(handle_t *handle, struct inode *inode, /* * Check quota for allocation of this block. 
*/ - if (DQUOT_ALLOC_BLOCK(inode, 1)) { + if (DQUOT_ALLOC_BLOCK(inode, num)) { *errp = -EDQUOT; return 0; } @@ -1244,7 +1291,7 @@ retry: if (!bitmap_bh) goto io_error; ret_block = ext3_try_to_allocate_with_rsv(sb, handle, group_no, - bitmap_bh, ret_block, my_rsv, &fatal); + bitmap_bh, ret_block, my_rsv, &num, &fatal); if (fatal) goto out; if (ret_block >= 0) @@ -1281,7 +1328,7 @@ retry: if (!bitmap_bh) goto io_error; ret_block = ext3_try_to_allocate_with_rsv(sb, handle, group_no, - bitmap_bh, -1, my_rsv, &fatal); + bitmap_bh, -1, my_rsv, &num, &fatal); if (fatal) goto out; if (ret_block >= 0) @@ -1316,13 +1363,15 @@ allocated: target_block = ret_block + group_no * EXT3_BLOCKS_PER_GROUP(sb) + le32_to_cpu(es->s_first_data_block); - if (target_block == le32_to_cpu(gdp->bg_block_bitmap) || - target_block == le32_to_cpu(gdp->bg_inode_bitmap) || + if (in_range(le32_to_cpu(gdp->bg_block_bitmap), target_block, num) || + in_range(le32_to_cpu(gdp->bg_inode_bitmap), target_block, num) || in_range(target_block, le32_to_cpu(gdp->bg_inode_table), + EXT3_SB(sb)->s_itb_per_group) || + in_range(target_block + num - 1, le32_to_cpu(gdp->bg_inode_table), EXT3_SB(sb)->s_itb_per_group)) ext3_error(sb, "ext3_new_block", "Allocating block in system zone - " - "block = %u", target_block); + "blocks from %u, length %lu", target_block, num); performed_allocation = 1; @@ -1341,10 +1390,14 @@ allocated: jbd_lock_bh_state(bitmap_bh); spin_lock(sb_bgl_lock(sbi, group_no)); if (buffer_jbd(bitmap_bh) && bh2jh(bitmap_bh)->b_committed_data) { - if (ext3_test_bit(ret_block, - bh2jh(bitmap_bh)->b_committed_data)) { - printk("%s: block was unexpectedly set in " - "b_committed_data\n", __FUNCTION__); + int i; + + for (i = 0; i < num; i++) { + if (ext3_test_bit(ret_block, + bh2jh(bitmap_bh)->b_committed_data)) { + printk("%s: block was unexpectedly set in " + "b_committed_data\n", __FUNCTION__); + } } } ext3_debug("found bit %d\n", ret_block); @@ -1355,7 +1408,7 @@ allocated: /* ret_block was blockgroup-relative. 
Now it becomes fs-relative */ ret_block = target_block; - if (ret_block >= le32_to_cpu(es->s_blocks_count)) { + if (ret_block + num - 1 >= le32_to_cpu(es->s_blocks_count)) { ext3_error(sb, "ext3_new_block", "block(%d) >= blocks count(%d) - " "block_group = %d, es == %p ", ret_block, @@ -1373,9 +1426,9 @@ allocated: spin_lock(sb_bgl_lock(sbi, group_no)); gdp->bg_free_blocks_count = - cpu_to_le16(le16_to_cpu(gdp->bg_free_blocks_count) - 1); + cpu_to_le16(le16_to_cpu(gdp->bg_free_blocks_count) - num); spin_unlock(sb_bgl_lock(sbi, group_no)); - percpu_counter_mod(&sbi->s_freeblocks_counter, -1); + percpu_counter_mod(&sbi->s_freeblocks_counter, -num); BUFFER_TRACE(gdp_bh, "journal_dirty_metadata for group descriptor"); err = ext3_journal_dirty_metadata(handle, gdp_bh); @@ -1388,6 +1441,8 @@ allocated: *errp = 0; brelse(bitmap_bh); + DQUOT_FREE_BLOCK(inode, *count-num); + *count = num; return ret_block; io_error: @@ -1401,11 +1456,19 @@ out: * Undo the block allocation */ if (!performed_allocation) - DQUOT_FREE_BLOCK(inode, 1); + DQUOT_FREE_BLOCK(inode, *count); brelse(bitmap_bh); return 0; } +int ext3_new_block(handle_t *handle, struct inode *inode, + unsigned long goal, int *errp) +{ + unsigned long count = 1; + + return ext3_new_blocks(handle, inode, goal, &count, errp); +} + unsigned long ext3_count_free_blocks(struct super_block *sb) { unsigned long desc_count; diff --git a/fs/ext3/dir.c b/fs/ext3/dir.c index 773459164bb2..f37528ed222e 100644 --- a/fs/ext3/dir.c +++ b/fs/ext3/dir.c @@ -39,7 +39,7 @@ static int ext3_dx_readdir(struct file * filp, static int ext3_release_dir (struct inode * inode, struct file * filp); -struct file_operations ext3_dir_operations = { +const struct file_operations ext3_dir_operations = { .llseek = generic_file_llseek, .read = generic_read_dir, .readdir = ext3_readdir, /* we take BKL. needed?*/ @@ -131,8 +131,9 @@ static int ext3_readdir(struct file * filp, struct buffer_head *bh = NULL; map_bh.b_state = 0; - err = ext3_get_block_handle(NULL, inode, blk, &map_bh, 0, 0); - if (!err) { + err = ext3_get_blocks_handle(NULL, inode, blk, 1, + &map_bh, 0, 0); + if (err > 0) { page_cache_readahead(sb->s_bdev->bd_inode->i_mapping, &filp->f_ra, filp, diff --git a/fs/ext3/file.c b/fs/ext3/file.c index 59098ea56711..783a796220bb 100644 --- a/fs/ext3/file.c +++ b/fs/ext3/file.c @@ -105,7 +105,7 @@ force_commit: return ret; } -struct file_operations ext3_file_operations = { +const struct file_operations ext3_file_operations = { .llseek = generic_file_llseek, .read = do_sync_read, .write = do_sync_write, diff --git a/fs/ext3/inode.c b/fs/ext3/inode.c index 2c361377e0a5..48ae0339af17 100644 --- a/fs/ext3/inode.c +++ b/fs/ext3/inode.c @@ -44,16 +44,16 @@ static int ext3_writepage_trans_blocks(struct inode *inode); /* * Test whether an inode is a fast symlink. */ -static inline int ext3_inode_is_fast_symlink(struct inode *inode) +static int ext3_inode_is_fast_symlink(struct inode *inode) { int ea_blocks = EXT3_I(inode)->i_file_acl ? (inode->i_sb->s_blocksize >> 9) : 0; - return (S_ISLNK(inode->i_mode) && - inode->i_blocks - ea_blocks == 0); + return (S_ISLNK(inode->i_mode) && inode->i_blocks - ea_blocks == 0); } -/* The ext3 forget function must perform a revoke if we are freeing data +/* + * The ext3 forget function must perform a revoke if we are freeing data * which has been journaled. Metadata (eg. indirect blocks) must be * revoked in all cases. 
* @@ -61,10 +61,8 @@ static inline int ext3_inode_is_fast_symlink(struct inode *inode) * but there may still be a record of it in the journal, and that record * still needs to be revoked. */ - -int ext3_forget(handle_t *handle, int is_metadata, - struct inode *inode, struct buffer_head *bh, - int blocknr) +int ext3_forget(handle_t *handle, int is_metadata, struct inode *inode, + struct buffer_head *bh, int blocknr) { int err; @@ -104,10 +102,9 @@ int ext3_forget(handle_t *handle, int is_metadata, } /* - * Work out how many blocks we need to progress with the next chunk of a + * Work out how many blocks we need to proceed with the next chunk of a * truncate transaction. */ - static unsigned long blocks_for_truncate(struct inode *inode) { unsigned long needed; @@ -141,7 +138,6 @@ static unsigned long blocks_for_truncate(struct inode *inode) * extend fails, we need to propagate the failure up and restart the * transaction in the top-level truncate loop. --sct */ - static handle_t *start_transaction(struct inode *inode) { handle_t *result; @@ -194,9 +190,11 @@ void ext3_delete_inode (struct inode * inode) handle = start_transaction(inode); if (IS_ERR(handle)) { - /* If we're going to skip the normal cleanup, we still - * need to make sure that the in-core orphan linked list - * is properly cleaned up. */ + /* + * If we're going to skip the normal cleanup, we still need to + * make sure that the in-core orphan linked list is properly + * cleaned up. + */ ext3_orphan_del(NULL, inode); goto no_delete; } @@ -235,16 +233,6 @@ no_delete: clear_inode(inode); /* We must guarantee clearing of inode... */ } -static int ext3_alloc_block (handle_t *handle, - struct inode * inode, unsigned long goal, int *err) -{ - unsigned long result; - - result = ext3_new_block(handle, inode, goal, err); - return result; -} - - typedef struct { __le32 *p; __le32 key; @@ -257,7 +245,7 @@ static inline void add_chain(Indirect *p, struct buffer_head *bh, __le32 *v) p->bh = bh; } -static inline int verify_chain(Indirect *from, Indirect *to) +static int verify_chain(Indirect *from, Indirect *to) { while (from <= to && from->key == *from->p) from++; @@ -327,10 +315,10 @@ static int ext3_block_to_path(struct inode *inode, offsets[n++] = i_block & (ptrs - 1); final = ptrs; } else { - ext3_warning (inode->i_sb, "ext3_block_to_path", "block > big"); + ext3_warning(inode->i_sb, "ext3_block_to_path", "block > big"); } if (boundary) - *boundary = (i_block & (ptrs - 1)) == (final - 1); + *boundary = final - 1 - (i_block & (ptrs - 1)); return n; } @@ -419,7 +407,6 @@ no_block: * * Caller must make sure that @ind is valid and will stay that way. */ - static unsigned long ext3_find_near(struct inode *inode, Indirect *ind) { struct ext3_inode_info *ei = EXT3_I(inode); @@ -429,17 +416,18 @@ static unsigned long ext3_find_near(struct inode *inode, Indirect *ind) unsigned long colour; /* Try to find previous block */ - for (p = ind->p - 1; p >= start; p--) + for (p = ind->p - 1; p >= start; p--) { if (*p) return le32_to_cpu(*p); + } /* No such thing, so let's try location of indirect block */ if (ind->bh) return ind->bh->b_blocknr; /* - * It is going to be refered from inode itself? OK, just put it into - * the same cylinder group then. + * It is going to be referred to from the inode itself? OK, just put it + * into the same cylinder group then. 
*/ bg_start = (ei->i_block_group * EXT3_BLOCKS_PER_GROUP(inode->i_sb)) + le32_to_cpu(EXT3_SB(inode->i_sb)->s_es->s_first_data_block); @@ -463,7 +451,9 @@ static unsigned long ext3_find_near(struct inode *inode, Indirect *ind) static unsigned long ext3_find_goal(struct inode *inode, long block, Indirect chain[4], Indirect *partial) { - struct ext3_block_alloc_info *block_i = EXT3_I(inode)->i_block_alloc_info; + struct ext3_block_alloc_info *block_i; + + block_i = EXT3_I(inode)->i_block_alloc_info; /* * try the heuristic for sequential allocation, @@ -478,13 +468,113 @@ static unsigned long ext3_find_goal(struct inode *inode, long block, } /** + * ext3_blks_to_allocate: Look up the block map and count the number + * of direct blocks need to be allocated for the given branch. + * + * @branch: chain of indirect blocks + * @k: number of blocks need for indirect blocks + * @blks: number of data blocks to be mapped. + * @blocks_to_boundary: the offset in the indirect block + * + * return the total number of blocks to be allocate, including the + * direct and indirect blocks. + */ +static int ext3_blks_to_allocate(Indirect *branch, int k, unsigned long blks, + int blocks_to_boundary) +{ + unsigned long count = 0; + + /* + * Simple case, [t,d]Indirect block(s) has not allocated yet + * then it's clear blocks on that path have not allocated + */ + if (k > 0) { + /* right now we don't handle cross boundary allocation */ + if (blks < blocks_to_boundary + 1) + count += blks; + else + count += blocks_to_boundary + 1; + return count; + } + + count++; + while (count < blks && count <= blocks_to_boundary && + le32_to_cpu(*(branch[0].p + count)) == 0) { + count++; + } + return count; +} + +/** + * ext3_alloc_blocks: multiple allocate blocks needed for a branch + * @indirect_blks: the number of blocks need to allocate for indirect + * blocks + * + * @new_blocks: on return it will store the new block numbers for + * the indirect blocks(if needed) and the first direct block, + * @blks: on return it will store the total number of allocated + * direct blocks + */ +static int ext3_alloc_blocks(handle_t *handle, struct inode *inode, + unsigned long goal, int indirect_blks, int blks, + unsigned long long new_blocks[4], int *err) +{ + int target, i; + unsigned long count = 0; + int index = 0; + unsigned long current_block = 0; + int ret = 0; + + /* + * Here we try to allocate the requested multiple blocks at once, + * on a best-effort basis. + * To build a branch, we should allocate blocks for + * the indirect blocks(if not allocated yet), and at least + * the first direct block of this branch. That's the + * minimum number of blocks need to allocate(required) + */ + target = blks + indirect_blks; + + while (1) { + count = target; + /* allocating blocks for indirect blocks and direct blocks */ + current_block = ext3_new_blocks(handle,inode,goal,&count,err); + if (*err) + goto failed_out; + + target -= count; + /* allocate blocks for indirect blocks */ + while (index < indirect_blks && count) { + new_blocks[index++] = current_block++; + count--; + } + + if (count > 0) + break; + } + + /* save the new block number for the first direct block */ + new_blocks[index] = current_block; + + /* total number of blocks allocated for direct blocks */ + ret = count; + *err = 0; + return ret; +failed_out: + for (i = 0; i <index; i++) + ext3_free_blocks(handle, inode, new_blocks[i], 1); + return ret; +} + +/** * ext3_alloc_branch - allocate and set up a chain of blocks. 
* @inode: owner - * @num: depth of the chain (number of blocks to allocate) + * @indirect_blks: number of allocated indirect blocks + * @blks: number of allocated direct blocks * @offsets: offsets (in the blocks) to store the pointers to next. * @branch: place to store the chain in. * - * This function allocates @num blocks, zeroes out all but the last one, + * This function allocates blocks, zeroes out all but the last one, * links them into chain and (if we are synchronous) writes them to disk. * In other words, it prepares a branch that can be spliced onto the * inode. It stores the information about that chain in the branch[], in @@ -501,97 +591,106 @@ static unsigned long ext3_find_goal(struct inode *inode, long block, * ext3_alloc_block() (normally -ENOSPC). Otherwise we set the chain * as described above and return 0. */ - static int ext3_alloc_branch(handle_t *handle, struct inode *inode, - int num, - unsigned long goal, - int *offsets, - Indirect *branch) + int indirect_blks, int *blks, unsigned long goal, + int *offsets, Indirect *branch) { int blocksize = inode->i_sb->s_blocksize; - int n = 0, keys = 0; + int i, n = 0; int err = 0; - int i; - int parent = ext3_alloc_block(handle, inode, goal, &err); - - branch[0].key = cpu_to_le32(parent); - if (parent) { - for (n = 1; n < num; n++) { - struct buffer_head *bh; - /* Allocate the next block */ - int nr = ext3_alloc_block(handle, inode, parent, &err); - if (!nr) - break; - branch[n].key = cpu_to_le32(nr); + struct buffer_head *bh; + int num; + unsigned long long new_blocks[4]; + unsigned long long current_block; - /* - * Get buffer_head for parent block, zero it out - * and set the pointer to new one, then send - * parent to disk. - */ - bh = sb_getblk(inode->i_sb, parent); - if (!bh) - break; - keys = n+1; - branch[n].bh = bh; - lock_buffer(bh); - BUFFER_TRACE(bh, "call get_create_access"); - err = ext3_journal_get_create_access(handle, bh); - if (err) { - unlock_buffer(bh); - brelse(bh); - break; - } + num = ext3_alloc_blocks(handle, inode, goal, indirect_blks, + *blks, new_blocks, &err); + if (err) + return err; - memset(bh->b_data, 0, blocksize); - branch[n].p = (__le32*) bh->b_data + offsets[n]; - *branch[n].p = branch[n].key; - BUFFER_TRACE(bh, "marking uptodate"); - set_buffer_uptodate(bh); + branch[0].key = cpu_to_le32(new_blocks[0]); + /* + * metadata blocks and data blocks are allocated. + */ + for (n = 1; n <= indirect_blks; n++) { + /* + * Get buffer_head for parent block, zero it out + * and set the pointer to new one, then send + * parent to disk. 
+ */ + bh = sb_getblk(inode->i_sb, new_blocks[n-1]); + branch[n].bh = bh; + lock_buffer(bh); + BUFFER_TRACE(bh, "call get_create_access"); + err = ext3_journal_get_create_access(handle, bh); + if (err) { unlock_buffer(bh); + brelse(bh); + goto failed; + } - BUFFER_TRACE(bh, "call ext3_journal_dirty_metadata"); - err = ext3_journal_dirty_metadata(handle, bh); - if (err) - break; - - parent = nr; + memset(bh->b_data, 0, blocksize); + branch[n].p = (__le32 *) bh->b_data + offsets[n]; + branch[n].key = cpu_to_le32(new_blocks[n]); + *branch[n].p = branch[n].key; + if ( n == indirect_blks) { + current_block = new_blocks[n]; + /* + * End of chain, update the last new metablock of + * the chain to point to the new allocated + * data blocks numbers + */ + for (i=1; i < num; i++) + *(branch[n].p + i) = cpu_to_le32(++current_block); } - } - if (n == num) - return 0; + BUFFER_TRACE(bh, "marking uptodate"); + set_buffer_uptodate(bh); + unlock_buffer(bh); + BUFFER_TRACE(bh, "call ext3_journal_dirty_metadata"); + err = ext3_journal_dirty_metadata(handle, bh); + if (err) + goto failed; + } + *blks = num; + return err; +failed: /* Allocation failed, free what we already allocated */ - for (i = 1; i < keys; i++) { + for (i = 1; i <= n ; i++) { BUFFER_TRACE(branch[i].bh, "call journal_forget"); ext3_journal_forget(handle, branch[i].bh); } - for (i = 0; i < keys; i++) - ext3_free_blocks(handle, inode, le32_to_cpu(branch[i].key), 1); + for (i = 0; i <indirect_blks; i++) + ext3_free_blocks(handle, inode, new_blocks[i], 1); + + ext3_free_blocks(handle, inode, new_blocks[i], num); + return err; } /** - * ext3_splice_branch - splice the allocated branch onto inode. - * @inode: owner - * @block: (logical) number of block we are adding - * @chain: chain of indirect blocks (with a missing link - see - * ext3_alloc_branch) - * @where: location of missing link - * @num: number of blocks we are adding - * - * This function fills the missing link and does all housekeeping needed in - * inode (->i_blocks, etc.). In case of success we end up with the full - * chain to new block and return 0. + * ext3_splice_branch - splice the allocated branch onto inode. + * @inode: owner + * @block: (logical) number of block we are adding + * @chain: chain of indirect blocks (with a missing link - see + * ext3_alloc_branch) + * @where: location of missing link + * @num: number of indirect blocks we are adding + * @blks: number of direct blocks we are adding + * + * This function fills the missing link and does all housekeeping needed in + * inode (->i_blocks, etc.). In case of success we end up with the full + * chain to new block and return 0. 
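To make the "end of chain" fill-in above concrete, here is a standalone illustration with made-up numbers; only the consecutive numbering and the little-endian conversion are the point, the slot pointer stands in for branch[n].p.

	/*
	 * Illustrative only: after one pass allocates num = 4 contiguous data
	 * blocks whose first block is 1000, the tail of the last indirect
	 * block ends up holding four consecutive little-endian block numbers.
	 */
	static void fill_tail_example(__le32 *slot)	/* stands in for branch[n].p */
	{
		unsigned long current_block = 1000;
		int i, num = 4;

		slot[0] = cpu_to_le32(current_block);		/* 1000 */
		for (i = 1; i < num; i++)
			slot[i] = cpu_to_le32(++current_block);	/* 1001, 1002, 1003 */
	}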
*/ - -static int ext3_splice_branch(handle_t *handle, struct inode *inode, long block, - Indirect chain[4], Indirect *where, int num) +static int ext3_splice_branch(handle_t *handle, struct inode *inode, + long block, Indirect *where, int num, int blks) { int i; int err = 0; - struct ext3_block_alloc_info *block_i = EXT3_I(inode)->i_block_alloc_info; + struct ext3_block_alloc_info *block_i; + unsigned long current_block; + block_i = EXT3_I(inode)->i_block_alloc_info; /* * If we're splicing into a [td]indirect block (as opposed to the * inode) then we need to get write access to the [td]indirect block @@ -608,13 +707,24 @@ static int ext3_splice_branch(handle_t *handle, struct inode *inode, long block, *where->p = where->key; /* + * Update the host buffer_head or inode to point to more just allocated + * direct blocks blocks + */ + if (num == 0 && blks > 1) { + current_block = le32_to_cpu(where->key + 1); + for (i = 1; i < blks; i++) + *(where->p + i ) = cpu_to_le32(current_block++); + } + + /* * update the most recently allocated logical & physical block * in i_block_alloc_info, to assist find the proper goal block for next * allocation */ if (block_i) { - block_i->last_alloc_logical_block = block; - block_i->last_alloc_physical_block = le32_to_cpu(where[num-1].key); + block_i->last_alloc_logical_block = block + blks - 1; + block_i->last_alloc_physical_block = + le32_to_cpu(where[num].key + blks - 1); } /* We are done with atomic stuff, now do the rest of housekeeping */ @@ -625,7 +735,7 @@ static int ext3_splice_branch(handle_t *handle, struct inode *inode, long block, /* had we spliced it onto indirect block? */ if (where->bh) { /* - * akpm: If we spliced it onto an indirect block, we haven't + * If we spliced it onto an indirect block, we haven't * altered the inode. Note however that if it is being spliced * onto an indirect block at the very end of the file (the * file is growing) then we *will* alter the inode to reflect @@ -647,10 +757,13 @@ static int ext3_splice_branch(handle_t *handle, struct inode *inode, long block, return err; err_out: - for (i = 1; i < num; i++) { + for (i = 1; i <= num; i++) { BUFFER_TRACE(where[i].bh, "call journal_forget"); ext3_journal_forget(handle, where[i].bh); + ext3_free_blocks(handle,inode,le32_to_cpu(where[i-1].key),1); } + ext3_free_blocks(handle, inode, le32_to_cpu(where[num].key), blks); + return err; } @@ -666,26 +779,33 @@ err_out: * allocations is needed - we simply release blocks and do not touch anything * reachable from inode. * - * akpm: `handle' can be NULL if create == 0. + * `handle' can be NULL if create == 0. * * The BKL may not be held on entry here. Be sure to take it early. + * return > 0, # of blocks mapped or allocated. + * return = 0, if plain lookup failed. + * return < 0, error case. 
*/ - -int -ext3_get_block_handle(handle_t *handle, struct inode *inode, sector_t iblock, - struct buffer_head *bh_result, int create, int extend_disksize) +int ext3_get_blocks_handle(handle_t *handle, struct inode *inode, + sector_t iblock, unsigned long maxblocks, + struct buffer_head *bh_result, + int create, int extend_disksize) { int err = -EIO; int offsets[4]; Indirect chain[4]; Indirect *partial; unsigned long goal; - int left; - int boundary = 0; - const int depth = ext3_block_to_path(inode, iblock, offsets, &boundary); + int indirect_blks; + int blocks_to_boundary = 0; + int depth; struct ext3_inode_info *ei = EXT3_I(inode); + int count = 0; + unsigned long first_block = 0; + J_ASSERT(handle != NULL || create == 0); + depth = ext3_block_to_path(inode,iblock,offsets,&blocks_to_boundary); if (depth == 0) goto out; @@ -694,8 +814,31 @@ ext3_get_block_handle(handle_t *handle, struct inode *inode, sector_t iblock, /* Simplest case - block found, no allocation needed */ if (!partial) { + first_block = chain[depth - 1].key; clear_buffer_new(bh_result); - goto got_it; + count++; + /*map more blocks*/ + while (count < maxblocks && count <= blocks_to_boundary) { + if (!verify_chain(chain, partial)) { + /* + * Indirect block might be removed by + * truncate while we were reading it. + * Handling of that case: forget what we've + * got now. Flag the err as EAGAIN, so it + * will reread. + */ + err = -EAGAIN; + count = 0; + break; + } + if (le32_to_cpu(*(chain[depth-1].p+count) == + (first_block + count))) + count++; + else + break; + } + if (err != -EAGAIN) + goto got_it; } /* Next simple case - plain lookup or failed read of indirect block */ @@ -723,6 +866,7 @@ ext3_get_block_handle(handle_t *handle, struct inode *inode, sector_t iblock, } partial = ext3_get_branch(inode, depth, offsets, chain, &err); if (!partial) { + count++; mutex_unlock(&ei->truncate_mutex); if (err) goto cleanup; @@ -740,12 +884,19 @@ ext3_get_block_handle(handle_t *handle, struct inode *inode, sector_t iblock, goal = ext3_find_goal(inode, iblock, chain, partial); - left = (chain + depth) - partial; + /* the number of blocks need to allocate for [d,t]indirect blocks */ + indirect_blks = (chain + depth) - partial - 1; /* + * Next look up the indirect map to count the totoal number of + * direct blocks to allocate for this branch. + */ + count = ext3_blks_to_allocate(partial, indirect_blks, + maxblocks, blocks_to_boundary); + /* * Block out ext3_truncate while we alter the tree */ - err = ext3_alloc_branch(handle, inode, left, goal, + err = ext3_alloc_branch(handle, inode, indirect_blks, &count, goal, offsets + (partial - chain), partial); /* @@ -756,8 +907,8 @@ ext3_get_block_handle(handle_t *handle, struct inode *inode, sector_t iblock, * may need to return -EAGAIN upwards in the worst case. --sct */ if (!err) - err = ext3_splice_branch(handle, inode, iblock, chain, - partial, left); + err = ext3_splice_branch(handle, inode, iblock, + partial, indirect_blks, count); /* * i_disksize growing is protected by truncate_mutex. 
Don't forget to * protect it if you're about to implement concurrent @@ -772,8 +923,9 @@ ext3_get_block_handle(handle_t *handle, struct inode *inode, sector_t iblock, set_buffer_new(bh_result); got_it: map_bh(bh_result, inode->i_sb, le32_to_cpu(chain[depth-1].key)); - if (boundary) + if (blocks_to_boundary == 0) set_buffer_boundary(bh_result); + err = count; /* Clean up and exit */ partial = chain + depth - 1; /* the whole chain */ cleanup: @@ -787,34 +939,21 @@ out: return err; } -static int ext3_get_block(struct inode *inode, sector_t iblock, - struct buffer_head *bh_result, int create) -{ - handle_t *handle = NULL; - int ret; - - if (create) { - handle = ext3_journal_current_handle(); - J_ASSERT(handle != 0); - } - ret = ext3_get_block_handle(handle, inode, iblock, - bh_result, create, 1); - return ret; -} - #define DIO_CREDITS (EXT3_RESERVE_TRANS_BLOCKS + 32) -static int -ext3_direct_io_get_blocks(struct inode *inode, sector_t iblock, - unsigned long max_blocks, struct buffer_head *bh_result, - int create) +static int ext3_get_block(struct inode *inode, sector_t iblock, + struct buffer_head *bh_result, int create) { handle_t *handle = journal_current_handle(); int ret = 0; + unsigned max_blocks = bh_result->b_size >> inode->i_blkbits; - if (!handle) + if (!create) goto get_block; /* A read */ + if (max_blocks == 1) + goto get_block; /* A single block get */ + if (handle->h_transaction->t_state == T_LOCKED) { /* * Huge direct-io writes can hold off commits for long @@ -841,18 +980,22 @@ ext3_direct_io_get_blocks(struct inode *inode, sector_t iblock, } get_block: - if (ret == 0) - ret = ext3_get_block_handle(handle, inode, iblock, - bh_result, create, 0); - bh_result->b_size = (1 << inode->i_blkbits); + if (ret == 0) { + ret = ext3_get_blocks_handle(handle, inode, iblock, + max_blocks, bh_result, create, 0); + if (ret > 0) { + bh_result->b_size = (ret << inode->i_blkbits); + ret = 0; + } + } return ret; } /* * `handle' can be NULL if create is zero */ -struct buffer_head *ext3_getblk(handle_t *handle, struct inode * inode, - long block, int create, int * errp) +struct buffer_head *ext3_getblk(handle_t *handle, struct inode *inode, + long block, int create, int *errp) { struct buffer_head dummy; int fatal = 0, err; @@ -862,8 +1005,16 @@ struct buffer_head *ext3_getblk(handle_t *handle, struct inode * inode, dummy.b_state = 0; dummy.b_blocknr = -1000; buffer_trace_init(&dummy.b_history); - *errp = ext3_get_block_handle(handle, inode, block, &dummy, create, 1); - if (!*errp && buffer_mapped(&dummy)) { + err = ext3_get_blocks_handle(handle, inode, block, 1, + &dummy, create, 1); + if (err == 1) { + err = 0; + } else if (err >= 0) { + WARN_ON(1); + err = -EIO; + } + *errp = err; + if (!err && buffer_mapped(&dummy)) { struct buffer_head *bh; bh = sb_getblk(inode->i_sb, dummy.b_blocknr); if (!bh) { @@ -874,17 +1025,18 @@ struct buffer_head *ext3_getblk(handle_t *handle, struct inode * inode, J_ASSERT(create != 0); J_ASSERT(handle != 0); - /* Now that we do not always journal data, we - should keep in mind whether this should - always journal the new buffer as metadata. - For now, regular file writes use - ext3_get_block instead, so it's not a - problem. */ + /* + * Now that we do not always journal data, we should + * keep in mind whether this should always journal the + * new buffer as metadata. For now, regular file + * writes use ext3_get_block instead, so it's not a + * problem. 
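Several filesystems in this patch (ext3 above, and fat/hfs/jfs further down) switch their direct-I/O path from a separate get_blocks callback to a single get_block callback that reads the request size out of bh_result->b_size. A minimal sketch of that convention for a hypothetical filesystem follows; example_map_blocks() is an assumed helper, not a real API.

	static int example_get_block(struct inode *inode, sector_t iblock,
				     struct buffer_head *bh_result, int create)
	{
		/* the caller encodes the maximum mapping size in b_size */
		unsigned long max_blocks = bh_result->b_size >> inode->i_blkbits;
		int mapped;

		/* assumed helper: maps up to max_blocks contiguous blocks */
		mapped = example_map_blocks(inode, iblock, max_blocks,
					    bh_result, create);
		if (mapped <= 0)
			return mapped;		/* 0: hole, <0: error */

		/* report back how much was actually mapped */
		bh_result->b_size = mapped << inode->i_blkbits;
		return 0;
	}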
+ */ lock_buffer(bh); BUFFER_TRACE(bh, "call get_create_access"); fatal = ext3_journal_get_create_access(handle, bh); if (!fatal && !buffer_uptodate(bh)) { - memset(bh->b_data, 0, inode->i_sb->s_blocksize); + memset(bh->b_data,0,inode->i_sb->s_blocksize); set_buffer_uptodate(bh); } unlock_buffer(bh); @@ -906,7 +1058,7 @@ err: return NULL; } -struct buffer_head *ext3_bread(handle_t *handle, struct inode * inode, +struct buffer_head *ext3_bread(handle_t *handle, struct inode *inode, int block, int create, int *err) { struct buffer_head * bh; @@ -982,9 +1134,8 @@ static int walk_page_buffers( handle_t *handle, * is elevated. We'll still have enough credits for the tiny quotafile * write. */ - -static int do_journal_get_write_access(handle_t *handle, - struct buffer_head *bh) +static int do_journal_get_write_access(handle_t *handle, + struct buffer_head *bh) { if (!buffer_mapped(bh) || buffer_freed(bh)) return 0; @@ -1025,8 +1176,7 @@ out: return ret; } -int -ext3_journal_dirty_data(handle_t *handle, struct buffer_head *bh) +int ext3_journal_dirty_data(handle_t *handle, struct buffer_head *bh) { int err = journal_dirty_data(handle, bh); if (err) @@ -1051,7 +1201,6 @@ static int commit_write_fn(handle_t *handle, struct buffer_head *bh) * ext3 never places buffers on inode->i_mapping->private_list. metadata * buffers are managed internally. */ - static int ext3_ordered_commit_write(struct file *file, struct page *page, unsigned from, unsigned to) { @@ -1261,7 +1410,7 @@ static int journal_dirty_data_fn(handle_t *handle, struct buffer_head *bh) * we don't need to open a transaction here. */ static int ext3_ordered_writepage(struct page *page, - struct writeback_control *wbc) + struct writeback_control *wbc) { struct inode *inode = page->mapping->host; struct buffer_head *page_bufs; @@ -1430,7 +1579,7 @@ ext3_readpages(struct file *file, struct address_space *mapping, return mpage_readpages(mapping, pages, nr_pages, ext3_get_block); } -static int ext3_invalidatepage(struct page *page, unsigned long offset) +static void ext3_invalidatepage(struct page *page, unsigned long offset) { journal_t *journal = EXT3_JOURNAL(page->mapping->host); @@ -1440,7 +1589,7 @@ static int ext3_invalidatepage(struct page *page, unsigned long offset) if (offset == 0) ClearPageChecked(page); - return journal_invalidatepage(journal, page, offset); + journal_invalidatepage(journal, page, offset); } static int ext3_releasepage(struct page *page, gfp_t wait) @@ -1492,11 +1641,10 @@ static ssize_t ext3_direct_IO(int rw, struct kiocb *iocb, ret = blockdev_direct_IO(rw, iocb, inode, inode->i_sb->s_bdev, iov, offset, nr_segs, - ext3_direct_io_get_blocks, NULL); + ext3_get_block, NULL); /* - * Reacquire the handle: ext3_direct_io_get_block() can restart the - * transaction + * Reacquire the handle: ext3_get_block() can restart the transaction */ handle = journal_current_handle(); @@ -1752,11 +1900,8 @@ static inline int all_zeroes(__le32 *p, __le32 *q) * c) free the subtrees growing from the inode past the @chain[0]. * (no partially truncated stuff there). 
*/ -static Indirect *ext3_find_shared(struct inode *inode, - int depth, - int offsets[4], - Indirect chain[4], - __le32 *top) +static Indirect *ext3_find_shared(struct inode *inode, int depth, + int offsets[4], Indirect chain[4], __le32 *top) { Indirect *partial, *p; int k, err; @@ -1795,8 +1940,7 @@ static Indirect *ext3_find_shared(struct inode *inode, } /* Writer: end */ - while(partial > p) - { + while(partial > p) { brelse(partial->bh); partial--; } @@ -1812,10 +1956,9 @@ no_top: * We release `count' blocks on disk, but (last - first) may be greater * than `count' because there can be holes in there. */ -static void -ext3_clear_blocks(handle_t *handle, struct inode *inode, struct buffer_head *bh, - unsigned long block_to_free, unsigned long count, - __le32 *first, __le32 *last) +static void ext3_clear_blocks(handle_t *handle, struct inode *inode, + struct buffer_head *bh, unsigned long block_to_free, + unsigned long count, __le32 *first, __le32 *last) { __le32 *p; if (try_to_extend_transaction(handle, inode)) { @@ -2076,8 +2219,7 @@ static void ext3_free_branches(handle_t *handle, struct inode *inode, * that's fine - as long as they are linked from the inode, the post-crash * ext3_truncate() run will find them and release them. */ - -void ext3_truncate(struct inode * inode) +void ext3_truncate(struct inode *inode) { handle_t *handle; struct ext3_inode_info *ei = EXT3_I(inode); @@ -2201,29 +2343,26 @@ void ext3_truncate(struct inode * inode) do_indirects: /* Kill the remaining (whole) subtrees */ switch (offsets[0]) { - default: - nr = i_data[EXT3_IND_BLOCK]; - if (nr) { - ext3_free_branches(handle, inode, NULL, - &nr, &nr+1, 1); - i_data[EXT3_IND_BLOCK] = 0; - } - case EXT3_IND_BLOCK: - nr = i_data[EXT3_DIND_BLOCK]; - if (nr) { - ext3_free_branches(handle, inode, NULL, - &nr, &nr+1, 2); - i_data[EXT3_DIND_BLOCK] = 0; - } - case EXT3_DIND_BLOCK: - nr = i_data[EXT3_TIND_BLOCK]; - if (nr) { - ext3_free_branches(handle, inode, NULL, - &nr, &nr+1, 3); - i_data[EXT3_TIND_BLOCK] = 0; - } - case EXT3_TIND_BLOCK: - ; + default: + nr = i_data[EXT3_IND_BLOCK]; + if (nr) { + ext3_free_branches(handle, inode, NULL, &nr, &nr+1, 1); + i_data[EXT3_IND_BLOCK] = 0; + } + case EXT3_IND_BLOCK: + nr = i_data[EXT3_DIND_BLOCK]; + if (nr) { + ext3_free_branches(handle, inode, NULL, &nr, &nr+1, 2); + i_data[EXT3_DIND_BLOCK] = 0; + } + case EXT3_DIND_BLOCK: + nr = i_data[EXT3_TIND_BLOCK]; + if (nr) { + ext3_free_branches(handle, inode, NULL, &nr, &nr+1, 3); + i_data[EXT3_TIND_BLOCK] = 0; + } + case EXT3_TIND_BLOCK: + ; } ext3_discard_reservation(inode); @@ -2232,8 +2371,10 @@ do_indirects: inode->i_mtime = inode->i_ctime = CURRENT_TIME_SEC; ext3_mark_inode_dirty(handle, inode); - /* In a multi-transaction truncate, we only make the final - * transaction synchronous */ + /* + * In a multi-transaction truncate, we only make the final transaction + * synchronous + */ if (IS_SYNC(inode)) handle->h_sync = 1; out_stop: @@ -2259,20 +2400,16 @@ static unsigned long ext3_get_inode_block(struct super_block *sb, struct ext3_group_desc * gdp; - if ((ino != EXT3_ROOT_INO && - ino != EXT3_JOURNAL_INO && - ino != EXT3_RESIZE_INO && - ino < EXT3_FIRST_INO(sb)) || - ino > le32_to_cpu( - EXT3_SB(sb)->s_es->s_inodes_count)) { - ext3_error (sb, "ext3_get_inode_block", + if ((ino != EXT3_ROOT_INO && ino != EXT3_JOURNAL_INO && + ino != EXT3_RESIZE_INO && ino < EXT3_FIRST_INO(sb)) || + ino > le32_to_cpu(EXT3_SB(sb)->s_es->s_inodes_count)) { + ext3_error(sb, "ext3_get_inode_block", "bad inode number: %lu", ino); return 0; } 
block_group = (ino - 1) / EXT3_INODES_PER_GROUP(sb); if (block_group >= EXT3_SB(sb)->s_groups_count) { - ext3_error (sb, "ext3_get_inode_block", - "group >= groups count"); + ext3_error(sb,"ext3_get_inode_block","group >= groups count"); return 0; } smp_rmb(); @@ -2285,7 +2422,7 @@ static unsigned long ext3_get_inode_block(struct super_block *sb, return 0; } - gdp = (struct ext3_group_desc *) bh->b_data; + gdp = (struct ext3_group_desc *)bh->b_data; /* * Figure out the offset within the block group inode table */ @@ -2834,7 +2971,7 @@ err_out: /* - * akpm: how many blocks doth make a writepage()? + * How many blocks doth make a writepage()? * * With N blocks per page, it may be: * N data blocks @@ -2924,8 +3061,8 @@ ext3_reserve_inode_write(handle_t *handle, struct inode *inode, } /* - * akpm: What we do here is to mark the in-core inode as clean - * with respect to inode dirtiness (it may still be data-dirty). + * What we do here is to mark the in-core inode as clean with respect to inode + * dirtiness (it may still be data-dirty). * This means that the in-core inode may be reaped by prune_icache * without having to perform any I/O. This is a very good thing, * because *any* task may call prune_icache - even ones which @@ -2957,7 +3094,7 @@ int ext3_mark_inode_dirty(handle_t *handle, struct inode *inode) } /* - * akpm: ext3_dirty_inode() is called from __mark_inode_dirty() + * ext3_dirty_inode() is called from __mark_inode_dirty() * * We're really interested in the case where a file is being extended. * i_size has been changed by generic_commit_write() and we thus need @@ -2993,7 +3130,7 @@ out: return; } -#ifdef AKPM +#if 0 /* * Bind an inode's backing buffer_head into this transaction, to prevent * it from being flushed to disk early. Unlike @@ -3001,8 +3138,7 @@ out: * returns no iloc structure, so the caller needs to repeat the iloc * lookup to mark the inode dirty later. 
*/ -static inline int -ext3_pin_inode(handle_t *handle, struct inode *inode) +static int ext3_pin_inode(handle_t *handle, struct inode *inode) { struct ext3_iloc iloc; diff --git a/fs/ext3/super.c b/fs/ext3/super.c index 86e443182de4..f8a5266ea1ff 100644 --- a/fs/ext3/super.c +++ b/fs/ext3/super.c @@ -1678,12 +1678,6 @@ static int ext3_fill_super (struct super_block *sb, void *data, int silent) } if (test_opt(sb, NOBH)) { - if (sb->s_blocksize_bits != PAGE_CACHE_SHIFT) { - printk(KERN_WARNING "EXT3-fs: Ignoring nobh option " - "since filesystem blocksize doesn't match " - "pagesize\n"); - clear_opt(sbi->s_mount_opt, NOBH); - } if (!(test_opt(sb, DATA_FLAGS) == EXT3_MOUNT_WRITEBACK_DATA)) { printk(KERN_WARNING "EXT3-fs: Ignoring nobh option - " "its supported only with writeback mode\n"); diff --git a/fs/fat/dir.c b/fs/fat/dir.c index 4095bc149eb1..698b85bb1dd4 100644 --- a/fs/fat/dir.c +++ b/fs/fat/dir.c @@ -741,7 +741,7 @@ static int fat_dir_ioctl(struct inode * inode, struct file * filp, return ret; } -struct file_operations fat_dir_operations = { +const struct file_operations fat_dir_operations = { .read = generic_read_dir, .readdir = fat_readdir, .ioctl = fat_dir_ioctl, diff --git a/fs/fat/file.c b/fs/fat/file.c index 88aa1ae13f9f..1ee25232e6af 100644 --- a/fs/fat/file.c +++ b/fs/fat/file.c @@ -112,7 +112,7 @@ int fat_generic_ioctl(struct inode *inode, struct file *filp, } } -struct file_operations fat_file_operations = { +const struct file_operations fat_file_operations = { .llseek = generic_file_llseek, .read = do_sync_read, .write = do_sync_write, diff --git a/fs/fat/inode.c b/fs/fat/inode.c index 297300fe81c2..c1ce284f8a94 100644 --- a/fs/fat/inode.c +++ b/fs/fat/inode.c @@ -101,11 +101,11 @@ static int __fat_get_blocks(struct inode *inode, sector_t iblock, } static int fat_get_blocks(struct inode *inode, sector_t iblock, - unsigned long max_blocks, struct buffer_head *bh_result, int create) { struct super_block *sb = inode->i_sb; int err; + unsigned long max_blocks = bh_result->b_size >> inode->i_blkbits; err = __fat_get_blocks(inode, iblock, &max_blocks, bh_result, create); if (err) @@ -1435,9 +1435,6 @@ out_fail: EXPORT_SYMBOL_GPL(fat_fill_super); -int __init fat_cache_init(void); -void fat_cache_destroy(void); - static int __init init_fat_fs(void) { int err; diff --git a/fs/fcntl.c b/fs/fcntl.c index 03c789560fb8..2a2479196f96 100644 --- a/fs/fcntl.c +++ b/fs/fcntl.c @@ -412,7 +412,7 @@ out: /* Table to convert sigio signal codes into poll band bitmaps */ -static long band_table[NSIGPOLL] = { +static const long band_table[NSIGPOLL] = { POLLIN | POLLRDNORM, /* POLL_IN */ POLLOUT | POLLWRNORM | POLLWRBAND, /* POLL_OUT */ POLLIN | POLLRDNORM | POLLMSG, /* POLL_MSG */ @@ -531,7 +531,7 @@ int send_sigurg(struct fown_struct *fown) } static DEFINE_RWLOCK(fasync_lock); -static kmem_cache_t *fasync_cache; +static kmem_cache_t *fasync_cache __read_mostly; /* * fasync_helper() is used by some character device drivers (mainly mice) diff --git a/fs/fifo.c b/fs/fifo.c index d13fcd3ec803..889f722ee36d 100644 --- a/fs/fifo.c +++ b/fs/fifo.c @@ -145,6 +145,6 @@ err_nocleanup: * is contain the open that then fills in the correct operations * depending on the access mode of the file... 
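The const struct file_operations conversion recurs across most filesystems touched by this patch. A minimal sketch of the pattern for a hypothetical filesystem "foofs": both the extern declaration and the definition must carry the const qualifier, so the compiler accepts the match and the table can live in read-only data.

	/* foofs.h */
	extern const struct file_operations foofs_file_operations;

	/* foofs/file.c */
	const struct file_operations foofs_file_operations = {
		.llseek	= generic_file_llseek,
		.read	= generic_file_read,
		.write	= generic_file_write,
		.open	= generic_file_open,
		.fsync	= simple_sync_file,
	};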
*/ -struct file_operations def_fifo_fops = { +const struct file_operations def_fifo_fops = { .open = fifo_open, /* will set read or write pipe_fops */ }; diff --git a/fs/file.c b/fs/file.c index bbc743314730..55f4e7022563 100644 --- a/fs/file.c +++ b/fs/file.c @@ -373,6 +373,6 @@ static void __devinit fdtable_defer_list_init(int cpu) void __init files_defer_init(void) { int i; - for_each_cpu(i) + for_each_possible_cpu(i) fdtable_defer_list_init(i); } diff --git a/fs/freevxfs/vxfs_extern.h b/fs/freevxfs/vxfs_extern.h index 927acf70c591..1cf1fe8466a2 100644 --- a/fs/freevxfs/vxfs_extern.h +++ b/fs/freevxfs/vxfs_extern.h @@ -63,7 +63,7 @@ extern void vxfs_clear_inode(struct inode *); /* vxfs_lookup.c */ extern struct inode_operations vxfs_dir_inode_ops; -extern struct file_operations vxfs_dir_operations; +extern const struct file_operations vxfs_dir_operations; /* vxfs_olt.c */ extern int vxfs_read_olt(struct super_block *, u_long); diff --git a/fs/freevxfs/vxfs_lookup.c b/fs/freevxfs/vxfs_lookup.c index 554eb455722c..29cce456c7ce 100644 --- a/fs/freevxfs/vxfs_lookup.c +++ b/fs/freevxfs/vxfs_lookup.c @@ -56,7 +56,7 @@ struct inode_operations vxfs_dir_inode_ops = { .lookup = vxfs_lookup, }; -struct file_operations vxfs_dir_operations = { +const struct file_operations vxfs_dir_operations = { .readdir = vxfs_readdir, }; diff --git a/fs/fuse/dev.c b/fs/fuse/dev.c index 0c9a2ee54c91..23d1f52eb1b8 100644 --- a/fs/fuse/dev.c +++ b/fs/fuse/dev.c @@ -922,7 +922,7 @@ static int fuse_dev_release(struct inode *inode, struct file *file) return 0; } -struct file_operations fuse_dev_operations = { +const struct file_operations fuse_dev_operations = { .owner = THIS_MODULE, .llseek = no_llseek, .read = fuse_dev_read, diff --git a/fs/fuse/dir.c b/fs/fuse/dir.c index c72a8a97935c..256355b80256 100644 --- a/fs/fuse/dir.c +++ b/fs/fuse/dir.c @@ -1170,7 +1170,7 @@ static struct inode_operations fuse_dir_inode_operations = { .removexattr = fuse_removexattr, }; -static struct file_operations fuse_dir_operations = { +static const struct file_operations fuse_dir_operations = { .llseek = generic_file_llseek, .read = generic_read_dir, .readdir = fuse_readdir, diff --git a/fs/fuse/file.c b/fs/fuse/file.c index 6f05379b0a0d..975f2697e866 100644 --- a/fs/fuse/file.c +++ b/fs/fuse/file.c @@ -12,7 +12,7 @@ #include <linux/slab.h> #include <linux/kernel.h> -static struct file_operations fuse_direct_io_file_operations; +static const struct file_operations fuse_direct_io_file_operations; static int fuse_send_open(struct inode *inode, struct file *file, int isdir, struct fuse_open_out *outargp) @@ -611,7 +611,7 @@ static int fuse_set_page_dirty(struct page *page) return 0; } -static struct file_operations fuse_file_operations = { +static const struct file_operations fuse_file_operations = { .llseek = generic_file_llseek, .read = generic_file_read, .write = generic_file_write, @@ -623,7 +623,7 @@ static struct file_operations fuse_file_operations = { .sendfile = generic_file_sendfile, }; -static struct file_operations fuse_direct_io_file_operations = { +static const struct file_operations fuse_direct_io_file_operations = { .llseek = generic_file_llseek, .read = fuse_direct_read, .write = fuse_direct_write, diff --git a/fs/fuse/fuse_i.h b/fs/fuse/fuse_i.h index 4a83adfec968..a16a04fcf41e 100644 --- a/fs/fuse/fuse_i.h +++ b/fs/fuse/fuse_i.h @@ -346,7 +346,7 @@ static inline u64 get_node_id(struct inode *inode) } /** Device operations */ -extern struct file_operations fuse_dev_operations; +extern const struct file_operations 
fuse_dev_operations; /** * This is the single global spinlock which protects FUSE's structures diff --git a/fs/hfs/bnode.c b/fs/hfs/bnode.c index a7a7d77f3fd3..1e44dcfe49c4 100644 --- a/fs/hfs/bnode.c +++ b/fs/hfs/bnode.c @@ -306,8 +306,7 @@ void hfs_bnode_unhash(struct hfs_bnode *node) for (p = &node->tree->node_hash[hfs_bnode_hash(node->this)]; *p && *p != node; p = &(*p)->next_hash) ; - if (!*p) - BUG(); + BUG_ON(!*p); *p = node->next_hash; node->tree->node_hash_cnt--; } @@ -415,8 +414,7 @@ struct hfs_bnode *hfs_bnode_create(struct hfs_btree *tree, u32 num) spin_lock(&tree->hash_lock); node = hfs_bnode_findhash(tree, num); spin_unlock(&tree->hash_lock); - if (node) - BUG(); + BUG_ON(node); node = __hfs_bnode_create(tree, num); if (!node) return ERR_PTR(-ENOMEM); @@ -459,8 +457,7 @@ void hfs_bnode_put(struct hfs_bnode *node) dprint(DBG_BNODE_REFS, "put_node(%d:%d): %d\n", node->tree->cnid, node->this, atomic_read(&node->refcnt)); - if (!atomic_read(&node->refcnt)) - BUG(); + BUG_ON(!atomic_read(&node->refcnt)); if (!atomic_dec_and_lock(&node->refcnt, &tree->hash_lock)) return; for (i = 0; i < tree->pages_per_bnode; i++) { diff --git a/fs/hfs/btree.c b/fs/hfs/btree.c index 7bb11edd1488..d20131ce4b95 100644 --- a/fs/hfs/btree.c +++ b/fs/hfs/btree.c @@ -36,8 +36,7 @@ struct hfs_btree *hfs_btree_open(struct super_block *sb, u32 id, btree_keycmp ke tree->inode = iget_locked(sb, id); if (!tree->inode) goto free_tree; - if (!(tree->inode->i_state & I_NEW)) - BUG(); + BUG_ON(!(tree->inode->i_state & I_NEW)); { struct hfs_mdb *mdb = HFS_SB(sb)->mdb; HFS_I(tree->inode)->flags = 0; diff --git a/fs/hfs/dir.c b/fs/hfs/dir.c index 534e5a7480ef..7cd8cc03aea7 100644 --- a/fs/hfs/dir.c +++ b/fs/hfs/dir.c @@ -313,7 +313,7 @@ static int hfs_rename(struct inode *old_dir, struct dentry *old_dentry, return res; } -struct file_operations hfs_dir_operations = { +const struct file_operations hfs_dir_operations = { .read = generic_read_dir, .readdir = hfs_readdir, .llseek = generic_file_llseek, diff --git a/fs/hfs/hfs_fs.h b/fs/hfs/hfs_fs.h index 18ce47ab1b71..3ed8663a8db1 100644 --- a/fs/hfs/hfs_fs.h +++ b/fs/hfs/hfs_fs.h @@ -169,7 +169,7 @@ extern int hfs_cat_move(u32, struct inode *, struct qstr *, extern void hfs_cat_build_key(struct super_block *, btree_key *, u32, struct qstr *); /* dir.c */ -extern struct file_operations hfs_dir_operations; +extern const struct file_operations hfs_dir_operations; extern struct inode_operations hfs_dir_inode_operations; /* extent.c */ diff --git a/fs/hfs/inode.c b/fs/hfs/inode.c index 39fd85b9b916..2d4ced22201b 100644 --- a/fs/hfs/inode.c +++ b/fs/hfs/inode.c @@ -17,7 +17,7 @@ #include "hfs_fs.h" #include "btree.h" -static struct file_operations hfs_file_operations; +static const struct file_operations hfs_file_operations; static struct inode_operations hfs_file_inode_operations; /*================ Variable-like macros ================*/ @@ -98,17 +98,6 @@ static int hfs_releasepage(struct page *page, gfp_t mask) return res ? 
try_to_free_buffers(page) : 0; } -static int hfs_get_blocks(struct inode *inode, sector_t iblock, unsigned long max_blocks, - struct buffer_head *bh_result, int create) -{ - int ret; - - ret = hfs_get_block(inode, iblock, bh_result, create); - if (!ret) - bh_result->b_size = (1 << inode->i_blkbits); - return ret; -} - static ssize_t hfs_direct_IO(int rw, struct kiocb *iocb, const struct iovec *iov, loff_t offset, unsigned long nr_segs) { @@ -116,7 +105,7 @@ static ssize_t hfs_direct_IO(int rw, struct kiocb *iocb, struct inode *inode = file->f_dentry->d_inode->i_mapping->host; return blockdev_direct_IO(rw, iocb, inode, inode->i_sb->s_bdev, iov, - offset, nr_segs, hfs_get_blocks, NULL); + offset, nr_segs, hfs_get_block, NULL); } static int hfs_writepages(struct address_space *mapping, @@ -612,7 +601,7 @@ int hfs_inode_setattr(struct dentry *dentry, struct iattr * attr) } -static struct file_operations hfs_file_operations = { +static const struct file_operations hfs_file_operations = { .llseek = generic_file_llseek, .read = generic_file_read, .write = generic_file_write, diff --git a/fs/hfsplus/dir.c b/fs/hfsplus/dir.c index 01a6fe3a395c..1f9ece0de326 100644 --- a/fs/hfsplus/dir.c +++ b/fs/hfsplus/dir.c @@ -483,7 +483,7 @@ struct inode_operations hfsplus_dir_inode_operations = { .rename = hfsplus_rename, }; -struct file_operations hfsplus_dir_operations = { +const struct file_operations hfsplus_dir_operations = { .read = generic_read_dir, .readdir = hfsplus_readdir, .ioctl = hfsplus_ioctl, diff --git a/fs/hfsplus/inode.c b/fs/hfsplus/inode.c index 12ed2b7d046b..acf66dba3e01 100644 --- a/fs/hfsplus/inode.c +++ b/fs/hfsplus/inode.c @@ -93,17 +93,6 @@ static int hfsplus_releasepage(struct page *page, gfp_t mask) return res ? try_to_free_buffers(page) : 0; } -static int hfsplus_get_blocks(struct inode *inode, sector_t iblock, unsigned long max_blocks, - struct buffer_head *bh_result, int create) -{ - int ret; - - ret = hfsplus_get_block(inode, iblock, bh_result, create); - if (!ret) - bh_result->b_size = (1 << inode->i_blkbits); - return ret; -} - static ssize_t hfsplus_direct_IO(int rw, struct kiocb *iocb, const struct iovec *iov, loff_t offset, unsigned long nr_segs) { @@ -111,7 +100,7 @@ static ssize_t hfsplus_direct_IO(int rw, struct kiocb *iocb, struct inode *inode = file->f_dentry->d_inode->i_mapping->host; return blockdev_direct_IO(rw, iocb, inode, inode->i_sb->s_bdev, iov, - offset, nr_segs, hfsplus_get_blocks, NULL); + offset, nr_segs, hfsplus_get_block, NULL); } static int hfsplus_writepages(struct address_space *mapping, @@ -291,7 +280,7 @@ static struct inode_operations hfsplus_file_inode_operations = { .listxattr = hfsplus_listxattr, }; -static struct file_operations hfsplus_file_operations = { +static const struct file_operations hfsplus_file_operations = { .llseek = generic_file_llseek, .read = generic_file_read, .write = generic_file_write, diff --git a/fs/hostfs/hostfs_kern.c b/fs/hostfs/hostfs_kern.c index b3ad0bd0312f..bf0f8e16e433 100644 --- a/fs/hostfs/hostfs_kern.c +++ b/fs/hostfs/hostfs_kern.c @@ -384,7 +384,7 @@ int hostfs_fsync(struct file *file, struct dentry *dentry, int datasync) return fsync_file(HOSTFS_I(dentry->d_inode)->fd, datasync); } -static struct file_operations hostfs_file_fops = { +static const struct file_operations hostfs_file_fops = { .llseek = generic_file_llseek, .read = generic_file_read, .sendfile = generic_file_sendfile, @@ -399,7 +399,7 @@ static struct file_operations hostfs_file_fops = { .fsync = hostfs_fsync, }; -static struct file_operations 
hostfs_dir_fops = { +static const struct file_operations hostfs_dir_fops = { .llseek = generic_file_llseek, .readdir = hostfs_readdir, .read = generic_read_dir, diff --git a/fs/hostfs/hostfs_user.c b/fs/hostfs/hostfs_user.c index b97809deba66..23b7cee72123 100644 --- a/fs/hostfs/hostfs_user.c +++ b/fs/hostfs/hostfs_user.c @@ -360,7 +360,6 @@ int do_statfs(char *root, long *bsize_out, long long *blocks_out, spare_out[2] = buf.f_spare[2]; spare_out[3] = buf.f_spare[3]; spare_out[4] = buf.f_spare[4]; - spare_out[5] = buf.f_spare[5]; return(0); } diff --git a/fs/hpfs/dir.c b/fs/hpfs/dir.c index 5591f9623aa2..ecc9180645ae 100644 --- a/fs/hpfs/dir.c +++ b/fs/hpfs/dir.c @@ -310,7 +310,7 @@ struct dentry *hpfs_lookup(struct inode *dir, struct dentry *dentry, struct name return ERR_PTR(-ENOENT); } -struct file_operations hpfs_dir_ops = +const struct file_operations hpfs_dir_ops = { .llseek = hpfs_dir_lseek, .read = generic_read_dir, diff --git a/fs/hpfs/file.c b/fs/hpfs/file.c index 7c995ac4081b..d3b9fffe45a1 100644 --- a/fs/hpfs/file.c +++ b/fs/hpfs/file.c @@ -119,7 +119,7 @@ static ssize_t hpfs_file_write(struct file *file, const char __user *buf, return retval; } -struct file_operations hpfs_file_ops = +const struct file_operations hpfs_file_ops = { .llseek = generic_file_llseek, .read = generic_file_read, diff --git a/fs/hpfs/hpfs_fn.h b/fs/hpfs/hpfs_fn.h index 4c6473ab3b34..29b7a3e55173 100644 --- a/fs/hpfs/hpfs_fn.h +++ b/fs/hpfs/hpfs_fn.h @@ -240,7 +240,7 @@ void hpfs_set_dentry_operations(struct dentry *); /* dir.c */ struct dentry *hpfs_lookup(struct inode *, struct dentry *, struct nameidata *); -extern struct file_operations hpfs_dir_ops; +extern const struct file_operations hpfs_dir_ops; /* dnode.c */ @@ -266,7 +266,7 @@ void hpfs_set_ea(struct inode *, struct fnode *, char *, char *, int); /* file.c */ int hpfs_file_fsync(struct file *, struct dentry *, int); -extern struct file_operations hpfs_file_ops; +extern const struct file_operations hpfs_file_ops; extern struct inode_operations hpfs_file_iops; extern struct address_space_operations hpfs_aops; diff --git a/fs/hppfs/hppfs_kern.c b/fs/hppfs/hppfs_kern.c index a44dc5897399..2ba20cdb5baa 100644 --- a/fs/hppfs/hppfs_kern.c +++ b/fs/hppfs/hppfs_kern.c @@ -558,7 +558,7 @@ static loff_t hppfs_llseek(struct file *file, loff_t off, int where) return(default_llseek(file, off, where)); } -static struct file_operations hppfs_file_fops = { +static const struct file_operations hppfs_file_fops = { .owner = NULL, .llseek = hppfs_llseek, .read = hppfs_read, @@ -609,7 +609,7 @@ static int hppfs_fsync(struct file *file, struct dentry *dentry, int datasync) return(0); } -static struct file_operations hppfs_dir_fops = { +static const struct file_operations hppfs_dir_fops = { .owner = NULL, .readdir = hppfs_readdir, .open = hppfs_dir_open, diff --git a/fs/hugetlbfs/inode.c b/fs/hugetlbfs/inode.c index 25fa8bba8cb5..3a5b4e923455 100644 --- a/fs/hugetlbfs/inode.c +++ b/fs/hugetlbfs/inode.c @@ -35,7 +35,7 @@ static struct super_operations hugetlbfs_ops; static struct address_space_operations hugetlbfs_aops; -struct file_operations hugetlbfs_file_operations; +const struct file_operations hugetlbfs_file_operations; static struct inode_operations hugetlbfs_dir_inode_operations; static struct inode_operations hugetlbfs_inode_operations; @@ -566,7 +566,7 @@ static void init_once(void *foo, kmem_cache_t *cachep, unsigned long flags) inode_init_once(&ei->vfs_inode); } -struct file_operations hugetlbfs_file_operations = { +const struct file_operations 
hugetlbfs_file_operations = { .mmap = hugetlbfs_file_mmap, .fsync = simple_sync_file, .get_unmapped_area = hugetlb_get_unmapped_area, diff --git a/fs/inode.c b/fs/inode.c index 85da11044adc..32b7c3375021 100644 --- a/fs/inode.c +++ b/fs/inode.c @@ -56,8 +56,8 @@ #define I_HASHBITS i_hash_shift #define I_HASHMASK i_hash_mask -static unsigned int i_hash_mask; -static unsigned int i_hash_shift; +static unsigned int i_hash_mask __read_mostly; +static unsigned int i_hash_shift __read_mostly; /* * Each inode can be on two separate lists. One is @@ -73,7 +73,7 @@ static unsigned int i_hash_shift; LIST_HEAD(inode_in_use); LIST_HEAD(inode_unused); -static struct hlist_head *inode_hashtable; +static struct hlist_head *inode_hashtable __read_mostly; /* * A simple spinlock to protect the list manipulations. @@ -98,13 +98,13 @@ static DEFINE_MUTEX(iprune_mutex); */ struct inodes_stat_t inodes_stat; -static kmem_cache_t * inode_cachep; +static kmem_cache_t * inode_cachep __read_mostly; static struct inode *alloc_inode(struct super_block *sb) { static struct address_space_operations empty_aops; static struct inode_operations empty_iops; - static struct file_operations empty_fops; + static const struct file_operations empty_fops; struct inode *inode; if (sb->s_op->alloc_inode) diff --git a/fs/inotify.c b/fs/inotify.c index a61e93e17853..367c487c014b 100644 --- a/fs/inotify.c +++ b/fs/inotify.c @@ -39,15 +39,15 @@ static atomic_t inotify_cookie; -static kmem_cache_t *watch_cachep; -static kmem_cache_t *event_cachep; +static kmem_cache_t *watch_cachep __read_mostly; +static kmem_cache_t *event_cachep __read_mostly; -static struct vfsmount *inotify_mnt; +static struct vfsmount *inotify_mnt __read_mostly; /* these are configurable via /proc/sys/fs/inotify/ */ -int inotify_max_user_instances; -int inotify_max_user_watches; -int inotify_max_queued_events; +int inotify_max_user_instances __read_mostly; +int inotify_max_user_watches __read_mostly; +int inotify_max_queued_events __read_mostly; /* * Lock ordering: @@ -920,7 +920,7 @@ static long inotify_ioctl(struct file *file, unsigned int cmd, return ret; } -static struct file_operations inotify_fops = { +static const struct file_operations inotify_fops = { .poll = inotify_poll, .read = inotify_read, .release = inotify_release, diff --git a/fs/isofs/dir.c b/fs/isofs/dir.c index 7901ac9f97ab..5440ea292c69 100644 --- a/fs/isofs/dir.c +++ b/fs/isofs/dir.c @@ -16,7 +16,7 @@ static int isofs_readdir(struct file *, void *, filldir_t); -struct file_operations isofs_dir_operations = +const struct file_operations isofs_dir_operations = { .read = generic_read_dir, .readdir = isofs_readdir, diff --git a/fs/isofs/isofs.h b/fs/isofs/isofs.h index 439a19b1bf3e..b87ba066f5e7 100644 --- a/fs/isofs/isofs.h +++ b/fs/isofs/isofs.h @@ -175,6 +175,6 @@ isofs_normalize_block_and_offset(struct iso_directory_record* de, } extern struct inode_operations isofs_dir_inode_operations; -extern struct file_operations isofs_dir_operations; +extern const struct file_operations isofs_dir_operations; extern struct address_space_operations isofs_symlink_aops; extern struct export_operations isofs_export_ops; diff --git a/fs/jbd/transaction.c b/fs/jbd/transaction.c index ada31fa272e3..c609f5034fcd 100644 --- a/fs/jbd/transaction.c +++ b/fs/jbd/transaction.c @@ -1873,16 +1873,15 @@ zap_buffer_unlocked: } /** - * int journal_invalidatepage() + * void journal_invalidatepage() * @journal: journal to use for flush... * @page: page to flush * @offset: length of page to invalidate. 
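With journal_invalidatepage() changed to return void (the jbd hunk documented here, with the ext3 and jfs callers adjusted elsewhere in this patch), a journalled filesystem's ->invalidatepage hook reduces to a plain forwarding call. A hedged sketch for a hypothetical filesystem; FOOFS_JOURNAL() is an assumed accessor analogous to EXT3_JOURNAL().

	static void foofs_invalidatepage(struct page *page, unsigned long offset)
	{
		/* assumed per-inode journal lookup, analogous to EXT3_JOURNAL() */
		journal_t *journal = FOOFS_JOURNAL(page->mapping->host);

		if (offset == 0)
			ClearPageChecked(page);	/* as ext3 does for journalled data */
		journal_invalidatepage(journal, page, offset);
	}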
* * Reap page buffers containing data after offset in page. * - * Return non-zero if the page's buffers were successfully reaped. */ -int journal_invalidatepage(journal_t *journal, +void journal_invalidatepage(journal_t *journal, struct page *page, unsigned long offset) { @@ -1893,7 +1892,7 @@ int journal_invalidatepage(journal_t *journal, if (!PageLocked(page)) BUG(); if (!page_has_buffers(page)) - return 1; + return; /* We will potentially be playing with lists other than just the * data lists (especially for journaled data mode), so be @@ -1916,11 +1915,9 @@ int journal_invalidatepage(journal_t *journal, } while (bh != head); if (!offset) { - if (!may_free || !try_to_free_buffers(page)) - return 0; - J_ASSERT(!page_has_buffers(page)); + if (may_free && try_to_free_buffers(page)) + J_ASSERT(!page_has_buffers(page)); } - return 1; } /* diff --git a/fs/jffs/inode-v23.c b/fs/jffs/inode-v23.c index 5a4519e834da..020cc097c539 100644 --- a/fs/jffs/inode-v23.c +++ b/fs/jffs/inode-v23.c @@ -55,9 +55,9 @@ static int jffs_remove(struct inode *dir, struct dentry *dentry, int type); static struct super_operations jffs_ops; -static struct file_operations jffs_file_operations; +static const struct file_operations jffs_file_operations; static struct inode_operations jffs_file_inode_operations; -static struct file_operations jffs_dir_operations; +static const struct file_operations jffs_dir_operations; static struct inode_operations jffs_dir_inode_operations; static struct address_space_operations jffs_address_operations; @@ -1629,7 +1629,7 @@ static int jffs_fsync(struct file *f, struct dentry *d, int datasync) } -static struct file_operations jffs_file_operations = +static const struct file_operations jffs_file_operations = { .open = generic_file_open, .llseek = generic_file_llseek, @@ -1649,7 +1649,7 @@ static struct inode_operations jffs_file_inode_operations = }; -static struct file_operations jffs_dir_operations = +static const struct file_operations jffs_dir_operations = { .readdir = jffs_readdir, }; diff --git a/fs/jffs2/compr_zlib.c b/fs/jffs2/compr_zlib.c index 4db8be8e90cc..5c63e0cdcf4c 100644 --- a/fs/jffs2/compr_zlib.c +++ b/fs/jffs2/compr_zlib.c @@ -33,13 +33,14 @@ */ #define STREAM_END_SPACE 12 -static DECLARE_MUTEX(deflate_sem); -static DECLARE_MUTEX(inflate_sem); +static DEFINE_MUTEX(deflate_mutex); +static DEFINE_MUTEX(inflate_mutex); static z_stream inf_strm, def_strm; #ifdef __KERNEL__ /* Linux-only */ #include <linux/vmalloc.h> #include <linux/init.h> +#include <linux/mutex.h> static int __init alloc_workspaces(void) { @@ -79,11 +80,11 @@ static int jffs2_zlib_compress(unsigned char *data_in, if (*dstlen <= STREAM_END_SPACE) return -1; - down(&deflate_sem); + mutex_lock(&deflate_mutex); if (Z_OK != zlib_deflateInit(&def_strm, 3)) { printk(KERN_WARNING "deflateInit failed\n"); - up(&deflate_sem); + mutex_unlock(&deflate_mutex); return -1; } @@ -104,7 +105,7 @@ static int jffs2_zlib_compress(unsigned char *data_in, if (ret != Z_OK) { D1(printk(KERN_DEBUG "deflate in loop returned %d\n", ret)); zlib_deflateEnd(&def_strm); - up(&deflate_sem); + mutex_unlock(&deflate_mutex); return -1; } } @@ -133,7 +134,7 @@ static int jffs2_zlib_compress(unsigned char *data_in, *sourcelen = def_strm.total_in; ret = 0; out: - up(&deflate_sem); + mutex_unlock(&deflate_mutex); return ret; } @@ -145,7 +146,7 @@ static int jffs2_zlib_decompress(unsigned char *data_in, int ret; int wbits = MAX_WBITS; - down(&inflate_sem); + mutex_lock(&inflate_mutex); inf_strm.next_in = data_in; inf_strm.avail_in = srclen; 
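The jffs2, jfs and lockd hunks in this region all perform the same mechanical conversion from binary semaphores used for mutual exclusion to mutexes. A standalone before/after sketch of the idiom, with made-up names:

	#include <linux/mutex.h>

	static DEFINE_MUTEX(example_mutex);	/* was: static DECLARE_MUTEX(example_sem); */

	static void example_critical_section(void)
	{
		mutex_lock(&example_mutex);	/* was: down(&example_sem); */
		/* ... manipulate shared state ... */
		mutex_unlock(&example_mutex);	/* was: up(&example_sem); */
	}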
@@ -173,7 +174,7 @@ static int jffs2_zlib_decompress(unsigned char *data_in, if (Z_OK != zlib_inflateInit2(&inf_strm, wbits)) { printk(KERN_WARNING "inflateInit failed\n"); - up(&inflate_sem); + mutex_unlock(&inflate_mutex); return 1; } @@ -183,7 +184,7 @@ static int jffs2_zlib_decompress(unsigned char *data_in, printk(KERN_NOTICE "inflate returned %d\n", ret); } zlib_inflateEnd(&inf_strm); - up(&inflate_sem); + mutex_unlock(&inflate_mutex); return 0; } diff --git a/fs/jffs2/dir.c b/fs/jffs2/dir.c index a7bf9cb2567f..8bc7a5018e40 100644 --- a/fs/jffs2/dir.c +++ b/fs/jffs2/dir.c @@ -37,7 +37,7 @@ static int jffs2_mknod (struct inode *,struct dentry *,int,dev_t); static int jffs2_rename (struct inode *, struct dentry *, struct inode *, struct dentry *); -struct file_operations jffs2_dir_operations = +const struct file_operations jffs2_dir_operations = { .read = generic_read_dir, .readdir = jffs2_readdir, diff --git a/fs/jffs2/file.c b/fs/jffs2/file.c index 935f273dc57b..9f4171213e58 100644 --- a/fs/jffs2/file.c +++ b/fs/jffs2/file.c @@ -38,7 +38,7 @@ int jffs2_fsync(struct file *filp, struct dentry *dentry, int datasync) return 0; } -struct file_operations jffs2_file_operations = +const struct file_operations jffs2_file_operations = { .llseek = generic_file_llseek, .open = generic_file_open, diff --git a/fs/jffs2/os-linux.h b/fs/jffs2/os-linux.h index 59e7a393200c..d307cf548625 100644 --- a/fs/jffs2/os-linux.h +++ b/fs/jffs2/os-linux.h @@ -159,11 +159,11 @@ void jffs2_stop_garbage_collect_thread(struct jffs2_sb_info *c); void jffs2_garbage_collect_trigger(struct jffs2_sb_info *c); /* dir.c */ -extern struct file_operations jffs2_dir_operations; +extern const struct file_operations jffs2_dir_operations; extern struct inode_operations jffs2_dir_inode_operations; /* file.c */ -extern struct file_operations jffs2_file_operations; +extern const struct file_operations jffs2_file_operations; extern struct inode_operations jffs2_file_inode_operations; extern struct address_space_operations jffs2_file_address_operations; int jffs2_fsync(struct file *, struct dentry *, int); diff --git a/fs/jfs/file.c b/fs/jfs/file.c index e1ac6e497e2b..1c9745be5ada 100644 --- a/fs/jfs/file.c +++ b/fs/jfs/file.c @@ -100,7 +100,7 @@ struct inode_operations jfs_file_inode_operations = { #endif }; -struct file_operations jfs_file_operations = { +const struct file_operations jfs_file_operations = { .open = jfs_open, .llseek = generic_file_llseek, .write = generic_file_write, diff --git a/fs/jfs/inode.c b/fs/jfs/inode.c index 51a5fed90cca..04eb78f1252e 100644 --- a/fs/jfs/inode.c +++ b/fs/jfs/inode.c @@ -258,7 +258,8 @@ jfs_get_blocks(struct inode *ip, sector_t lblock, unsigned long max_blocks, static int jfs_get_block(struct inode *ip, sector_t lblock, struct buffer_head *bh_result, int create) { - return jfs_get_blocks(ip, lblock, 1, bh_result, create); + return jfs_get_blocks(ip, lblock, bh_result->b_size >> ip->i_blkbits, + bh_result, create); } static int jfs_writepage(struct page *page, struct writeback_control *wbc) @@ -301,7 +302,7 @@ static ssize_t jfs_direct_IO(int rw, struct kiocb *iocb, struct inode *inode = file->f_mapping->host; return blockdev_direct_IO(rw, iocb, inode, inode->i_sb->s_bdev, iov, - offset, nr_segs, jfs_get_blocks, NULL); + offset, nr_segs, jfs_get_block, NULL); } struct address_space_operations jfs_aops = { diff --git a/fs/jfs/jfs_inode.h b/fs/jfs/jfs_inode.h index 095d471b9f9a..c30072674464 100644 --- a/fs/jfs/jfs_inode.h +++ b/fs/jfs/jfs_inode.h @@ -35,9 +35,9 @@ extern void 
jfs_set_inode_flags(struct inode *); extern struct address_space_operations jfs_aops; extern struct inode_operations jfs_dir_inode_operations; -extern struct file_operations jfs_dir_operations; +extern const struct file_operations jfs_dir_operations; extern struct inode_operations jfs_file_inode_operations; -extern struct file_operations jfs_file_operations; +extern const struct file_operations jfs_file_operations; extern struct inode_operations jfs_symlink_inode_operations; extern struct dentry_operations jfs_ci_dentry_operations; #endif /* _H_JFS_INODE */ diff --git a/fs/jfs/jfs_logmgr.c b/fs/jfs/jfs_logmgr.c index 0b348b13b551..3315f0b1fbc0 100644 --- a/fs/jfs/jfs_logmgr.c +++ b/fs/jfs/jfs_logmgr.c @@ -69,6 +69,7 @@ #include <linux/bio.h> #include <linux/suspend.h> #include <linux/delay.h> +#include <linux/mutex.h> #include "jfs_incore.h" #include "jfs_filsys.h" #include "jfs_metapage.h" @@ -165,7 +166,7 @@ do { \ */ static LIST_HEAD(jfs_external_logs); static struct jfs_log *dummy_log = NULL; -static DECLARE_MUTEX(jfs_log_sem); +static DEFINE_MUTEX(jfs_log_mutex); /* * forward references @@ -1085,20 +1086,20 @@ int lmLogOpen(struct super_block *sb) if (sbi->mntflag & JFS_INLINELOG) return open_inline_log(sb); - down(&jfs_log_sem); + mutex_lock(&jfs_log_mutex); list_for_each_entry(log, &jfs_external_logs, journal_list) { if (log->bdev->bd_dev == sbi->logdev) { if (memcmp(log->uuid, sbi->loguuid, sizeof(log->uuid))) { jfs_warn("wrong uuid on JFS journal\n"); - up(&jfs_log_sem); + mutex_unlock(&jfs_log_mutex); return -EINVAL; } /* * add file system to log active file system list */ if ((rc = lmLogFileSystem(log, sbi, 1))) { - up(&jfs_log_sem); + mutex_unlock(&jfs_log_mutex); return rc; } goto journal_found; @@ -1106,7 +1107,7 @@ int lmLogOpen(struct super_block *sb) } if (!(log = kzalloc(sizeof(struct jfs_log), GFP_KERNEL))) { - up(&jfs_log_sem); + mutex_unlock(&jfs_log_mutex); return -ENOMEM; } INIT_LIST_HEAD(&log->sb_list); @@ -1151,7 +1152,7 @@ journal_found: sbi->log = log; LOG_UNLOCK(log); - up(&jfs_log_sem); + mutex_unlock(&jfs_log_mutex); return 0; /* @@ -1168,7 +1169,7 @@ journal_found: blkdev_put(bdev); free: /* free log descriptor */ - up(&jfs_log_sem); + mutex_unlock(&jfs_log_mutex); kfree(log); jfs_warn("lmLogOpen: exit(%d)", rc); @@ -1212,11 +1213,11 @@ static int open_dummy_log(struct super_block *sb) { int rc; - down(&jfs_log_sem); + mutex_lock(&jfs_log_mutex); if (!dummy_log) { dummy_log = kzalloc(sizeof(struct jfs_log), GFP_KERNEL); if (!dummy_log) { - up(&jfs_log_sem); + mutex_unlock(&jfs_log_mutex); return -ENOMEM; } INIT_LIST_HEAD(&dummy_log->sb_list); @@ -1229,7 +1230,7 @@ static int open_dummy_log(struct super_block *sb) if (rc) { kfree(dummy_log); dummy_log = NULL; - up(&jfs_log_sem); + mutex_unlock(&jfs_log_mutex); return rc; } } @@ -1238,7 +1239,7 @@ static int open_dummy_log(struct super_block *sb) list_add(&JFS_SBI(sb)->log_list, &dummy_log->sb_list); JFS_SBI(sb)->log = dummy_log; LOG_UNLOCK(dummy_log); - up(&jfs_log_sem); + mutex_unlock(&jfs_log_mutex); return 0; } @@ -1466,7 +1467,7 @@ int lmLogClose(struct super_block *sb) jfs_info("lmLogClose: log:0x%p", log); - down(&jfs_log_sem); + mutex_lock(&jfs_log_mutex); LOG_LOCK(log); list_del(&sbi->log_list); LOG_UNLOCK(log); @@ -1516,7 +1517,7 @@ int lmLogClose(struct super_block *sb) kfree(log); out: - up(&jfs_log_sem); + mutex_unlock(&jfs_log_mutex); jfs_info("lmLogClose: exit(%d)", rc); return rc; } diff --git a/fs/jfs/jfs_metapage.c b/fs/jfs/jfs_metapage.c index 5fbaeaadccd3..f28696f235c4 100644 --- 
a/fs/jfs/jfs_metapage.c +++ b/fs/jfs/jfs_metapage.c @@ -220,8 +220,8 @@ int __init metapage_init(void) if (metapage_cache == NULL) return -ENOMEM; - metapage_mempool = mempool_create(METAPOOL_MIN_PAGES, mempool_alloc_slab, - mempool_free_slab, metapage_cache); + metapage_mempool = mempool_create_slab_pool(METAPOOL_MIN_PAGES, + metapage_cache); if (metapage_mempool == NULL) { kmem_cache_destroy(metapage_cache); @@ -578,14 +578,13 @@ static int metapage_releasepage(struct page *page, gfp_t gfp_mask) return 0; } -static int metapage_invalidatepage(struct page *page, unsigned long offset) +static void metapage_invalidatepage(struct page *page, unsigned long offset) { BUG_ON(offset); - if (PageWriteback(page)) - return 0; + BUG_ON(PageWriteback(page)); - return metapage_releasepage(page, 0); + metapage_releasepage(page, 0); } struct address_space_operations jfs_metapage_aops = { diff --git a/fs/jfs/namei.c b/fs/jfs/namei.c index 309cee575f7d..09ea03f62277 100644 --- a/fs/jfs/namei.c +++ b/fs/jfs/namei.c @@ -1519,7 +1519,7 @@ struct inode_operations jfs_dir_inode_operations = { #endif }; -struct file_operations jfs_dir_operations = { +const struct file_operations jfs_dir_operations = { .read = generic_read_dir, .readdir = jfs_readdir, .fsync = jfs_fsync, diff --git a/fs/libfs.c b/fs/libfs.c index 4fdeaceb892c..7145ba7a48d0 100644 --- a/fs/libfs.c +++ b/fs/libfs.c @@ -179,7 +179,7 @@ ssize_t generic_read_dir(struct file *filp, char __user *buf, size_t siz, loff_t return -EISDIR; } -struct file_operations simple_dir_operations = { +const struct file_operations simple_dir_operations = { .open = dcache_dir_open, .release = dcache_dir_close, .llseek = dcache_dir_lseek, diff --git a/fs/lockd/host.c b/fs/lockd/host.c index 112ebf8b8dfe..729ac427d359 100644 --- a/fs/lockd/host.c +++ b/fs/lockd/host.c @@ -16,6 +16,7 @@ #include <linux/sunrpc/svc.h> #include <linux/lockd/lockd.h> #include <linux/lockd/sm_inter.h> +#include <linux/mutex.h> #define NLMDBG_FACILITY NLMDBG_HOSTCACHE @@ -30,7 +31,7 @@ static struct nlm_host * nlm_hosts[NLM_HOST_NRHASH]; static unsigned long next_gc; static int nrhosts; -static DECLARE_MUTEX(nlm_host_sema); +static DEFINE_MUTEX(nlm_host_mutex); static void nlm_gc_hosts(void); @@ -71,7 +72,7 @@ nlm_lookup_host(int server, struct sockaddr_in *sin, hash = NLM_ADDRHASH(sin->sin_addr.s_addr); /* Lock hash table */ - down(&nlm_host_sema); + mutex_lock(&nlm_host_mutex); if (time_after_eq(jiffies, next_gc)) nlm_gc_hosts(); @@ -91,7 +92,7 @@ nlm_lookup_host(int server, struct sockaddr_in *sin, nlm_hosts[hash] = host; } nlm_get_host(host); - up(&nlm_host_sema); + mutex_unlock(&nlm_host_mutex); return host; } } @@ -130,7 +131,7 @@ nlm_lookup_host(int server, struct sockaddr_in *sin, next_gc = 0; nohost: - up(&nlm_host_sema); + mutex_unlock(&nlm_host_mutex); return host; } @@ -141,19 +142,19 @@ nlm_find_client(void) * and return it */ int hash; - down(&nlm_host_sema); + mutex_lock(&nlm_host_mutex); for (hash = 0 ; hash < NLM_HOST_NRHASH; hash++) { struct nlm_host *host, **hp; for (hp = &nlm_hosts[hash]; (host = *hp) != 0; hp = &host->h_next) { if (host->h_server && host->h_killed == 0) { nlm_get_host(host); - up(&nlm_host_sema); + mutex_unlock(&nlm_host_mutex); return host; } } } - up(&nlm_host_sema); + mutex_unlock(&nlm_host_mutex); return NULL; } @@ -265,7 +266,7 @@ nlm_shutdown_hosts(void) int i; dprintk("lockd: shutting down host module\n"); - down(&nlm_host_sema); + mutex_lock(&nlm_host_mutex); /* First, make all hosts eligible for gc */ dprintk("lockd: nuking all hosts...\n"); @@ 
-276,7 +277,7 @@ nlm_shutdown_hosts(void) /* Then, perform a garbage collection pass */ nlm_gc_hosts(); - up(&nlm_host_sema); + mutex_unlock(&nlm_host_mutex); /* complain if any hosts are left */ if (nrhosts) { diff --git a/fs/lockd/svc.c b/fs/lockd/svc.c index 5e85bde6c123..fd56c8872f34 100644 --- a/fs/lockd/svc.c +++ b/fs/lockd/svc.c @@ -25,6 +25,7 @@ #include <linux/slab.h> #include <linux/smp.h> #include <linux/smp_lock.h> +#include <linux/mutex.h> #include <linux/sunrpc/types.h> #include <linux/sunrpc/stats.h> @@ -43,13 +44,13 @@ static struct svc_program nlmsvc_program; struct nlmsvc_binding * nlmsvc_ops; EXPORT_SYMBOL(nlmsvc_ops); -static DECLARE_MUTEX(nlmsvc_sema); +static DEFINE_MUTEX(nlmsvc_mutex); static unsigned int nlmsvc_users; static pid_t nlmsvc_pid; int nlmsvc_grace_period; unsigned long nlmsvc_timeout; -static DECLARE_MUTEX_LOCKED(lockd_start); +static DECLARE_COMPLETION(lockd_start_done); static DECLARE_WAIT_QUEUE_HEAD(lockd_exit); /* @@ -112,7 +113,7 @@ lockd(struct svc_rqst *rqstp) * Let our maker know we're running. */ nlmsvc_pid = current->pid; - up(&lockd_start); + complete(&lockd_start_done); daemonize("lockd"); @@ -215,7 +216,7 @@ lockd_up(void) struct svc_serv * serv; int error = 0; - down(&nlmsvc_sema); + mutex_lock(&nlmsvc_mutex); /* * Unconditionally increment the user count ... this is * the number of clients who _want_ a lockd process. @@ -263,7 +264,7 @@ lockd_up(void) "lockd_up: create thread failed, error=%d\n", error); goto destroy_and_out; } - down(&lockd_start); + wait_for_completion(&lockd_start_done); /* * Note: svc_serv structures have an initial use count of 1, @@ -272,7 +273,7 @@ lockd_up(void) destroy_and_out: svc_destroy(serv); out: - up(&nlmsvc_sema); + mutex_unlock(&nlmsvc_mutex); return error; } EXPORT_SYMBOL(lockd_up); @@ -285,7 +286,7 @@ lockd_down(void) { static int warned; - down(&nlmsvc_sema); + mutex_lock(&nlmsvc_mutex); if (nlmsvc_users) { if (--nlmsvc_users) goto out; @@ -315,7 +316,7 @@ lockd_down(void) recalc_sigpending(); spin_unlock_irq(&current->sighand->siglock); out: - up(&nlmsvc_sema); + mutex_unlock(&nlmsvc_mutex); } EXPORT_SYMBOL(lockd_down); diff --git a/fs/lockd/svcsubs.c b/fs/lockd/svcsubs.c index c7a6e3ae44d6..a570e5c8a930 100644 --- a/fs/lockd/svcsubs.c +++ b/fs/lockd/svcsubs.c @@ -11,6 +11,7 @@ #include <linux/string.h> #include <linux/time.h> #include <linux/in.h> +#include <linux/mutex.h> #include <linux/sunrpc/svc.h> #include <linux/sunrpc/clnt.h> #include <linux/nfsd/nfsfh.h> @@ -28,7 +29,7 @@ #define FILE_HASH_BITS 5 #define FILE_NRHASH (1<<FILE_HASH_BITS) static struct nlm_file * nlm_files[FILE_NRHASH]; -static DECLARE_MUTEX(nlm_file_sema); +static DEFINE_MUTEX(nlm_file_mutex); #ifdef NFSD_DEBUG static inline void nlm_debug_print_fh(char *msg, struct nfs_fh *f) @@ -91,7 +92,7 @@ nlm_lookup_file(struct svc_rqst *rqstp, struct nlm_file **result, hash = file_hash(f); /* Lock file table */ - down(&nlm_file_sema); + mutex_lock(&nlm_file_mutex); for (file = nlm_files[hash]; file; file = file->f_next) if (!nfs_compare_fh(&file->f_handle, f)) @@ -130,7 +131,7 @@ found: nfserr = 0; out_unlock: - up(&nlm_file_sema); + mutex_unlock(&nlm_file_mutex); return nfserr; out_free: @@ -239,14 +240,14 @@ nlm_traverse_files(struct nlm_host *host, int action) struct nlm_file *file, **fp; int i; - down(&nlm_file_sema); + mutex_lock(&nlm_file_mutex); for (i = 0; i < FILE_NRHASH; i++) { fp = nlm_files + i; while ((file = *fp) != NULL) { /* Traverse locks, blocks and shares of this file * and update file->f_locks count */ if 
(nlm_inspect_file(host, file, action)) { - up(&nlm_file_sema); + mutex_unlock(&nlm_file_mutex); return 1; } @@ -261,7 +262,7 @@ nlm_traverse_files(struct nlm_host *host, int action) } } } - up(&nlm_file_sema); + mutex_unlock(&nlm_file_mutex); return 0; } @@ -281,7 +282,7 @@ nlm_release_file(struct nlm_file *file) file, file->f_count); /* Lock file table */ - down(&nlm_file_sema); + mutex_lock(&nlm_file_mutex); /* If there are no more locks etc, delete the file */ if(--file->f_count == 0) { @@ -289,7 +290,7 @@ nlm_release_file(struct nlm_file *file) nlm_delete_file(file); } - up(&nlm_file_sema); + mutex_unlock(&nlm_file_mutex); } /* diff --git a/fs/locks.c b/fs/locks.c index 56f996e98bbc..4d9e71d43e7e 100644 --- a/fs/locks.c +++ b/fs/locks.c @@ -142,7 +142,7 @@ int lease_break_time = 45; static LIST_HEAD(file_lock_list); static LIST_HEAD(blocked_list); -static kmem_cache_t *filelock_cache; +static kmem_cache_t *filelock_cache __read_mostly; /* Allocate an empty lock structure. */ static struct file_lock *locks_alloc_lock(void) @@ -533,12 +533,7 @@ static void locks_delete_block(struct file_lock *waiter) static void locks_insert_block(struct file_lock *blocker, struct file_lock *waiter) { - if (!list_empty(&waiter->fl_block)) { - printk(KERN_ERR "locks_insert_block: removing duplicated lock " - "(pid=%d %Ld-%Ld type=%d)\n", waiter->fl_pid, - waiter->fl_start, waiter->fl_end, waiter->fl_type); - __locks_delete_block(waiter); - } + BUG_ON(!list_empty(&waiter->fl_block)); list_add_tail(&waiter->fl_block, &blocker->fl_block); waiter->fl_next = blocker; if (IS_POSIX(blocker)) @@ -797,9 +792,7 @@ out: return error; } -EXPORT_SYMBOL(posix_lock_file); - -static int __posix_lock_file(struct inode *inode, struct file_lock *request) +static int __posix_lock_file_conf(struct inode *inode, struct file_lock *request, struct file_lock *conflock) { struct file_lock *fl; struct file_lock *new_fl, *new_fl2; @@ -823,6 +816,8 @@ static int __posix_lock_file(struct inode *inode, struct file_lock *request) continue; if (!posix_locks_conflict(request, fl)) continue; + if (conflock) + locks_copy_lock(conflock, fl); error = -EAGAIN; if (!(request->fl_flags & FL_SLEEP)) goto out; @@ -992,8 +987,24 @@ static int __posix_lock_file(struct inode *inode, struct file_lock *request) */ int posix_lock_file(struct file *filp, struct file_lock *fl) { - return __posix_lock_file(filp->f_dentry->d_inode, fl); + return __posix_lock_file_conf(filp->f_dentry->d_inode, fl, NULL); +} +EXPORT_SYMBOL(posix_lock_file); + +/** + * posix_lock_file_conf - Apply a POSIX-style lock to a file + * @filp: The file to apply the lock to + * @fl: The lock to be applied + * @conflock: Place to return a copy of the conflicting lock, if found. + * + * Except for the conflock parameter, acts just like posix_lock_file. 
+ */ +int posix_lock_file_conf(struct file *filp, struct file_lock *fl, + struct file_lock *conflock) +{ + return __posix_lock_file_conf(filp->f_dentry->d_inode, fl, conflock); } +EXPORT_SYMBOL(posix_lock_file_conf); /** * posix_lock_file_wait - Apply a POSIX-style lock to a file @@ -1009,7 +1020,7 @@ int posix_lock_file_wait(struct file *filp, struct file_lock *fl) int error; might_sleep (); for (;;) { - error = __posix_lock_file(filp->f_dentry->d_inode, fl); + error = posix_lock_file(filp, fl); if ((error != -EAGAIN) || !(fl->fl_flags & FL_SLEEP)) break; error = wait_event_interruptible(fl->fl_wait, !fl->fl_next); @@ -1081,7 +1092,7 @@ int locks_mandatory_area(int read_write, struct inode *inode, fl.fl_end = offset + count - 1; for (;;) { - error = __posix_lock_file(inode, &fl); + error = __posix_lock_file_conf(inode, &fl, NULL); if (error != -EAGAIN) break; if (!(fl.fl_flags & FL_SLEEP)) @@ -1694,7 +1705,7 @@ again: error = filp->f_op->lock(filp, cmd, file_lock); else { for (;;) { - error = __posix_lock_file(inode, file_lock); + error = posix_lock_file(filp, file_lock); if ((error != -EAGAIN) || (cmd == F_SETLK)) break; error = wait_event_interruptible(file_lock->fl_wait, @@ -1837,7 +1848,7 @@ again: error = filp->f_op->lock(filp, cmd, file_lock); else { for (;;) { - error = __posix_lock_file(inode, file_lock); + error = posix_lock_file(filp, file_lock); if ((error != -EAGAIN) || (cmd == F_SETLK64)) break; error = wait_event_interruptible(file_lock->fl_wait, diff --git a/fs/mbcache.c b/fs/mbcache.c index 73e754fea2d8..e4fde1ab22cd 100644 --- a/fs/mbcache.c +++ b/fs/mbcache.c @@ -311,7 +311,7 @@ fail: /* * mb_cache_shrink() * - * Removes all cache entires of a device from the cache. All cache entries + * Removes all cache entries of a device from the cache. All cache entries * currently in use cannot be freed, and thus remain in the cache. All others * are freed. 
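[The fs/locks.c hunk above introduces posix_lock_file_conf(), which behaves like posix_lock_file() but, on -EAGAIN, copies the conflicting lock into the caller-supplied conflock (the nfsd4_lock hunk later in this diff relies on exactly that). A minimal sketch of a caller follows; try_range_lock(), the byte range and the printk text are made up, and error handling is trimmed.]

#include <linux/fs.h>
#include <linux/sched.h>

/* Sketch only: take a non-blocking write lock on bytes 0-4095 and report
 * who holds a conflicting lock.  try_range_lock() is hypothetical. */
static int try_range_lock(struct file *filp)
{
        struct file_lock fl, conf;
        int err;

        locks_init_lock(&fl);
        locks_init_lock(&conf);
        fl.fl_type  = F_WRLCK;
        fl.fl_flags = FL_POSIX;         /* no FL_SLEEP: fail with -EAGAIN */
        fl.fl_start = 0;
        fl.fl_end   = 4095;
        fl.fl_owner = current->files;
        fl.fl_pid   = current->tgid;
        fl.fl_file  = filp;

        err = posix_lock_file_conf(filp, &fl, &conf);
        if (err == -EAGAIN)
                printk(KERN_INFO "range held by pid %d (%lld-%lld)\n",
                       conf.fl_pid, (long long)conf.fl_start,
                       (long long)conf.fl_end);
        return err;
}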
* diff --git a/fs/minix/dir.c b/fs/minix/dir.c index 732502aabc05..69224d1fe043 100644 --- a/fs/minix/dir.c +++ b/fs/minix/dir.c @@ -14,7 +14,7 @@ typedef struct minix_dir_entry minix_dirent; static int minix_readdir(struct file *, void *, filldir_t); -struct file_operations minix_dir_operations = { +const struct file_operations minix_dir_operations = { .read = generic_read_dir, .readdir = minix_readdir, .fsync = minix_sync_file, diff --git a/fs/minix/file.c b/fs/minix/file.c index f1d77acb3f01..420b32882a10 100644 --- a/fs/minix/file.c +++ b/fs/minix/file.c @@ -15,7 +15,7 @@ */ int minix_sync_file(struct file *, struct dentry *, int); -struct file_operations minix_file_operations = { +const struct file_operations minix_file_operations = { .llseek = generic_file_llseek, .read = generic_file_read, .write = generic_file_write, diff --git a/fs/minix/minix.h b/fs/minix/minix.h index e42a8bb89001..c55b77cdcc8e 100644 --- a/fs/minix/minix.h +++ b/fs/minix/minix.h @@ -81,8 +81,8 @@ extern int minix_sync_file(struct file *, struct dentry *, int); extern struct inode_operations minix_file_inode_operations; extern struct inode_operations minix_dir_inode_operations; -extern struct file_operations minix_file_operations; -extern struct file_operations minix_dir_operations; +extern const struct file_operations minix_file_operations; +extern const struct file_operations minix_dir_operations; extern struct dentry_operations minix_dentry_operations; static inline struct minix_sb_info *minix_sb(struct super_block *sb) diff --git a/fs/mpage.c b/fs/mpage.c index e431cb3878d6..9bf2eb30e6f4 100644 --- a/fs/mpage.c +++ b/fs/mpage.c @@ -163,9 +163,19 @@ map_buffer_to_page(struct page *page, struct buffer_head *bh, int page_block) } while (page_bh != head); } +/* + * This is the worker routine which does all the work of mapping the disk + * blocks and constructs largest possible bios, submits them for IO if the + * blocks are not contiguous on the disk. + * + * We pass a buffer_head back and forth and use its buffer_mapped() flag to + * represent the validity of its disk mapping and to decide when to do the next + * get_block() call. + */ static struct bio * do_mpage_readpage(struct bio *bio, struct page *page, unsigned nr_pages, - sector_t *last_block_in_bio, get_block_t get_block) + sector_t *last_block_in_bio, struct buffer_head *map_bh, + unsigned long *first_logical_block, get_block_t get_block) { struct inode *inode = page->mapping->host; const unsigned blkbits = inode->i_blkbits; @@ -173,33 +183,72 @@ do_mpage_readpage(struct bio *bio, struct page *page, unsigned nr_pages, const unsigned blocksize = 1 << blkbits; sector_t block_in_file; sector_t last_block; + sector_t last_block_in_file; sector_t blocks[MAX_BUF_PER_PAGE]; unsigned page_block; unsigned first_hole = blocks_per_page; struct block_device *bdev = NULL; - struct buffer_head bh; int length; int fully_mapped = 1; + unsigned nblocks; + unsigned relative_block; if (page_has_buffers(page)) goto confused; block_in_file = (sector_t)page->index << (PAGE_CACHE_SHIFT - blkbits); - last_block = (i_size_read(inode) + blocksize - 1) >> blkbits; + last_block = block_in_file + nr_pages * blocks_per_page; + last_block_in_file = (i_size_read(inode) + blocksize - 1) >> blkbits; + if (last_block > last_block_in_file) + last_block = last_block_in_file; + page_block = 0; + + /* + * Map blocks using the result from the previous get_blocks call first. 
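[The comment above spells out the new contract for do_mpage_readpage(): the caller sets map_bh->b_size to the amount it would like mapped, and get_block() may map a multi-block extent, reporting the mapped length back through b_size (the jfs_get_block and ocfs2_direct_IO_get_blocks hunks in this diff follow the same convention). A hedged sketch of a filesystem get_block() written against that contract; the myfs_* names are hypothetical.]

#include <linux/fs.h>
#include <linux/buffer_head.h>

/* Hypothetical helper: find a contiguous on-disk extent of up to "max"
 * blocks starting at logical block "lblock". */
static int myfs_extent_lookup(struct inode *inode, sector_t lblock,
                              unsigned long max, sector_t *phys,
                              unsigned long *len, int create);

/* A get_block() honouring the "b_size in, b_size out" convention that the
 * comment above describes. */
static int myfs_get_block(struct inode *inode, sector_t iblock,
                          struct buffer_head *bh_result, int create)
{
        unsigned long max_blocks = bh_result->b_size >> inode->i_blkbits;
        unsigned long mapped;           /* blocks contiguous on disk */
        sector_t phys;
        int err;

        err = myfs_extent_lookup(inode, iblock, max_blocks, &phys, &mapped, create);
        if (err)
                return err;
        if (!mapped)
                return 0;               /* hole: leave bh_result unmapped */

        map_bh(bh_result, inode->i_sb, phys);
        /* report how much of the request really is contiguous */
        bh_result->b_size = mapped << inode->i_blkbits;
        return 0;
}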
+ */ + nblocks = map_bh->b_size >> blkbits; + if (buffer_mapped(map_bh) && block_in_file > *first_logical_block && + block_in_file < (*first_logical_block + nblocks)) { + unsigned map_offset = block_in_file - *first_logical_block; + unsigned last = nblocks - map_offset; + + for (relative_block = 0; ; relative_block++) { + if (relative_block == last) { + clear_buffer_mapped(map_bh); + break; + } + if (page_block == blocks_per_page) + break; + blocks[page_block] = map_bh->b_blocknr + map_offset + + relative_block; + page_block++; + block_in_file++; + } + bdev = map_bh->b_bdev; + } + + /* + * Then do more get_blocks calls until we are done with this page. + */ + map_bh->b_page = page; + while (page_block < blocks_per_page) { + map_bh->b_state = 0; + map_bh->b_size = 0; - bh.b_page = page; - for (page_block = 0; page_block < blocks_per_page; - page_block++, block_in_file++) { - bh.b_state = 0; if (block_in_file < last_block) { - if (get_block(inode, block_in_file, &bh, 0)) + map_bh->b_size = (last_block-block_in_file) << blkbits; + if (get_block(inode, block_in_file, map_bh, 0)) goto confused; + *first_logical_block = block_in_file; } - if (!buffer_mapped(&bh)) { + if (!buffer_mapped(map_bh)) { fully_mapped = 0; if (first_hole == blocks_per_page) first_hole = page_block; + page_block++; + block_in_file++; + clear_buffer_mapped(map_bh); continue; } @@ -209,8 +258,8 @@ do_mpage_readpage(struct bio *bio, struct page *page, unsigned nr_pages, * we just collected from get_block into the page's buffers * so readpage doesn't have to repeat the get_block call */ - if (buffer_uptodate(&bh)) { - map_buffer_to_page(page, &bh, page_block); + if (buffer_uptodate(map_bh)) { + map_buffer_to_page(page, map_bh, page_block); goto confused; } @@ -218,10 +267,20 @@ do_mpage_readpage(struct bio *bio, struct page *page, unsigned nr_pages, goto confused; /* hole -> non-hole */ /* Contiguous blocks? 
*/ - if (page_block && blocks[page_block-1] != bh.b_blocknr-1) + if (page_block && blocks[page_block-1] != map_bh->b_blocknr-1) goto confused; - blocks[page_block] = bh.b_blocknr; - bdev = bh.b_bdev; + nblocks = map_bh->b_size >> blkbits; + for (relative_block = 0; ; relative_block++) { + if (relative_block == nblocks) { + clear_buffer_mapped(map_bh); + break; + } else if (page_block == blocks_per_page) + break; + blocks[page_block] = map_bh->b_blocknr+relative_block; + page_block++; + block_in_file++; + } + bdev = map_bh->b_bdev; } if (first_hole != blocks_per_page) { @@ -260,7 +319,7 @@ alloc_new: goto alloc_new; } - if (buffer_boundary(&bh) || (first_hole != blocks_per_page)) + if (buffer_boundary(map_bh) || (first_hole != blocks_per_page)) bio = mpage_bio_submit(READ, bio); else *last_block_in_bio = blocks[blocks_per_page - 1]; @@ -331,7 +390,10 @@ mpage_readpages(struct address_space *mapping, struct list_head *pages, unsigned page_idx; sector_t last_block_in_bio = 0; struct pagevec lru_pvec; + struct buffer_head map_bh; + unsigned long first_logical_block = 0; + clear_buffer_mapped(&map_bh); pagevec_init(&lru_pvec, 0); for (page_idx = 0; page_idx < nr_pages; page_idx++) { struct page *page = list_entry(pages->prev, struct page, lru); @@ -342,7 +404,9 @@ mpage_readpages(struct address_space *mapping, struct list_head *pages, page->index, GFP_KERNEL)) { bio = do_mpage_readpage(bio, page, nr_pages - page_idx, - &last_block_in_bio, get_block); + &last_block_in_bio, &map_bh, + &first_logical_block, + get_block); if (!pagevec_add(&lru_pvec, page)) __pagevec_lru_add(&lru_pvec); } else { @@ -364,9 +428,12 @@ int mpage_readpage(struct page *page, get_block_t get_block) { struct bio *bio = NULL; sector_t last_block_in_bio = 0; + struct buffer_head map_bh; + unsigned long first_logical_block = 0; - bio = do_mpage_readpage(bio, page, 1, - &last_block_in_bio, get_block); + clear_buffer_mapped(&map_bh); + bio = do_mpage_readpage(bio, page, 1, &last_block_in_bio, + &map_bh, &first_logical_block, get_block); if (bio) mpage_bio_submit(READ, bio); return 0; @@ -472,6 +539,7 @@ __mpage_writepage(struct bio *bio, struct page *page, get_block_t get_block, for (page_block = 0; page_block < blocks_per_page; ) { map_bh.b_state = 0; + map_bh.b_size = 1 << blkbits; if (get_block(inode, block_in_file, &map_bh, 1)) goto confused; if (buffer_new(&map_bh)) diff --git a/fs/namei.c b/fs/namei.c index 98dc2e134362..22f6e8d16aa8 100644 --- a/fs/namei.c +++ b/fs/namei.c @@ -546,6 +546,22 @@ struct path { struct dentry *dentry; }; +static inline void dput_path(struct path *path, struct nameidata *nd) +{ + dput(path->dentry); + if (path->mnt != nd->mnt) + mntput(path->mnt); +} + +static inline void path_to_nameidata(struct path *path, struct nameidata *nd) +{ + dput(nd->dentry); + if (nd->mnt != path->mnt) + mntput(nd->mnt); + nd->mnt = path->mnt; + nd->dentry = path->dentry; +} + static __always_inline int __do_follow_link(struct path *path, struct nameidata *nd) { int error; @@ -555,8 +571,11 @@ static __always_inline int __do_follow_link(struct path *path, struct nameidata touch_atime(path->mnt, dentry); nd_set_link(nd, NULL); - if (path->mnt == nd->mnt) - mntget(path->mnt); + if (path->mnt != nd->mnt) { + path_to_nameidata(path, nd); + dget(dentry); + } + mntget(path->mnt); cookie = dentry->d_inode->i_op->follow_link(dentry, nd); error = PTR_ERR(cookie); if (!IS_ERR(cookie)) { @@ -573,22 +592,6 @@ static __always_inline int __do_follow_link(struct path *path, struct nameidata return error; } -static inline void 
dput_path(struct path *path, struct nameidata *nd) -{ - dput(path->dentry); - if (path->mnt != nd->mnt) - mntput(path->mnt); -} - -static inline void path_to_nameidata(struct path *path, struct nameidata *nd) -{ - dput(nd->dentry); - if (nd->mnt != path->mnt) - mntput(nd->mnt); - nd->mnt = path->mnt; - nd->dentry = path->dentry; -} - /* * This limits recursive symlink follows to 8, while * limiting consecutive symlinks to 40. diff --git a/fs/namespace.c b/fs/namespace.c index 71e75bcf4d28..bf478addb852 100644 --- a/fs/namespace.c +++ b/fs/namespace.c @@ -43,9 +43,9 @@ __cacheline_aligned_in_smp DEFINE_SPINLOCK(vfsmount_lock); static int event; -static struct list_head *mount_hashtable; +static struct list_head *mount_hashtable __read_mostly; static int hash_mask __read_mostly, hash_bits __read_mostly; -static kmem_cache_t *mnt_cache; +static kmem_cache_t *mnt_cache __read_mostly; static struct rw_semaphore namespace_sem; /* /sys/fs */ @@ -459,9 +459,9 @@ int may_umount_tree(struct vfsmount *mnt) spin_unlock(&vfsmount_lock); if (actual_refs > minimum_refs) - return -EBUSY; + return 0; - return 0; + return 1; } EXPORT_SYMBOL(may_umount_tree); @@ -481,10 +481,10 @@ EXPORT_SYMBOL(may_umount_tree); */ int may_umount(struct vfsmount *mnt) { - int ret = 0; + int ret = 1; spin_lock(&vfsmount_lock); if (propagate_mount_busy(mnt, 2)) - ret = -EBUSY; + ret = 0; spin_unlock(&vfsmount_lock); return ret; } diff --git a/fs/ncpfs/dir.c b/fs/ncpfs/dir.c index cfd76f431dc0..f0860c602d8b 100644 --- a/fs/ncpfs/dir.c +++ b/fs/ncpfs/dir.c @@ -49,7 +49,7 @@ extern int ncp_symlink(struct inode *, struct dentry *, const char *); #define ncp_symlink NULL #endif -struct file_operations ncp_dir_operations = +const struct file_operations ncp_dir_operations = { .read = generic_read_dir, .readdir = ncp_readdir, diff --git a/fs/ncpfs/file.c b/fs/ncpfs/file.c index ebdad8f6398f..e6b7c67cf057 100644 --- a/fs/ncpfs/file.c +++ b/fs/ncpfs/file.c @@ -283,7 +283,7 @@ static int ncp_release(struct inode *inode, struct file *file) { return 0; } -struct file_operations ncp_file_operations = +const struct file_operations ncp_file_operations = { .llseek = remote_llseek, .read = ncp_file_read, diff --git a/fs/nfs/callback.c b/fs/nfs/callback.c index 99d2cfbce863..90c95adc8c1b 100644 --- a/fs/nfs/callback.c +++ b/fs/nfs/callback.c @@ -14,6 +14,7 @@ #include <linux/sunrpc/svc.h> #include <linux/sunrpc/svcsock.h> #include <linux/nfs_fs.h> +#include <linux/mutex.h> #include <net/inet_sock.h> @@ -31,7 +32,7 @@ struct nfs_callback_data { }; static struct nfs_callback_data nfs_callback_info; -static DECLARE_MUTEX(nfs_callback_sema); +static DEFINE_MUTEX(nfs_callback_mutex); static struct svc_program nfs4_callback_program; unsigned int nfs_callback_set_tcpport; @@ -95,7 +96,7 @@ int nfs_callback_up(void) int ret = 0; lock_kernel(); - down(&nfs_callback_sema); + mutex_lock(&nfs_callback_mutex); if (nfs_callback_info.users++ || nfs_callback_info.pid != 0) goto out; init_completion(&nfs_callback_info.started); @@ -121,7 +122,7 @@ int nfs_callback_up(void) nfs_callback_info.serv = serv; wait_for_completion(&nfs_callback_info.started); out: - up(&nfs_callback_sema); + mutex_unlock(&nfs_callback_mutex); unlock_kernel(); return ret; out_destroy: @@ -139,7 +140,7 @@ int nfs_callback_down(void) int ret = 0; lock_kernel(); - down(&nfs_callback_sema); + mutex_lock(&nfs_callback_mutex); nfs_callback_info.users--; do { if (nfs_callback_info.users != 0 || nfs_callback_info.pid == 0) @@ -147,7 +148,7 @@ int nfs_callback_down(void) if 
(kill_proc(nfs_callback_info.pid, SIGKILL, 1) < 0) break; } while (wait_for_completion_timeout(&nfs_callback_info.stopped, 5*HZ) == 0); - up(&nfs_callback_sema); + mutex_unlock(&nfs_callback_mutex); unlock_kernel(); return ret; } diff --git a/fs/nfs/dir.c b/fs/nfs/dir.c index 06c48b385c94..a23f34894167 100644 --- a/fs/nfs/dir.c +++ b/fs/nfs/dir.c @@ -54,7 +54,7 @@ static int nfs_rename(struct inode *, struct dentry *, static int nfs_fsync_dir(struct file *, struct dentry *, int); static loff_t nfs_llseek_dir(struct file *, loff_t, int); -struct file_operations nfs_dir_operations = { +const struct file_operations nfs_dir_operations = { .llseek = nfs_llseek_dir, .read = generic_read_dir, .readdir = nfs_readdir, diff --git a/fs/nfs/file.c b/fs/nfs/file.c index 5263b2864a44..f1df2c8d9259 100644 --- a/fs/nfs/file.c +++ b/fs/nfs/file.c @@ -49,7 +49,7 @@ static int nfs_check_flags(int flags); static int nfs_lock(struct file *filp, int cmd, struct file_lock *fl); static int nfs_flock(struct file *filp, int cmd, struct file_lock *fl); -struct file_operations nfs_file_operations = { +const struct file_operations nfs_file_operations = { .llseek = nfs_file_llseek, .read = do_sync_read, .write = do_sync_write, @@ -318,10 +318,9 @@ static int nfs_commit_write(struct file *file, struct page *page, unsigned offse return status; } -static int nfs_invalidate_page(struct page *page, unsigned long offset) +static void nfs_invalidate_page(struct page *page, unsigned long offset) { /* FIXME: we really should cancel any unstarted writes on this page */ - return 1; } static int nfs_release_page(struct page *page, gfp_t gfp) diff --git a/fs/nfs/read.c b/fs/nfs/read.c index 3961524fd4ab..624ca7146b6b 100644 --- a/fs/nfs/read.c +++ b/fs/nfs/read.c @@ -663,10 +663,8 @@ int nfs_init_readpagecache(void) if (nfs_rdata_cachep == NULL) return -ENOMEM; - nfs_rdata_mempool = mempool_create(MIN_POOL_READ, - mempool_alloc_slab, - mempool_free_slab, - nfs_rdata_cachep); + nfs_rdata_mempool = mempool_create_slab_pool(MIN_POOL_READ, + nfs_rdata_cachep); if (nfs_rdata_mempool == NULL) return -ENOMEM; diff --git a/fs/nfs/write.c b/fs/nfs/write.c index 3f5225404c97..4cfada2cc09f 100644 --- a/fs/nfs/write.c +++ b/fs/nfs/write.c @@ -1521,17 +1521,13 @@ int nfs_init_writepagecache(void) if (nfs_wdata_cachep == NULL) return -ENOMEM; - nfs_wdata_mempool = mempool_create(MIN_POOL_WRITE, - mempool_alloc_slab, - mempool_free_slab, - nfs_wdata_cachep); + nfs_wdata_mempool = mempool_create_slab_pool(MIN_POOL_WRITE, + nfs_wdata_cachep); if (nfs_wdata_mempool == NULL) return -ENOMEM; - nfs_commit_mempool = mempool_create(MIN_POOL_COMMIT, - mempool_alloc_slab, - mempool_free_slab, - nfs_wdata_cachep); + nfs_commit_mempool = mempool_create_slab_pool(MIN_POOL_COMMIT, + nfs_wdata_cachep); if (nfs_commit_mempool == NULL) return -ENOMEM; diff --git a/fs/nfsd/export.c b/fs/nfsd/export.c index 417ec02df44f..c340be0a3f59 100644 --- a/fs/nfsd/export.c +++ b/fs/nfsd/export.c @@ -57,27 +57,17 @@ static int exp_verify_string(char *cp, int max); #define EXPKEY_HASHMASK (EXPKEY_HASHMAX -1) static struct cache_head *expkey_table[EXPKEY_HASHMAX]; -static inline int svc_expkey_hash(struct svc_expkey *item) +static void expkey_put(struct kref *ref) { - int hash = item->ek_fsidtype; - char * cp = (char*)item->ek_fsid; - int len = key_len(item->ek_fsidtype); + struct svc_expkey *key = container_of(ref, struct svc_expkey, h.ref); - hash ^= hash_mem(cp, len, EXPKEY_HASHBITS); - hash ^= hash_ptr(item->ek_client, EXPKEY_HASHBITS); - return hash & EXPKEY_HASHMASK; -} - 
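[Many hunks in this diff (jffs2, jfs_logmgr, lockd, the NFSv4 callback and state code above) apply the same mechanical conversion: a semaphore used only for mutual exclusion becomes a struct mutex, which is cheaper and gains debugging checks, and a DECLARE_MUTEX_LOCKED used as a start-up handshake becomes a completion, which states the "wait until the thread has started" intent directly. A condensed sketch of the pattern with illustrative foo_* names:]

#include <linux/mutex.h>
#include <linux/completion.h>

/* was: static DECLARE_MUTEX(foo_sema);             */
static DEFINE_MUTEX(foo_mutex);
/* was: static DECLARE_MUTEX_LOCKED(foo_start_sem); */
static DECLARE_COMPLETION(foo_start_done);

static void foo_update(void)
{
        mutex_lock(&foo_mutex);         /* was: down(&foo_sema) */
        /* ... touch the shared state ... */
        mutex_unlock(&foo_mutex);       /* was: up(&foo_sema)   */
}

static int foo_thread(void *unused)
{
        complete(&foo_start_done);      /* was: up(&foo_start_sem) */
        /* ... main loop ... */
        return 0;
}

static void foo_up(void)
{
        /* start foo_thread() here, then wait for it to announce itself;
         * was: down(&foo_start_sem) */
        wait_for_completion(&foo_start_done);
}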
-void expkey_put(struct cache_head *item, struct cache_detail *cd) -{ - if (cache_put(item, cd)) { - struct svc_expkey *key = container_of(item, struct svc_expkey, h); - if (test_bit(CACHE_VALID, &item->flags) && - !test_bit(CACHE_NEGATIVE, &item->flags)) - exp_put(key->ek_export); - auth_domain_put(key->ek_client); - kfree(key); + if (test_bit(CACHE_VALID, &key->h.flags) && + !test_bit(CACHE_NEGATIVE, &key->h.flags)) { + dput(key->ek_dentry); + mntput(key->ek_mnt); } + auth_domain_put(key->ek_client); + kfree(key); } static void expkey_request(struct cache_detail *cd, @@ -95,7 +85,10 @@ static void expkey_request(struct cache_detail *cd, (*bpp)[-1] = '\n'; } -static struct svc_expkey *svc_expkey_lookup(struct svc_expkey *, int); +static struct svc_expkey *svc_expkey_update(struct svc_expkey *new, struct svc_expkey *old); +static struct svc_expkey *svc_expkey_lookup(struct svc_expkey *); +static struct cache_detail svc_expkey_cache; + static int expkey_parse(struct cache_detail *cd, char *mesg, int mlen) { /* client fsidtype fsid [path] */ @@ -106,6 +99,7 @@ static int expkey_parse(struct cache_detail *cd, char *mesg, int mlen) int fsidtype; char *ep; struct svc_expkey key; + struct svc_expkey *ek; if (mesg[mlen-1] != '\n') return -EINVAL; @@ -150,40 +144,38 @@ static int expkey_parse(struct cache_detail *cd, char *mesg, int mlen) key.ek_fsidtype = fsidtype; memcpy(key.ek_fsid, buf, len); + ek = svc_expkey_lookup(&key); + err = -ENOMEM; + if (!ek) + goto out; + /* now we want a pathname, or empty meaning NEGATIVE */ + err = -EINVAL; if ((len=qword_get(&mesg, buf, PAGE_SIZE)) < 0) goto out; dprintk("Path seems to be <%s>\n", buf); err = 0; if (len == 0) { - struct svc_expkey *ek; set_bit(CACHE_NEGATIVE, &key.h.flags); - ek = svc_expkey_lookup(&key, 1); + ek = svc_expkey_update(&key, ek); if (ek) - expkey_put(&ek->h, &svc_expkey_cache); + cache_put(&ek->h, &svc_expkey_cache); + else err = -ENOMEM; } else { struct nameidata nd; - struct svc_expkey *ek; - struct svc_export *exp; err = path_lookup(buf, 0, &nd); if (err) goto out; dprintk("Found the path %s\n", buf); - exp = exp_get_by_name(dom, nd.mnt, nd.dentry, NULL); - - err = -ENOENT; - if (!exp) - goto out_nd; - key.ek_export = exp; - dprintk("And found export\n"); + key.ek_mnt = nd.mnt; + key.ek_dentry = nd.dentry; - ek = svc_expkey_lookup(&key, 1); + ek = svc_expkey_update(&key, ek); if (ek) - expkey_put(&ek->h, &svc_expkey_cache); - exp_put(exp); - err = 0; - out_nd: + cache_put(&ek->h, &svc_expkey_cache); + else + err = -ENOMEM; path_release(&nd); } cache_flush(); @@ -214,35 +206,31 @@ static int expkey_show(struct seq_file *m, if (test_bit(CACHE_VALID, &h->flags) && !test_bit(CACHE_NEGATIVE, &h->flags)) { seq_printf(m, " "); - seq_path(m, ek->ek_export->ex_mnt, ek->ek_export->ex_dentry, "\\ \t\n"); + seq_path(m, ek->ek_mnt, ek->ek_dentry, "\\ \t\n"); } seq_printf(m, "\n"); return 0; } - -struct cache_detail svc_expkey_cache = { - .owner = THIS_MODULE, - .hash_size = EXPKEY_HASHMAX, - .hash_table = expkey_table, - .name = "nfsd.fh", - .cache_put = expkey_put, - .cache_request = expkey_request, - .cache_parse = expkey_parse, - .cache_show = expkey_show, -}; -static inline int svc_expkey_match (struct svc_expkey *a, struct svc_expkey *b) +static inline int expkey_match (struct cache_head *a, struct cache_head *b) { - if (a->ek_fsidtype != b->ek_fsidtype || - a->ek_client != b->ek_client || - memcmp(a->ek_fsid, b->ek_fsid, key_len(a->ek_fsidtype)) != 0) + struct svc_expkey *orig = container_of(a, struct svc_expkey, h); + struct svc_expkey 
*new = container_of(b, struct svc_expkey, h); + + if (orig->ek_fsidtype != new->ek_fsidtype || + orig->ek_client != new->ek_client || + memcmp(orig->ek_fsid, new->ek_fsid, key_len(orig->ek_fsidtype)) != 0) return 0; return 1; } -static inline void svc_expkey_init(struct svc_expkey *new, struct svc_expkey *item) +static inline void expkey_init(struct cache_head *cnew, + struct cache_head *citem) { - cache_get(&item->ek_client->h); + struct svc_expkey *new = container_of(cnew, struct svc_expkey, h); + struct svc_expkey *item = container_of(citem, struct svc_expkey, h); + + kref_get(&item->ek_client->ref); new->ek_client = item->ek_client; new->ek_fsidtype = item->ek_fsidtype; new->ek_fsid[0] = item->ek_fsid[0]; @@ -250,39 +238,94 @@ static inline void svc_expkey_init(struct svc_expkey *new, struct svc_expkey *it new->ek_fsid[2] = item->ek_fsid[2]; } -static inline void svc_expkey_update(struct svc_expkey *new, struct svc_expkey *item) +static inline void expkey_update(struct cache_head *cnew, + struct cache_head *citem) +{ + struct svc_expkey *new = container_of(cnew, struct svc_expkey, h); + struct svc_expkey *item = container_of(citem, struct svc_expkey, h); + + new->ek_mnt = mntget(item->ek_mnt); + new->ek_dentry = dget(item->ek_dentry); +} + +static struct cache_head *expkey_alloc(void) { - cache_get(&item->ek_export->h); - new->ek_export = item->ek_export; + struct svc_expkey *i = kmalloc(sizeof(*i), GFP_KERNEL); + if (i) + return &i->h; + else + return NULL; } -static DefineSimpleCacheLookup(svc_expkey,0) /* no inplace updates */ +static struct cache_detail svc_expkey_cache = { + .owner = THIS_MODULE, + .hash_size = EXPKEY_HASHMAX, + .hash_table = expkey_table, + .name = "nfsd.fh", + .cache_put = expkey_put, + .cache_request = expkey_request, + .cache_parse = expkey_parse, + .cache_show = expkey_show, + .match = expkey_match, + .init = expkey_init, + .update = expkey_update, + .alloc = expkey_alloc, +}; -#define EXPORT_HASHBITS 8 -#define EXPORT_HASHMAX (1<< EXPORT_HASHBITS) -#define EXPORT_HASHMASK (EXPORT_HASHMAX -1) +static struct svc_expkey * +svc_expkey_lookup(struct svc_expkey *item) +{ + struct cache_head *ch; + int hash = item->ek_fsidtype; + char * cp = (char*)item->ek_fsid; + int len = key_len(item->ek_fsidtype); -static struct cache_head *export_table[EXPORT_HASHMAX]; + hash ^= hash_mem(cp, len, EXPKEY_HASHBITS); + hash ^= hash_ptr(item->ek_client, EXPKEY_HASHBITS); + hash &= EXPKEY_HASHMASK; -static inline int svc_export_hash(struct svc_export *item) + ch = sunrpc_cache_lookup(&svc_expkey_cache, &item->h, + hash); + if (ch) + return container_of(ch, struct svc_expkey, h); + else + return NULL; +} + +static struct svc_expkey * +svc_expkey_update(struct svc_expkey *new, struct svc_expkey *old) { - int rv; + struct cache_head *ch; + int hash = new->ek_fsidtype; + char * cp = (char*)new->ek_fsid; + int len = key_len(new->ek_fsidtype); - rv = hash_ptr(item->ex_client, EXPORT_HASHBITS); - rv ^= hash_ptr(item->ex_dentry, EXPORT_HASHBITS); - rv ^= hash_ptr(item->ex_mnt, EXPORT_HASHBITS); - return rv; + hash ^= hash_mem(cp, len, EXPKEY_HASHBITS); + hash ^= hash_ptr(new->ek_client, EXPKEY_HASHBITS); + hash &= EXPKEY_HASHMASK; + + ch = sunrpc_cache_update(&svc_expkey_cache, &new->h, + &old->h, hash); + if (ch) + return container_of(ch, struct svc_expkey, h); + else + return NULL; } -void svc_export_put(struct cache_head *item, struct cache_detail *cd) + +#define EXPORT_HASHBITS 8 +#define EXPORT_HASHMAX (1<< EXPORT_HASHBITS) +#define EXPORT_HASHMASK (EXPORT_HASHMAX -1) + +static 
struct cache_head *export_table[EXPORT_HASHMAX]; + +static void svc_export_put(struct kref *ref) { - if (cache_put(item, cd)) { - struct svc_export *exp = container_of(item, struct svc_export, h); - dput(exp->ex_dentry); - mntput(exp->ex_mnt); - auth_domain_put(exp->ex_client); - kfree(exp); - } + struct svc_export *exp = container_of(ref, struct svc_export, h.ref); + dput(exp->ex_dentry); + mntput(exp->ex_mnt); + auth_domain_put(exp->ex_client); + kfree(exp); } static void svc_export_request(struct cache_detail *cd, @@ -304,7 +347,9 @@ static void svc_export_request(struct cache_detail *cd, (*bpp)[-1] = '\n'; } -static struct svc_export *svc_export_lookup(struct svc_export *, int); +static struct svc_export *svc_export_update(struct svc_export *new, + struct svc_export *old); +static struct svc_export *svc_export_lookup(struct svc_export *); static int check_export(struct inode *inode, int flags) { @@ -417,11 +462,16 @@ static int svc_export_parse(struct cache_detail *cd, char *mesg, int mlen) if (err) goto out; } - expp = svc_export_lookup(&exp, 1); + expp = svc_export_lookup(&exp); if (expp) - exp_put(expp); - err = 0; + expp = svc_export_update(&exp, expp); + else + err = -ENOMEM; cache_flush(); + if (expp == NULL) + err = -ENOMEM; + else + exp_put(expp); out: if (nd.dentry) path_release(&nd); @@ -455,6 +505,46 @@ static int svc_export_show(struct seq_file *m, seq_puts(m, ")\n"); return 0; } +static int svc_export_match(struct cache_head *a, struct cache_head *b) +{ + struct svc_export *orig = container_of(a, struct svc_export, h); + struct svc_export *new = container_of(b, struct svc_export, h); + return orig->ex_client == new->ex_client && + orig->ex_dentry == new->ex_dentry && + orig->ex_mnt == new->ex_mnt; +} + +static void svc_export_init(struct cache_head *cnew, struct cache_head *citem) +{ + struct svc_export *new = container_of(cnew, struct svc_export, h); + struct svc_export *item = container_of(citem, struct svc_export, h); + + kref_get(&item->ex_client->ref); + new->ex_client = item->ex_client; + new->ex_dentry = dget(item->ex_dentry); + new->ex_mnt = mntget(item->ex_mnt); +} + +static void export_update(struct cache_head *cnew, struct cache_head *citem) +{ + struct svc_export *new = container_of(cnew, struct svc_export, h); + struct svc_export *item = container_of(citem, struct svc_export, h); + + new->ex_flags = item->ex_flags; + new->ex_anon_uid = item->ex_anon_uid; + new->ex_anon_gid = item->ex_anon_gid; + new->ex_fsid = item->ex_fsid; +} + +static struct cache_head *svc_export_alloc(void) +{ + struct svc_export *i = kmalloc(sizeof(*i), GFP_KERNEL); + if (i) + return &i->h; + else + return NULL; +} + struct cache_detail svc_export_cache = { .owner = THIS_MODULE, .hash_size = EXPORT_HASHMAX, @@ -464,34 +554,49 @@ struct cache_detail svc_export_cache = { .cache_request = svc_export_request, .cache_parse = svc_export_parse, .cache_show = svc_export_show, + .match = svc_export_match, + .init = svc_export_init, + .update = export_update, + .alloc = svc_export_alloc, }; -static inline int svc_export_match(struct svc_export *a, struct svc_export *b) +static struct svc_export * +svc_export_lookup(struct svc_export *exp) { - return a->ex_client == b->ex_client && - a->ex_dentry == b->ex_dentry && - a->ex_mnt == b->ex_mnt; -} -static inline void svc_export_init(struct svc_export *new, struct svc_export *item) -{ - cache_get(&item->ex_client->h); - new->ex_client = item->ex_client; - new->ex_dentry = dget(item->ex_dentry); - new->ex_mnt = mntget(item->ex_mnt); + struct cache_head 
*ch; + int hash; + hash = hash_ptr(exp->ex_client, EXPORT_HASHBITS); + hash ^= hash_ptr(exp->ex_dentry, EXPORT_HASHBITS); + hash ^= hash_ptr(exp->ex_mnt, EXPORT_HASHBITS); + + ch = sunrpc_cache_lookup(&svc_export_cache, &exp->h, + hash); + if (ch) + return container_of(ch, struct svc_export, h); + else + return NULL; } -static inline void svc_export_update(struct svc_export *new, struct svc_export *item) +static struct svc_export * +svc_export_update(struct svc_export *new, struct svc_export *old) { - new->ex_flags = item->ex_flags; - new->ex_anon_uid = item->ex_anon_uid; - new->ex_anon_gid = item->ex_anon_gid; - new->ex_fsid = item->ex_fsid; + struct cache_head *ch; + int hash; + hash = hash_ptr(old->ex_client, EXPORT_HASHBITS); + hash ^= hash_ptr(old->ex_dentry, EXPORT_HASHBITS); + hash ^= hash_ptr(old->ex_mnt, EXPORT_HASHBITS); + + ch = sunrpc_cache_update(&svc_export_cache, &new->h, + &old->h, + hash); + if (ch) + return container_of(ch, struct svc_export, h); + else + return NULL; } -static DefineSimpleCacheLookup(svc_export,1) /* allow inplace updates */ - -struct svc_expkey * +static struct svc_expkey * exp_find_key(svc_client *clp, int fsid_type, u32 *fsidv, struct cache_req *reqp) { struct svc_expkey key, *ek; @@ -504,7 +609,7 @@ exp_find_key(svc_client *clp, int fsid_type, u32 *fsidv, struct cache_req *reqp) key.ek_fsidtype = fsid_type; memcpy(key.ek_fsid, fsidv, key_len(fsid_type)); - ek = svc_expkey_lookup(&key, 0); + ek = svc_expkey_lookup(&key); if (ek != NULL) if ((err = cache_check(&svc_expkey_cache, &ek->h, reqp))) ek = ERR_PTR(err); @@ -519,13 +624,16 @@ static int exp_set_key(svc_client *clp, int fsid_type, u32 *fsidv, key.ek_client = clp; key.ek_fsidtype = fsid_type; memcpy(key.ek_fsid, fsidv, key_len(fsid_type)); - key.ek_export = exp; + key.ek_mnt = exp->ex_mnt; + key.ek_dentry = exp->ex_dentry; key.h.expiry_time = NEVER; key.h.flags = 0; - ek = svc_expkey_lookup(&key, 1); + ek = svc_expkey_lookup(&key); + if (ek) + ek = svc_expkey_update(&key,ek); if (ek) { - expkey_put(&ek->h, &svc_expkey_cache); + cache_put(&ek->h, &svc_expkey_cache); return 0; } return -ENOMEM; @@ -573,7 +681,7 @@ exp_get_by_name(svc_client *clp, struct vfsmount *mnt, struct dentry *dentry, key.ex_mnt = mnt; key.ex_dentry = dentry; - exp = svc_export_lookup(&key, 0); + exp = svc_export_lookup(&key); if (exp != NULL) switch (cache_check(&svc_export_cache, &exp->h, reqp)) { case 0: break; @@ -654,7 +762,7 @@ static void exp_fsid_unhash(struct svc_export *exp) ek = exp_get_fsid_key(exp->ex_client, exp->ex_fsid); if (ek && !IS_ERR(ek)) { ek->h.expiry_time = get_seconds()-1; - expkey_put(&ek->h, &svc_expkey_cache); + cache_put(&ek->h, &svc_expkey_cache); } svc_expkey_cache.nextcheck = get_seconds(); } @@ -692,7 +800,7 @@ static void exp_unhash(struct svc_export *exp) ek = exp_get_key(exp->ex_client, inode->i_sb->s_dev, inode->i_ino); if (ek && !IS_ERR(ek)) { ek->h.expiry_time = get_seconds()-1; - expkey_put(&ek->h, &svc_expkey_cache); + cache_put(&ek->h, &svc_expkey_cache); } svc_expkey_cache.nextcheck = get_seconds(); } @@ -741,8 +849,8 @@ exp_export(struct nfsctl_export *nxp) if ((nxp->ex_flags & NFSEXP_FSID) && (fsid_key = exp_get_fsid_key(clp, nxp->ex_dev)) && !IS_ERR(fsid_key) && - fsid_key->ek_export && - fsid_key->ek_export != exp) + fsid_key->ek_mnt && + (fsid_key->ek_mnt != nd.mnt || fsid_key->ek_dentry != nd.dentry) ) goto finish; if (exp) { @@ -775,13 +883,13 @@ exp_export(struct nfsctl_export *nxp) new.ex_anon_gid = nxp->ex_anon_gid; new.ex_fsid = nxp->ex_dev; - exp = 
svc_export_lookup(&new, 1); + exp = svc_export_lookup(&new); + if (exp) + exp = svc_export_update(&new, exp); - if (exp == NULL) + if (!exp) goto finish; - err = 0; - if (exp_hash(clp, exp) || exp_fsid_hash(clp, exp)) { /* failed to create at least one index */ @@ -794,7 +902,7 @@ finish: if (exp) exp_put(exp); if (fsid_key && !IS_ERR(fsid_key)) - expkey_put(&fsid_key->h, &svc_expkey_cache); + cache_put(&fsid_key->h, &svc_expkey_cache); if (clp) auth_domain_put(clp); path_release(&nd); @@ -912,6 +1020,24 @@ out: return err; } +struct svc_export * +exp_find(struct auth_domain *clp, int fsid_type, u32 *fsidv, + struct cache_req *reqp) +{ + struct svc_export *exp; + struct svc_expkey *ek = exp_find_key(clp, fsid_type, fsidv, reqp); + if (!ek || IS_ERR(ek)) + return ERR_PTR(PTR_ERR(ek)); + + exp = exp_get_by_name(clp, ek->ek_mnt, ek->ek_dentry, reqp); + cache_put(&ek->h, &svc_expkey_cache); + + if (!exp || IS_ERR(exp)) + return ERR_PTR(PTR_ERR(exp)); + return exp; +} + + /* * Called when we need the filehandle for the root of the pseudofs, * for a given NFSv4 client. The root is defined to be the @@ -922,6 +1048,7 @@ exp_pseudoroot(struct auth_domain *clp, struct svc_fh *fhp, struct cache_req *creq) { struct svc_expkey *fsid_key; + struct svc_export *exp; int rv; u32 fsidv[2]; @@ -933,9 +1060,15 @@ exp_pseudoroot(struct auth_domain *clp, struct svc_fh *fhp, if (!fsid_key || IS_ERR(fsid_key)) return nfserr_perm; - rv = fh_compose(fhp, fsid_key->ek_export, - fsid_key->ek_export->ex_dentry, NULL); - expkey_put(&fsid_key->h, &svc_expkey_cache); + exp = exp_get_by_name(clp, fsid_key->ek_mnt, fsid_key->ek_dentry, creq); + if (exp == NULL) + rv = nfserr_perm; + else if (IS_ERR(exp)) + rv = nfserrno(PTR_ERR(exp)); + else + rv = fh_compose(fhp, exp, + fsid_key->ek_dentry, NULL); + cache_put(&fsid_key->h, &svc_expkey_cache); return rv; } @@ -1054,7 +1187,7 @@ static int e_show(struct seq_file *m, void *p) cache_get(&exp->h); if (cache_check(&svc_export_cache, &exp->h, NULL)) return 0; - if (cache_put(&exp->h, &svc_export_cache)) BUG(); + cache_put(&exp->h, &svc_export_cache); return svc_export_show(m, &svc_export_cache, cp); } @@ -1129,7 +1262,6 @@ exp_delclient(struct nfsctl_client *ncp) */ if (dom) { err = auth_unix_forget_old(dom); - dom->h.expiry_time = get_seconds(); auth_domain_put(dom); } diff --git a/fs/nfsd/nfs4idmap.c b/fs/nfsd/nfs4idmap.c index 13369650cdf9..4b6aa60dfceb 100644 --- a/fs/nfsd/nfs4idmap.c +++ b/fs/nfsd/nfs4idmap.c @@ -76,21 +76,18 @@ struct ent { char authname[IDMAP_NAMESZ]; }; -#define DefineSimpleCacheLookupMap(STRUCT, FUNC) \ - DefineCacheLookup(struct STRUCT, h, FUNC##_lookup, \ - (struct STRUCT *item, int set), /*no setup */, \ - & FUNC##_cache, FUNC##_hash(item), FUNC##_match(item, tmp), \ - STRUCT##_init(new, item), STRUCT##_update(tmp, item), 0) - /* Common entry handling */ #define ENT_HASHBITS 8 #define ENT_HASHMAX (1 << ENT_HASHBITS) #define ENT_HASHMASK (ENT_HASHMAX - 1) -static inline void -ent_init(struct ent *new, struct ent *itm) +static void +ent_init(struct cache_head *cnew, struct cache_head *citm) { + struct ent *new = container_of(cnew, struct ent, h); + struct ent *itm = container_of(citm, struct ent, h); + new->id = itm->id; new->type = itm->type; @@ -98,19 +95,21 @@ ent_init(struct ent *new, struct ent *itm) strlcpy(new->authname, itm->authname, sizeof(new->name)); } -static inline void -ent_update(struct ent *new, struct ent *itm) +static void +ent_put(struct kref *ref) { - ent_init(new, itm); + struct ent *map = container_of(ref, struct ent, h.ref); + 
kfree(map); } -static void -ent_put(struct cache_head *ch, struct cache_detail *cd) +static struct cache_head * +ent_alloc(void) { - if (cache_put(ch, cd)) { - struct ent *map = container_of(ch, struct ent, h); - kfree(map); - } + struct ent *e = kmalloc(sizeof(*e), GFP_KERNEL); + if (e) + return &e->h; + else + return NULL; } /* @@ -149,9 +148,12 @@ idtoname_request(struct cache_detail *cd, struct cache_head *ch, char **bpp, (*bpp)[-1] = '\n'; } -static inline int -idtoname_match(struct ent *a, struct ent *b) +static int +idtoname_match(struct cache_head *ca, struct cache_head *cb) { + struct ent *a = container_of(ca, struct ent, h); + struct ent *b = container_of(cb, struct ent, h); + return (a->id == b->id && a->type == b->type && strcmp(a->authname, b->authname) == 0); } @@ -184,7 +186,8 @@ warn_no_idmapd(struct cache_detail *detail) static int idtoname_parse(struct cache_detail *, char *, int); -static struct ent *idtoname_lookup(struct ent *, int); +static struct ent *idtoname_lookup(struct ent *); +static struct ent *idtoname_update(struct ent *, struct ent *); static struct cache_detail idtoname_cache = { .owner = THIS_MODULE, @@ -196,6 +199,10 @@ static struct cache_detail idtoname_cache = { .cache_parse = idtoname_parse, .cache_show = idtoname_show, .warn_no_listener = warn_no_idmapd, + .match = idtoname_match, + .init = ent_init, + .update = ent_init, + .alloc = ent_alloc, }; int @@ -238,6 +245,11 @@ idtoname_parse(struct cache_detail *cd, char *buf, int buflen) if (ent.h.expiry_time == 0) goto out; + error = -ENOMEM; + res = idtoname_lookup(&ent); + if (!res) + goto out; + /* Name */ error = qword_get(&buf, buf1, PAGE_SIZE); if (error == -EINVAL) @@ -252,10 +264,11 @@ idtoname_parse(struct cache_detail *cd, char *buf, int buflen) memcpy(ent.name, buf1, sizeof(ent.name)); } error = -ENOMEM; - if ((res = idtoname_lookup(&ent, 1)) == NULL) + res = idtoname_update(&ent, res); + if (res == NULL) goto out; - ent_put(&res->h, &idtoname_cache); + cache_put(&res->h, &idtoname_cache); error = 0; out: @@ -264,7 +277,31 @@ out: return error; } -static DefineSimpleCacheLookupMap(ent, idtoname); + +static struct ent * +idtoname_lookup(struct ent *item) +{ + struct cache_head *ch = sunrpc_cache_lookup(&idtoname_cache, + &item->h, + idtoname_hash(item)); + if (ch) + return container_of(ch, struct ent, h); + else + return NULL; +} + +static struct ent * +idtoname_update(struct ent *new, struct ent *old) +{ + struct cache_head *ch = sunrpc_cache_update(&idtoname_cache, + &new->h, &old->h, + idtoname_hash(new)); + if (ch) + return container_of(ch, struct ent, h); + else + return NULL; +} + /* * Name -> ID cache @@ -291,9 +328,12 @@ nametoid_request(struct cache_detail *cd, struct cache_head *ch, char **bpp, (*bpp)[-1] = '\n'; } -static inline int -nametoid_match(struct ent *a, struct ent *b) +static int +nametoid_match(struct cache_head *ca, struct cache_head *cb) { + struct ent *a = container_of(ca, struct ent, h); + struct ent *b = container_of(cb, struct ent, h); + return (a->type == b->type && strcmp(a->name, b->name) == 0 && strcmp(a->authname, b->authname) == 0); } @@ -317,7 +357,8 @@ nametoid_show(struct seq_file *m, struct cache_detail *cd, struct cache_head *h) return 0; } -static struct ent *nametoid_lookup(struct ent *, int); +static struct ent *nametoid_lookup(struct ent *); +static struct ent *nametoid_update(struct ent *, struct ent *); static int nametoid_parse(struct cache_detail *, char *, int); static struct cache_detail nametoid_cache = { @@ -330,6 +371,10 @@ static struct 
cache_detail nametoid_cache = { .cache_parse = nametoid_parse, .cache_show = nametoid_show, .warn_no_listener = warn_no_idmapd, + .match = nametoid_match, + .init = ent_init, + .update = ent_init, + .alloc = ent_alloc, }; static int @@ -379,10 +424,14 @@ nametoid_parse(struct cache_detail *cd, char *buf, int buflen) set_bit(CACHE_NEGATIVE, &ent.h.flags); error = -ENOMEM; - if ((res = nametoid_lookup(&ent, 1)) == NULL) + res = nametoid_lookup(&ent); + if (res == NULL) + goto out; + res = nametoid_update(&ent, res); + if (res == NULL) goto out; - ent_put(&res->h, &nametoid_cache); + cache_put(&res->h, &nametoid_cache); error = 0; out: kfree(buf1); @@ -390,7 +439,30 @@ out: return (error); } -static DefineSimpleCacheLookupMap(ent, nametoid); + +static struct ent * +nametoid_lookup(struct ent *item) +{ + struct cache_head *ch = sunrpc_cache_lookup(&nametoid_cache, + &item->h, + nametoid_hash(item)); + if (ch) + return container_of(ch, struct ent, h); + else + return NULL; +} + +static struct ent * +nametoid_update(struct ent *new, struct ent *old) +{ + struct cache_head *ch = sunrpc_cache_update(&nametoid_cache, + &new->h, &old->h, + nametoid_hash(new)); + if (ch) + return container_of(ch, struct ent, h); + else + return NULL; +} /* * Exported API @@ -458,24 +530,24 @@ idmap_defer(struct cache_req *req) } static inline int -do_idmap_lookup(struct ent *(*lookup_fn)(struct ent *, int), struct ent *key, +do_idmap_lookup(struct ent *(*lookup_fn)(struct ent *), struct ent *key, struct cache_detail *detail, struct ent **item, struct idmap_defer_req *mdr) { - *item = lookup_fn(key, 0); + *item = lookup_fn(key); if (!*item) return -ENOMEM; return cache_check(detail, &(*item)->h, &mdr->req); } static inline int -do_idmap_lookup_nowait(struct ent *(*lookup_fn)(struct ent *, int), +do_idmap_lookup_nowait(struct ent *(*lookup_fn)(struct ent *), struct ent *key, struct cache_detail *detail, struct ent **item) { int ret = -ENOMEM; - *item = lookup_fn(key, 0); + *item = lookup_fn(key); if (!*item) goto out_err; ret = -ETIMEDOUT; @@ -488,7 +560,7 @@ do_idmap_lookup_nowait(struct ent *(*lookup_fn)(struct ent *, int), goto out_put; return 0; out_put: - ent_put(&(*item)->h, detail); + cache_put(&(*item)->h, detail); out_err: *item = NULL; return ret; @@ -496,7 +568,7 @@ out_err: static int idmap_lookup(struct svc_rqst *rqstp, - struct ent *(*lookup_fn)(struct ent *, int), struct ent *key, + struct ent *(*lookup_fn)(struct ent *), struct ent *key, struct cache_detail *detail, struct ent **item) { struct idmap_defer_req *mdr; @@ -539,7 +611,7 @@ idmap_name_to_id(struct svc_rqst *rqstp, int type, const char *name, u32 namelen if (ret) return ret; *id = item->id; - ent_put(&item->h, &nametoid_cache); + cache_put(&item->h, &nametoid_cache); return 0; } @@ -561,7 +633,7 @@ idmap_id_to_name(struct svc_rqst *rqstp, int type, uid_t id, char *name) ret = strlen(item->name); BUG_ON(ret > IDMAP_NAMESZ); memcpy(name, item->name, ret); - ent_put(&item->h, &idtoname_cache); + cache_put(&item->h, &idtoname_cache); return ret; } diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c index f6ab762bea99..47ec112b266c 100644 --- a/fs/nfsd/nfs4state.c +++ b/fs/nfsd/nfs4state.c @@ -49,6 +49,7 @@ #include <linux/nfsd/state.h> #include <linux/nfsd/xdr4.h> #include <linux/namei.h> +#include <linux/mutex.h> #define NFSDDBG_FACILITY NFSDDBG_PROC @@ -77,11 +78,11 @@ static void nfs4_set_recdir(char *recdir); /* Locking: * - * client_sema: + * client_mutex: * protects clientid_hashtbl[], clientstr_hashtbl[], * unconfstr_hashtbl[], 
uncofid_hashtbl[]. */ -static DECLARE_MUTEX(client_sema); +static DEFINE_MUTEX(client_mutex); static kmem_cache_t *stateowner_slab = NULL; static kmem_cache_t *file_slab = NULL; @@ -91,13 +92,13 @@ static kmem_cache_t *deleg_slab = NULL; void nfs4_lock_state(void) { - down(&client_sema); + mutex_lock(&client_mutex); } void nfs4_unlock_state(void) { - up(&client_sema); + mutex_unlock(&client_mutex); } static inline u32 @@ -2749,37 +2750,31 @@ nfsd4_lock(struct svc_rqst *rqstp, struct svc_fh *current_fh, struct nfsd4_lock * Note: locks.c uses the BKL to protect the inode's lock list. */ - status = posix_lock_file(filp, &file_lock); - dprintk("NFSD: nfsd4_lock: posix_lock_file status %d\n",status); + /* XXX?: Just to divert the locks_release_private at the start of + * locks_copy_lock: */ + conflock.fl_ops = NULL; + conflock.fl_lmops = NULL; + status = posix_lock_file_conf(filp, &file_lock, &conflock); + dprintk("NFSD: nfsd4_lock: posix_lock_file_conf status %d\n",status); switch (-status) { case 0: /* success! */ update_stateid(&lock_stp->st_stateid); memcpy(&lock->lk_resp_stateid, &lock_stp->st_stateid, sizeof(stateid_t)); - goto out; - case (EAGAIN): - goto conflicting_lock; + break; + case (EAGAIN): /* conflock holds conflicting lock */ + status = nfserr_denied; + dprintk("NFSD: nfsd4_lock: conflicting lock found!\n"); + nfs4_set_lock_denied(&conflock, &lock->lk_denied); + break; case (EDEADLK): status = nfserr_deadlock; - dprintk("NFSD: nfsd4_lock: posix_lock_file() failed! status %d\n",status); - goto out; + break; default: - status = nfserrno(status); - dprintk("NFSD: nfsd4_lock: posix_lock_file() failed! status %d\n",status); - goto out; - } - -conflicting_lock: - dprintk("NFSD: nfsd4_lock: conflicting lock found!\n"); - status = nfserr_denied; - /* XXX There is a race here. Future patch needed to provide - * an atomic posix_lock_and_test_file - */ - if (!posix_test_lock(filp, &file_lock, &conflock)) { - status = nfserr_serverfault; - goto out; + dprintk("NFSD: nfsd4_lock: posix_lock_file_conf() failed! 
status %d\n",status); + status = nfserr_resource; + break; } - nfs4_set_lock_denied(&conflock, &lock->lk_denied); out: if (status && lock->lk_is_new && lock_sop) release_stateowner(lock_sop); diff --git a/fs/nfsd/nfsctl.c b/fs/nfsd/nfsctl.c index c8960aff0968..3ef017b3b5bd 100644 --- a/fs/nfsd/nfsctl.c +++ b/fs/nfsd/nfsctl.c @@ -134,7 +134,7 @@ static ssize_t nfsctl_transaction_read(struct file *file, char __user *buf, size return simple_transaction_read(file, buf, size, pos); } -static struct file_operations transaction_ops = { +static const struct file_operations transaction_ops = { .write = nfsctl_transaction_write, .read = nfsctl_transaction_read, .release = simple_transaction_release, @@ -146,7 +146,7 @@ static int exports_open(struct inode *inode, struct file *file) return seq_open(file, &nfs_exports_op); } -static struct file_operations exports_operations = { +static const struct file_operations exports_operations = { .open = exports_open, .read = seq_read, .llseek = seq_lseek, diff --git a/fs/nfsd/nfsfh.c b/fs/nfsd/nfsfh.c index 7a3e397b4ed3..3f2ec2e6d06c 100644 --- a/fs/nfsd/nfsfh.c +++ b/fs/nfsd/nfsfh.c @@ -506,7 +506,7 @@ fh_put(struct svc_fh *fhp) nfsd_nr_put++; } if (exp) { - svc_export_put(&exp->h, &svc_export_cache); + cache_put(&exp->h, &svc_export_cache); fhp->fh_export = NULL; } return; diff --git a/fs/nfsd/stats.c b/fs/nfsd/stats.c index 1cf955bcc526..57265d563804 100644 --- a/fs/nfsd/stats.c +++ b/fs/nfsd/stats.c @@ -80,7 +80,7 @@ static int nfsd_proc_open(struct inode *inode, struct file *file) return single_open(file, nfsd_proc_show, NULL); } -static struct file_operations nfsd_proc_fops = { +static const struct file_operations nfsd_proc_fops = { .owner = THIS_MODULE, .open = nfsd_proc_open, .read = seq_read, diff --git a/fs/nfsd/vfs.c b/fs/nfsd/vfs.c index 5320e5afaddb..31018333dc38 100644 --- a/fs/nfsd/vfs.c +++ b/fs/nfsd/vfs.c @@ -706,7 +706,7 @@ nfsd_close(struct file *filp) * after it. */ static inline int nfsd_dosync(struct file *filp, struct dentry *dp, - struct file_operations *fop) + const struct file_operations *fop) { struct inode *inode = dp->d_inode; int (*fsync) (struct file *, struct dentry *, int); diff --git a/fs/ntfs/dir.c b/fs/ntfs/dir.c index 9d9ed3fe371d..d1e2c6f9f05e 100644 --- a/fs/ntfs/dir.c +++ b/fs/ntfs/dir.c @@ -1553,7 +1553,7 @@ static int ntfs_dir_fsync(struct file *filp, struct dentry *dentry, #endif /* NTFS_RW */ -struct file_operations ntfs_dir_ops = { +const struct file_operations ntfs_dir_ops = { .llseek = generic_file_llseek, /* Seek inside directory. */ .read = generic_read_dir, /* Return -EISDIR. */ .readdir = ntfs_readdir, /* Read directory contents. */ diff --git a/fs/ntfs/file.c b/fs/ntfs/file.c index f5d057e4acc2..c63a83e8da98 100644 --- a/fs/ntfs/file.c +++ b/fs/ntfs/file.c @@ -2294,7 +2294,7 @@ static int ntfs_file_fsync(struct file *filp, struct dentry *dentry, #endif /* NTFS_RW */ -struct file_operations ntfs_file_ops = { +const struct file_operations ntfs_file_ops = { .llseek = generic_file_llseek, /* Seek inside file. */ .read = generic_file_read, /* Read from file. */ .aio_read = generic_file_aio_read, /* Async read from file. 
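[The nfsd export and idmap hunks above replace the DefineSimpleCacheLookup macros with the table-driven sunrpc cache API: each cache_detail now supplies match/init/update/alloc callbacks, entries are released through a kref-backed cache_put(), and lookups and updates go through sunrpc_cache_lookup()/sunrpc_cache_update(). A schematic sketch of that pattern for a made-up "myent" cache; hash setup, parse/show hooks and error paths are omitted.]

#include <linux/slab.h>
#include <linux/sunrpc/cache.h>

/* "myent" is a made-up entry type; its shape follows the svc_expkey and
 * idmap conversions above. */
struct myent {
        struct cache_head h;            /* embedded, reference-counted head */
        int key;
        int value;
};

static void myent_put(struct kref *ref)
{
        kfree(container_of(ref, struct myent, h.ref));
}

static int myent_match(struct cache_head *a, struct cache_head *b)
{
        return container_of(a, struct myent, h)->key ==
               container_of(b, struct myent, h)->key;
}

static void myent_init(struct cache_head *cnew, struct cache_head *citem)
{
        struct myent *new = container_of(cnew, struct myent, h);
        struct myent *item = container_of(citem, struct myent, h);

        new->key = item->key;
        new->value = item->value;
}

static struct cache_head *myent_alloc(void)
{
        struct myent *e = kmalloc(sizeof(*e), GFP_KERNEL);
        return e ? &e->h : NULL;
}

static struct cache_detail myent_cache = {
        .name      = "myent",
        .cache_put = myent_put,
        .match     = myent_match,
        .init      = myent_init,
        .update    = myent_init,        /* reused, as the idmap caches do */
        .alloc     = myent_alloc,
        /* owner, hash_size, hash_table and the request hooks omitted here */
};

/* Lookup-or-insert from a stack template, update in place, drop the ref. */
static void myent_store(struct myent *key, int hash)
{
        struct cache_head *ch = sunrpc_cache_lookup(&myent_cache, &key->h, hash);

        if (ch)
                ch = sunrpc_cache_update(&myent_cache, &key->h, ch, hash);
        if (ch)
                cache_put(ch, &myent_cache);
}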
*/ @@ -2337,6 +2337,6 @@ struct inode_operations ntfs_file_inode_ops = { #endif /* NTFS_RW */ }; -struct file_operations ntfs_empty_file_ops = {}; +const struct file_operations ntfs_empty_file_ops = {}; struct inode_operations ntfs_empty_inode_ops = {}; diff --git a/fs/ntfs/logfile.c b/fs/ntfs/logfile.c index 0fd70295cca6..4af2ad1193ec 100644 --- a/fs/ntfs/logfile.c +++ b/fs/ntfs/logfile.c @@ -515,10 +515,10 @@ BOOL ntfs_check_logfile(struct inode *log_vi, RESTART_PAGE_HEADER **rp) log_page_size = PAGE_CACHE_SIZE; log_page_mask = log_page_size - 1; /* - * Use generic_ffs() instead of ffs() to enable the compiler to + * Use ntfs_ffs() instead of ffs() to enable the compiler to * optimize log_page_size and log_page_bits into constants. */ - log_page_bits = generic_ffs(log_page_size) - 1; + log_page_bits = ntfs_ffs(log_page_size) - 1; size &= ~(s64)(log_page_size - 1); /* * Ensure the log file is big enough to store at least the two restart diff --git a/fs/ntfs/mft.c b/fs/ntfs/mft.c index 4e72bc7afdf9..2438c00ec0ce 100644 --- a/fs/ntfs/mft.c +++ b/fs/ntfs/mft.c @@ -2670,7 +2670,7 @@ mft_rec_already_initialized: ni->name_len = 4; ni->itype.index.block_size = 4096; - ni->itype.index.block_size_bits = generic_ffs(4096) - 1; + ni->itype.index.block_size_bits = ntfs_ffs(4096) - 1; ni->itype.index.collation_rule = COLLATION_FILE_NAME; if (vol->cluster_size <= ni->itype.index.block_size) { ni->itype.index.vcn_size = vol->cluster_size; diff --git a/fs/ntfs/ntfs.h b/fs/ntfs/ntfs.h index 0624c8ef4d9c..bf7b3d7c0930 100644 --- a/fs/ntfs/ntfs.h +++ b/fs/ntfs/ntfs.h @@ -60,13 +60,13 @@ extern struct kmem_cache *ntfs_index_ctx_cache; extern struct address_space_operations ntfs_aops; extern struct address_space_operations ntfs_mst_aops; -extern struct file_operations ntfs_file_ops; +extern const struct file_operations ntfs_file_ops; extern struct inode_operations ntfs_file_inode_ops; -extern struct file_operations ntfs_dir_ops; +extern const struct file_operations ntfs_dir_ops; extern struct inode_operations ntfs_dir_inode_ops; -extern struct file_operations ntfs_empty_file_ops; +extern const struct file_operations ntfs_empty_file_ops; extern struct inode_operations ntfs_empty_inode_ops; extern struct export_operations ntfs_export_ops; @@ -132,4 +132,33 @@ extern int ntfs_ucstonls(const ntfs_volume *vol, const ntfschar *ins, /* From fs/ntfs/upcase.c */ extern ntfschar *generate_default_upcase(void); +static inline int ntfs_ffs(int x) +{ + int r = 1; + + if (!x) + return 0; + if (!(x & 0xffff)) { + x >>= 16; + r += 16; + } + if (!(x & 0xff)) { + x >>= 8; + r += 8; + } + if (!(x & 0xf)) { + x >>= 4; + r += 4; + } + if (!(x & 3)) { + x >>= 2; + r += 2; + } + if (!(x & 1)) { + x >>= 1; + r += 1; + } + return r; +} + #endif /* _LINUX_NTFS_H */ diff --git a/fs/ocfs2/aops.c b/fs/ocfs2/aops.c index bf931ba1d364..0d858d0b25be 100644 --- a/fs/ocfs2/aops.c +++ b/fs/ocfs2/aops.c @@ -540,7 +540,6 @@ bail: * fs_count, map_bh, dio->rw == WRITE); */ static int ocfs2_direct_IO_get_blocks(struct inode *inode, sector_t iblock, - unsigned long max_blocks, struct buffer_head *bh_result, int create) { int ret; @@ -548,6 +547,7 @@ static int ocfs2_direct_IO_get_blocks(struct inode *inode, sector_t iblock, u64 p_blkno; int contig_blocks; unsigned char blocksize_bits; + unsigned long max_blocks = bh_result->b_size >> inode->i_blkbits; if (!inode || !bh_result) { mlog(ML_ERROR, "inode or bh_result is null\n"); diff --git a/fs/ocfs2/dlmglue.c b/fs/ocfs2/dlmglue.c index 84f153aca692..64cd52860c87 100644 --- a/fs/ocfs2/dlmglue.c +++ 
b/fs/ocfs2/dlmglue.c @@ -2017,7 +2017,7 @@ out: return ret; } -static struct file_operations ocfs2_dlm_debug_fops = { +static const struct file_operations ocfs2_dlm_debug_fops = { .open = ocfs2_dlm_debug_open, .release = ocfs2_dlm_debug_release, .read = seq_read, diff --git a/fs/ocfs2/file.c b/fs/ocfs2/file.c index 4b4cbadd5838..34e903a6a46b 100644 --- a/fs/ocfs2/file.c +++ b/fs/ocfs2/file.c @@ -1176,7 +1176,7 @@ struct inode_operations ocfs2_special_file_iops = { .getattr = ocfs2_getattr, }; -struct file_operations ocfs2_fops = { +const struct file_operations ocfs2_fops = { .read = do_sync_read, .write = do_sync_write, .sendfile = generic_file_sendfile, @@ -1188,7 +1188,7 @@ struct file_operations ocfs2_fops = { .aio_write = ocfs2_file_aio_write, }; -struct file_operations ocfs2_dops = { +const struct file_operations ocfs2_dops = { .read = generic_read_dir, .readdir = ocfs2_readdir, .fsync = ocfs2_sync_file, diff --git a/fs/ocfs2/file.h b/fs/ocfs2/file.h index a5ea33b24060..740c9e7ca599 100644 --- a/fs/ocfs2/file.h +++ b/fs/ocfs2/file.h @@ -26,8 +26,8 @@ #ifndef OCFS2_FILE_H #define OCFS2_FILE_H -extern struct file_operations ocfs2_fops; -extern struct file_operations ocfs2_dops; +extern const struct file_operations ocfs2_fops; +extern const struct file_operations ocfs2_dops; extern struct inode_operations ocfs2_file_iops; extern struct inode_operations ocfs2_special_file_iops; struct ocfs2_alloc_context; diff --git a/fs/ocfs2/journal.c b/fs/ocfs2/journal.c index ae3440ca083c..6a610ae53583 100644 --- a/fs/ocfs2/journal.c +++ b/fs/ocfs2/journal.c @@ -377,7 +377,7 @@ int ocfs2_journal_access(struct ocfs2_journal_handle *handle, BUG_ON(!bh); BUG_ON(!(handle->flags & OCFS2_HANDLE_STARTED)); - mlog_entry("bh->b_blocknr=%llu, type=%d (\"%s\"), bh->b_size = %hu\n", + mlog_entry("bh->b_blocknr=%llu, type=%d (\"%s\"), bh->b_size = %zu\n", (unsigned long long)bh->b_blocknr, type, (type == OCFS2_JOURNAL_ACCESS_CREATE) ? "OCFS2_JOURNAL_ACCESS_CREATE" : @@ -582,7 +582,8 @@ int ocfs2_journal_init(struct ocfs2_journal *journal, int *dirty) } mlog(0, "inode->i_size = %lld\n", inode->i_size); - mlog(0, "inode->i_blocks = %lu\n", inode->i_blocks); + mlog(0, "inode->i_blocks = %llu\n", + (unsigned long long)inode->i_blocks); mlog(0, "inode->ip_clusters = %u\n", OCFS2_I(inode)->ip_clusters); /* call the kernels journal init function now */ @@ -850,8 +851,9 @@ static int ocfs2_force_read_journal(struct inode *inode) memset(bhs, 0, sizeof(struct buffer_head *) * CONCURRENT_JOURNAL_FILL); - mlog(0, "Force reading %lu blocks\n", - (inode->i_blocks >> (inode->i_sb->s_blocksize_bits - 9))); + mlog(0, "Force reading %llu blocks\n", + (unsigned long long)(inode->i_blocks >> + (inode->i_sb->s_blocksize_bits - 9))); v_blkno = 0; while (v_blkno < diff --git a/fs/ocfs2/namei.c b/fs/ocfs2/namei.c index 274f61d0cda9..0673862c8bdd 100644 --- a/fs/ocfs2/namei.c +++ b/fs/ocfs2/namei.c @@ -1444,8 +1444,9 @@ static int ocfs2_create_symlink_data(struct ocfs2_super *osb, * write i_size + 1 bytes. */ blocks = (bytes_left + sb->s_blocksize - 1) >> sb->s_blocksize_bits; - mlog_entry("i_blocks = %lu, i_size = %llu, blocks = %d\n", - inode->i_blocks, i_size_read(inode), blocks); + mlog_entry("i_blocks = %llu, i_size = %llu, blocks = %d\n", + (unsigned long long)inode->i_blocks, + i_size_read(inode), blocks); /* Sanity check -- make sure we're going to fit. 
*/ if (bytes_left > diff --git a/fs/openpromfs/inode.c b/fs/openpromfs/inode.c index aeb0106890e4..0f14276a2e51 100644 --- a/fs/openpromfs/inode.c +++ b/fs/openpromfs/inode.c @@ -581,17 +581,17 @@ int property_release (struct inode *inode, struct file *filp) return 0; } -static struct file_operations openpromfs_prop_ops = { +static const struct file_operations openpromfs_prop_ops = { .read = property_read, .write = property_write, .release = property_release, }; -static struct file_operations openpromfs_nodenum_ops = { +static const struct file_operations openpromfs_nodenum_ops = { .read = nodenum_read, }; -static struct file_operations openprom_operations = { +static const struct file_operations openprom_operations = { .read = generic_read_dir, .readdir = openpromfs_readdir, }; diff --git a/fs/partitions/check.c b/fs/partitions/check.c index f924f459bdb8..af0cb4b9e784 100644 --- a/fs/partitions/check.c +++ b/fs/partitions/check.c @@ -297,6 +297,25 @@ struct kobj_type ktype_part = { .sysfs_ops = &part_sysfs_ops, }; +static inline void partition_sysfs_add_subdir(struct hd_struct *p) +{ + struct kobject *k; + + k = kobject_get(&p->kobj); + p->holder_dir = kobject_add_dir(k, "holders"); + kobject_put(k); +} + +static inline void disk_sysfs_add_subdirs(struct gendisk *disk) +{ + struct kobject *k; + + k = kobject_get(&disk->kobj); + disk->holder_dir = kobject_add_dir(k, "holders"); + disk->slave_dir = kobject_add_dir(k, "slaves"); + kobject_put(k); +} + void delete_partition(struct gendisk *disk, int part) { struct hd_struct *p = disk->part[part-1]; @@ -310,6 +329,8 @@ void delete_partition(struct gendisk *disk, int part) p->ios[0] = p->ios[1] = 0; p->sectors[0] = p->sectors[1] = 0; devfs_remove("%s/part%d", disk->devfs_name, part); + if (p->holder_dir) + kobject_unregister(p->holder_dir); kobject_unregister(&p->kobj); } @@ -337,6 +358,7 @@ void add_partition(struct gendisk *disk, int part, sector_t start, sector_t len) p->kobj.parent = &disk->kobj; p->kobj.ktype = &ktype_part; kobject_register(&p->kobj); + partition_sysfs_add_subdir(p); disk->part[part-1] = p; } @@ -383,6 +405,7 @@ void register_disk(struct gendisk *disk) if ((err = kobject_add(&disk->kobj))) return; disk_sysfs_symlinks(disk); + disk_sysfs_add_subdirs(disk); kobject_uevent(&disk->kobj, KOBJ_ADD); /* No minors to use for partitions */ @@ -483,6 +506,10 @@ void del_gendisk(struct gendisk *disk) devfs_remove_disk(disk); + if (disk->holder_dir) + kobject_unregister(disk->holder_dir); + if (disk->slave_dir) + kobject_unregister(disk->slave_dir); if (disk->driverfs_dev) { char *disk_name = make_block_name(disk); sysfs_remove_link(&disk->kobj, "device"); diff --git a/fs/partitions/devfs.c b/fs/partitions/devfs.c index 87f50444fd39..3f0a780c9cec 100644 --- a/fs/partitions/devfs.c +++ b/fs/partitions/devfs.c @@ -6,7 +6,7 @@ #include <linux/vmalloc.h> #include <linux/genhd.h> #include <linux/bitops.h> -#include <asm/semaphore.h> +#include <linux/mutex.h> struct unique_numspace { @@ -16,7 +16,7 @@ struct unique_numspace { struct semaphore mutex; }; -static DECLARE_MUTEX(numspace_mutex); +static DEFINE_MUTEX(numspace_mutex); static int expand_numspace(struct unique_numspace *s) { @@ -48,7 +48,7 @@ static int alloc_unique_number(struct unique_numspace *s) { int rval = 0; - down(&numspace_mutex); + mutex_lock(&numspace_mutex); if (s->num_free < 1) rval = expand_numspace(s); if (!rval) { @@ -56,7 +56,7 @@ static int alloc_unique_number(struct unique_numspace *s) --s->num_free; __set_bit(rval, s->bits); } - up(&numspace_mutex); + 
mutex_unlock(&numspace_mutex); return rval; } @@ -66,11 +66,11 @@ static void dealloc_unique_number(struct unique_numspace *s, int number) int old_val; if (number >= 0) { - down(&numspace_mutex); + mutex_lock(&numspace_mutex); old_val = __test_and_clear_bit(number, s->bits); if (old_val) ++s->num_free; - up(&numspace_mutex); + mutex_unlock(&numspace_mutex); } } diff --git a/fs/pipe.c b/fs/pipe.c index d976866a115b..e2f4f1d9ffc2 100644 --- a/fs/pipe.c +++ b/fs/pipe.c @@ -568,7 +568,7 @@ pipe_rdwr_open(struct inode *inode, struct file *filp) * The file_operations structs are not static because they * are also used in linux/fs/fifo.c to do operations on FIFOs. */ -struct file_operations read_fifo_fops = { +const struct file_operations read_fifo_fops = { .llseek = no_llseek, .read = pipe_read, .readv = pipe_readv, @@ -580,7 +580,7 @@ struct file_operations read_fifo_fops = { .fasync = pipe_read_fasync, }; -struct file_operations write_fifo_fops = { +const struct file_operations write_fifo_fops = { .llseek = no_llseek, .read = bad_pipe_r, .write = pipe_write, @@ -592,7 +592,7 @@ struct file_operations write_fifo_fops = { .fasync = pipe_write_fasync, }; -struct file_operations rdwr_fifo_fops = { +const struct file_operations rdwr_fifo_fops = { .llseek = no_llseek, .read = pipe_read, .readv = pipe_readv, @@ -675,7 +675,7 @@ fail_page: return NULL; } -static struct vfsmount *pipe_mnt; +static struct vfsmount *pipe_mnt __read_mostly; static int pipefs_delete_dentry(struct dentry *dentry) { return 1; diff --git a/fs/proc/array.c b/fs/proc/array.c index 7eb1bd7f800c..7a76ad570230 100644 --- a/fs/proc/array.c +++ b/fs/proc/array.c @@ -330,7 +330,6 @@ static int do_task_stat(struct task_struct *task, char * buffer, int whole) unsigned long min_flt = 0, maj_flt = 0; cputime_t cutime, cstime, utime, stime; unsigned long rsslim = 0; - DEFINE_KTIME(it_real_value); struct task_struct *t; char tcomm[sizeof(task->comm)]; @@ -386,7 +385,6 @@ static int do_task_stat(struct task_struct *task, char * buffer, int whole) utime = cputime_add(utime, task->signal->utime); stime = cputime_add(stime, task->signal->stime); } - it_real_value = task->signal->real_timer.expires; } ppid = pid_alive(task) ? task->group_leader->real_parent->tgid : 0; read_unlock(&tasklist_lock); @@ -413,7 +411,7 @@ static int do_task_stat(struct task_struct *task, char * buffer, int whole) start_time = nsec_to_clock_t(start_time); res = sprintf(buffer,"%d (%s) %c %d %d %d %d %d %lu %lu \ -%lu %lu %lu %lu %lu %ld %ld %ld %ld %d %ld %llu %lu %ld %lu %lu %lu %lu %lu \ +%lu %lu %lu %lu %lu %ld %ld %ld %ld %d 0 %llu %lu %ld %lu %lu %lu %lu %lu \ %lu %lu %lu %lu %lu %lu %lu %lu %d %d %lu %lu\n", task->pid, tcomm, @@ -435,7 +433,6 @@ static int do_task_stat(struct task_struct *task, char * buffer, int whole) priority, nice, num_threads, - (long) ktime_to_clock_t(it_real_value), start_time, vsize, mm ? 
get_mm_rss(mm) : 0, diff --git a/fs/proc/generic.c b/fs/proc/generic.c index 20e5c4509a43..4ba03009cf72 100644 --- a/fs/proc/generic.c +++ b/fs/proc/generic.c @@ -19,6 +19,7 @@ #include <linux/idr.h> #include <linux/namei.h> #include <linux/bitops.h> +#include <linux/spinlock.h> #include <asm/uaccess.h> #include "internal.h" @@ -29,6 +30,8 @@ static ssize_t proc_file_write(struct file *file, const char __user *buffer, size_t count, loff_t *ppos); static loff_t proc_file_lseek(struct file *, loff_t, int); +DEFINE_SPINLOCK(proc_subdir_lock); + int proc_match(int len, const char *name, struct proc_dir_entry *de) { if (de->namelen != len) @@ -277,7 +280,9 @@ static int xlate_proc_name(const char *name, const char *cp = name, *next; struct proc_dir_entry *de; int len; + int rtn = 0; + spin_lock(&proc_subdir_lock); de = &proc_root; while (1) { next = strchr(cp, '/'); @@ -289,13 +294,17 @@ static int xlate_proc_name(const char *name, if (proc_match(len, cp, de)) break; } - if (!de) - return -ENOENT; + if (!de) { + rtn = -ENOENT; + goto out; + } cp += len + 1; } *residual = cp; *ret = de; - return 0; +out: + spin_unlock(&proc_subdir_lock); + return rtn; } static DEFINE_IDR(proc_inum_idr); @@ -380,6 +389,7 @@ struct dentry *proc_lookup(struct inode * dir, struct dentry *dentry, struct nam int error = -ENOENT; lock_kernel(); + spin_lock(&proc_subdir_lock); de = PDE(dir); if (de) { for (de = de->subdir; de ; de = de->next) { @@ -388,12 +398,15 @@ struct dentry *proc_lookup(struct inode * dir, struct dentry *dentry, struct nam if (!memcmp(dentry->d_name.name, de->name, de->namelen)) { unsigned int ino = de->low_ino; + spin_unlock(&proc_subdir_lock); error = -EINVAL; inode = proc_get_inode(dir->i_sb, ino, de); + spin_lock(&proc_subdir_lock); break; } } } + spin_unlock(&proc_subdir_lock); unlock_kernel(); if (inode) { @@ -447,11 +460,13 @@ int proc_readdir(struct file * filp, filp->f_pos++; /* fall through */ default: + spin_lock(&proc_subdir_lock); de = de->subdir; i -= 2; for (;;) { if (!de) { ret = 1; + spin_unlock(&proc_subdir_lock); goto out; } if (!i) @@ -461,12 +476,16 @@ int proc_readdir(struct file * filp, } do { + /* filldir passes info to user space */ + spin_unlock(&proc_subdir_lock); if (filldir(dirent, de->name, de->namelen, filp->f_pos, de->low_ino, de->mode >> 12) < 0) goto out; + spin_lock(&proc_subdir_lock); filp->f_pos++; de = de->next; } while (de); + spin_unlock(&proc_subdir_lock); } ret = 1; out: unlock_kernel(); @@ -500,9 +519,13 @@ static int proc_register(struct proc_dir_entry * dir, struct proc_dir_entry * dp if (i == 0) return -EAGAIN; dp->low_ino = i; + + spin_lock(&proc_subdir_lock); dp->next = dir->subdir; dp->parent = dir; dir->subdir = dp; + spin_unlock(&proc_subdir_lock); + if (S_ISDIR(dp->mode)) { if (dp->proc_iops == NULL) { dp->proc_fops = &proc_dir_operations; @@ -537,7 +560,7 @@ static void proc_kill_inodes(struct proc_dir_entry *de) struct file * filp = list_entry(p, struct file, f_u.fu_list); struct dentry * dentry = filp->f_dentry; struct inode * inode; - struct file_operations *fops; + const struct file_operations *fops; if (dentry->d_op != &proc_dentry_operations) continue; @@ -694,6 +717,8 @@ void remove_proc_entry(const char *name, struct proc_dir_entry *parent) if (!parent && xlate_proc_name(name, &parent, &fn) != 0) goto out; len = strlen(fn); + + spin_lock(&proc_subdir_lock); for (p = &parent->subdir; *p; p=&(*p)->next ) { if (!proc_match(len, fn, *p)) continue; @@ -714,6 +739,7 @@ void remove_proc_entry(const char *name, struct proc_dir_entry *parent) } 
break; } + spin_unlock(&proc_subdir_lock); out: return; } diff --git a/fs/proc/internal.h b/fs/proc/internal.h index 95a1cf32b838..0502f17b860d 100644 --- a/fs/proc/internal.h +++ b/fs/proc/internal.h @@ -30,7 +30,7 @@ do { \ #endif -extern void create_seq_entry(char *name, mode_t mode, struct file_operations *f); +extern void create_seq_entry(char *name, mode_t mode, const struct file_operations *f); extern int proc_exe_link(struct inode *, struct dentry **, struct vfsmount **); extern int proc_tid_stat(struct task_struct *, char *); extern int proc_tgid_stat(struct task_struct *, char *); diff --git a/fs/proc/kcore.c b/fs/proc/kcore.c index adc2cd95169a..17f6e8fa1397 100644 --- a/fs/proc/kcore.c +++ b/fs/proc/kcore.c @@ -31,7 +31,7 @@ static int open_kcore(struct inode * inode, struct file * filp) static ssize_t read_kcore(struct file *, char __user *, size_t, loff_t *); -struct file_operations proc_kcore_operations = { +const struct file_operations proc_kcore_operations = { .read = read_kcore, .open = open_kcore, }; diff --git a/fs/proc/kmsg.c b/fs/proc/kmsg.c index 10d37bf25206..ff3b90b56e9d 100644 --- a/fs/proc/kmsg.c +++ b/fs/proc/kmsg.c @@ -47,7 +47,7 @@ static unsigned int kmsg_poll(struct file *file, poll_table *wait) } -struct file_operations proc_kmsg_operations = { +const struct file_operations proc_kmsg_operations = { .read = kmsg_read, .poll = kmsg_poll, .open = kmsg_open, diff --git a/fs/proc/proc_devtree.c b/fs/proc/proc_devtree.c index 9bdd077d6f55..596b4b4f1cc8 100644 --- a/fs/proc/proc_devtree.c +++ b/fs/proc/proc_devtree.c @@ -136,9 +136,11 @@ void proc_device_tree_add_node(struct device_node *np, * properties are quite unimportant for us though, thus we * simply "skip" them here, but we do have to check. */ + spin_lock(&proc_subdir_lock); for (ent = de->subdir; ent != NULL; ent = ent->next) if (!strcmp(ent->name, pp->name)) break; + spin_unlock(&proc_subdir_lock); if (ent != NULL) { printk(KERN_WARNING "device-tree: property \"%s\" name" " conflicts with node in %s\n", pp->name, diff --git a/fs/proc/proc_misc.c b/fs/proc/proc_misc.c index 1e9ea37d457e..ef5a3323f4b5 100644 --- a/fs/proc/proc_misc.c +++ b/fs/proc/proc_misc.c @@ -534,7 +534,7 @@ static int show_stat(struct seq_file *p, void *v) if (wall_to_monotonic.tv_nsec) --jif; - for_each_cpu(i) { + for_each_possible_cpu(i) { int j; user = cputime64_add(user, kstat_cpu(i).cpustat.user); @@ -731,7 +731,7 @@ static struct file_operations proc_sysrq_trigger_operations = { struct proc_dir_entry *proc_root_kcore; -void create_seq_entry(char *name, mode_t mode, struct file_operations *f) +void create_seq_entry(char *name, mode_t mode, const struct file_operations *f) { struct proc_dir_entry *entry; entry = create_proc_entry(name, mode, NULL); diff --git a/fs/proc/vmcore.c b/fs/proc/vmcore.c index 4063fb32f78c..7efa73d44c9a 100644 --- a/fs/proc/vmcore.c +++ b/fs/proc/vmcore.c @@ -172,7 +172,7 @@ static int open_vmcore(struct inode *inode, struct file *filp) return 0; } -struct file_operations proc_vmcore_operations = { +const struct file_operations proc_vmcore_operations = { .read = read_vmcore, .open = open_vmcore, }; diff --git a/fs/qnx4/dir.c b/fs/qnx4/dir.c index 7a8f5595c26f..9031948fefd0 100644 --- a/fs/qnx4/dir.c +++ b/fs/qnx4/dir.c @@ -81,7 +81,7 @@ out: return 0; } -struct file_operations qnx4_dir_operations = +const struct file_operations qnx4_dir_operations = { .read = generic_read_dir, .readdir = qnx4_readdir, diff --git a/fs/qnx4/file.c b/fs/qnx4/file.c index c33963fded9e..62af4b1348bd 100644 --- 
a/fs/qnx4/file.c +++ b/fs/qnx4/file.c @@ -19,7 +19,7 @@ * We have mostly NULL's here: the current defaults are ok for * the qnx4 filesystem. */ -struct file_operations qnx4_file_operations = +const struct file_operations qnx4_file_operations = { .llseek = generic_file_llseek, .read = generic_file_read, diff --git a/fs/ramfs/file-mmu.c b/fs/ramfs/file-mmu.c index 6ada2095b9ac..00a933eb820c 100644 --- a/fs/ramfs/file-mmu.c +++ b/fs/ramfs/file-mmu.c @@ -32,7 +32,7 @@ struct address_space_operations ramfs_aops = { .commit_write = simple_commit_write }; -struct file_operations ramfs_file_operations = { +const struct file_operations ramfs_file_operations = { .read = generic_file_read, .write = generic_file_write, .mmap = generic_file_mmap, diff --git a/fs/ramfs/file-nommu.c b/fs/ramfs/file-nommu.c index b1ca234068f6..f443a84b98a5 100644 --- a/fs/ramfs/file-nommu.c +++ b/fs/ramfs/file-nommu.c @@ -33,7 +33,7 @@ struct address_space_operations ramfs_aops = { .commit_write = simple_commit_write }; -struct file_operations ramfs_file_operations = { +const struct file_operations ramfs_file_operations = { .mmap = ramfs_nommu_mmap, .get_unmapped_area = ramfs_nommu_get_unmapped_area, .read = generic_file_read, diff --git a/fs/ramfs/internal.h b/fs/ramfs/internal.h index 272c8a7120b0..313237631b49 100644 --- a/fs/ramfs/internal.h +++ b/fs/ramfs/internal.h @@ -11,5 +11,5 @@ extern struct address_space_operations ramfs_aops; -extern struct file_operations ramfs_file_operations; +extern const struct file_operations ramfs_file_operations; extern struct inode_operations ramfs_file_inode_operations; diff --git a/fs/read_write.c b/fs/read_write.c index 34b1bf259efd..6256ca81a718 100644 --- a/fs/read_write.c +++ b/fs/read_write.c @@ -19,7 +19,7 @@ #include <asm/uaccess.h> #include <asm/unistd.h> -struct file_operations generic_ro_fops = { +const struct file_operations generic_ro_fops = { .llseek = generic_file_llseek, .read = generic_file_read, .mmap = generic_file_readonly_mmap, diff --git a/fs/reiserfs/dir.c b/fs/reiserfs/dir.c index d71ac6579289..973c819f8033 100644 --- a/fs/reiserfs/dir.c +++ b/fs/reiserfs/dir.c @@ -18,7 +18,7 @@ static int reiserfs_readdir(struct file *, void *, filldir_t); static int reiserfs_dir_fsync(struct file *filp, struct dentry *dentry, int datasync); -struct file_operations reiserfs_dir_operations = { +const struct file_operations reiserfs_dir_operations = { .read = generic_read_dir, .readdir = reiserfs_readdir, .fsync = reiserfs_dir_fsync, diff --git a/fs/reiserfs/file.c b/fs/reiserfs/file.c index d0c1e865963e..010094d14da6 100644 --- a/fs/reiserfs/file.c +++ b/fs/reiserfs/file.c @@ -1566,7 +1566,7 @@ static ssize_t reiserfs_aio_write(struct kiocb *iocb, const char __user * buf, return generic_file_aio_write(iocb, buf, count, pos); } -struct file_operations reiserfs_file_operations = { +const struct file_operations reiserfs_file_operations = { .read = generic_file_read, .write = reiserfs_file_write, .ioctl = reiserfs_ioctl, diff --git a/fs/reiserfs/inode.c b/fs/reiserfs/inode.c index d60f6238c66a..9857e50f85e7 100644 --- a/fs/reiserfs/inode.c +++ b/fs/reiserfs/inode.c @@ -466,7 +466,6 @@ static int reiserfs_get_block_create_0(struct inode *inode, sector_t block, direct_IO request. 
*/ static int reiserfs_get_blocks_direct_io(struct inode *inode, sector_t iblock, - unsigned long max_blocks, struct buffer_head *bh_result, int create) { @@ -2793,7 +2792,7 @@ static int invalidatepage_can_drop(struct inode *inode, struct buffer_head *bh) } /* clm -- taken from fs/buffer.c:block_invalidate_page */ -static int reiserfs_invalidatepage(struct page *page, unsigned long offset) +static void reiserfs_invalidatepage(struct page *page, unsigned long offset) { struct buffer_head *head, *bh, *next; struct inode *inode = page->mapping->host; @@ -2832,10 +2831,12 @@ static int reiserfs_invalidatepage(struct page *page, unsigned long offset) * The get_block cached value has been unconditionally invalidated, * so real IO is not possible anymore. */ - if (!offset && ret) + if (!offset && ret) { ret = try_to_release_page(page, 0); + /* maybe should BUG_ON(!ret); - neilb */ + } out: - return ret; + return; } static int reiserfs_set_page_dirty(struct page *page) diff --git a/fs/reiserfs/prints.c b/fs/reiserfs/prints.c index 78b40621b88b..27bd3a1df2ad 100644 --- a/fs/reiserfs/prints.c +++ b/fs/reiserfs/prints.c @@ -143,7 +143,7 @@ static void sprintf_buffer_head(char *buf, struct buffer_head *bh) char b[BDEVNAME_SIZE]; sprintf(buf, - "dev %s, size %d, blocknr %llu, count %d, state 0x%lx, page %p, (%s, %s, %s)", + "dev %s, size %zd, blocknr %llu, count %d, state 0x%lx, page %p, (%s, %s, %s)", bdevname(bh->b_bdev, b), bh->b_size, (unsigned long long)bh->b_blocknr, atomic_read(&(bh->b_count)), bh->b_state, bh->b_page, diff --git a/fs/reiserfs/procfs.c b/fs/reiserfs/procfs.c index ef6caed9336b..731688e1cfe3 100644 --- a/fs/reiserfs/procfs.c +++ b/fs/reiserfs/procfs.c @@ -470,7 +470,7 @@ static int r_open(struct inode *inode, struct file *file) return ret; } -static struct file_operations r_file_operations = { +static const struct file_operations r_file_operations = { .open = r_open, .read = seq_read, .llseek = seq_lseek, diff --git a/fs/romfs/inode.c b/fs/romfs/inode.c index c2fc424d7d5c..9b9eda7b335c 100644 --- a/fs/romfs/inode.c +++ b/fs/romfs/inode.c @@ -463,7 +463,7 @@ static struct address_space_operations romfs_aops = { .readpage = romfs_readpage }; -static struct file_operations romfs_dir_operations = { +static const struct file_operations romfs_dir_operations = { .read = generic_read_dir, .readdir = romfs_readdir, }; diff --git a/fs/select.c b/fs/select.c index 1815a57d2255..b3a3a1326af6 100644 --- a/fs/select.c +++ b/fs/select.c @@ -29,12 +29,6 @@ #define ROUND_UP(x,y) (((x)+(y)-1)/(y)) #define DEFAULT_POLLMASK (POLLIN | POLLOUT | POLLRDNORM | POLLWRNORM) -struct poll_table_entry { - struct file * filp; - wait_queue_t wait; - wait_queue_head_t * wait_address; -}; - struct poll_table_page { struct poll_table_page * next; struct poll_table_entry * entry; @@ -64,13 +58,23 @@ void poll_initwait(struct poll_wqueues *pwq) init_poll_funcptr(&pwq->pt, __pollwait); pwq->error = 0; pwq->table = NULL; + pwq->inline_index = 0; } EXPORT_SYMBOL(poll_initwait); +static void free_poll_entry(struct poll_table_entry *entry) +{ + remove_wait_queue(entry->wait_address,&entry->wait); + fput(entry->filp); +} + void poll_freewait(struct poll_wqueues *pwq) { struct poll_table_page * p = pwq->table; + int i; + for (i = 0; i < pwq->inline_index; i++) + free_poll_entry(pwq->inline_entries + i); while (p) { struct poll_table_entry * entry; struct poll_table_page *old; @@ -78,8 +82,7 @@ void poll_freewait(struct poll_wqueues *pwq) entry = p->entry; do { entry--; - 
remove_wait_queue(entry->wait_address,&entry->wait); - fput(entry->filp); + free_poll_entry(entry); } while (entry > p->entries); old = p; p = p->next; @@ -89,12 +92,14 @@ void poll_freewait(struct poll_wqueues *pwq) EXPORT_SYMBOL(poll_freewait); -static void __pollwait(struct file *filp, wait_queue_head_t *wait_address, - poll_table *_p) +static struct poll_table_entry *poll_get_entry(poll_table *_p) { struct poll_wqueues *p = container_of(_p, struct poll_wqueues, pt); struct poll_table_page *table = p->table; + if (p->inline_index < N_INLINE_POLL_ENTRIES) + return p->inline_entries + p->inline_index++; + if (!table || POLL_TABLE_FULL(table)) { struct poll_table_page *new_table; @@ -102,7 +107,7 @@ static void __pollwait(struct file *filp, wait_queue_head_t *wait_address, if (!new_table) { p->error = -ENOMEM; __set_current_state(TASK_RUNNING); - return; + return NULL; } new_table->entry = new_table->entries; new_table->next = table; @@ -110,16 +115,21 @@ static void __pollwait(struct file *filp, wait_queue_head_t *wait_address, table = new_table; } - /* Add a new entry */ - { - struct poll_table_entry * entry = table->entry; - table->entry = entry+1; - get_file(filp); - entry->filp = filp; - entry->wait_address = wait_address; - init_waitqueue_entry(&entry->wait, current); - add_wait_queue(wait_address,&entry->wait); - } + return table->entry++; +} + +/* Add a new entry */ +static void __pollwait(struct file *filp, wait_queue_head_t *wait_address, + poll_table *p) +{ + struct poll_table_entry *entry = poll_get_entry(p); + if (!entry) + return; + get_file(filp); + entry->filp = filp; + entry->wait_address = wait_address; + init_waitqueue_entry(&entry->wait, current); + add_wait_queue(wait_address,&entry->wait); } #define FDS_IN(fds, n) (fds->in + n) @@ -210,7 +220,7 @@ int do_select(int n, fd_set_bits *fds, s64 *timeout) for (i = 0; i < n; ++rinp, ++routp, ++rexp) { unsigned long in, out, ex, all_bits, bit = 1, mask, j; unsigned long res_in = 0, res_out = 0, res_ex = 0; - struct file_operations *f_op = NULL; + const struct file_operations *f_op = NULL; struct file *file = NULL; in = *inp++; out = *outp++; ex = *exp++; @@ -221,17 +231,18 @@ int do_select(int n, fd_set_bits *fds, s64 *timeout) } for (j = 0; j < __NFDBITS; ++j, ++i, bit <<= 1) { + int fput_needed; if (i >= n) break; if (!(bit & all_bits)) continue; - file = fget(i); + file = fget_light(i, &fput_needed); if (file) { f_op = file->f_op; mask = DEFAULT_POLLMASK; if (f_op && f_op->poll) mask = (*f_op->poll)(file, retval ? NULL : wait); - fput(file); + fput_light(file, fput_needed); if ((mask & POLLIN_SET) && (in & bit)) { res_in |= bit; retval++; @@ -284,16 +295,6 @@ int do_select(int n, fd_set_bits *fds, s64 *timeout) return retval; } -static void *select_bits_alloc(int size) -{ - return kmalloc(6 * size, GFP_KERNEL); -} - -static void select_bits_free(void *bits, int size) -{ - kfree(bits); -} - /* * We can actually return ERESTARTSYS instead of EINTR, but I'd * like to be certain this leads to no problems. 
So I return @@ -312,6 +313,8 @@ static int core_sys_select(int n, fd_set __user *inp, fd_set __user *outp, char *bits; int ret, size, max_fdset; struct fdtable *fdt; + /* Allocate small arguments on the stack to save memory and be faster */ + char stack_fds[SELECT_STACK_ALLOC]; ret = -EINVAL; if (n < 0) @@ -332,7 +335,10 @@ static int core_sys_select(int n, fd_set __user *inp, fd_set __user *outp, */ ret = -ENOMEM; size = FDS_BYTES(n); - bits = select_bits_alloc(size); + if (6*size < SELECT_STACK_ALLOC) + bits = stack_fds; + else + bits = kmalloc(6 * size, GFP_KERNEL); if (!bits) goto out_nofds; fds.in = (unsigned long *) bits; @@ -367,7 +373,8 @@ static int core_sys_select(int n, fd_set __user *inp, fd_set __user *outp, ret = -EFAULT; out: - select_bits_free(bits, size); + if (bits != stack_fds) + kfree(bits); out_nofds: return ret; } @@ -551,14 +558,15 @@ static void do_pollfd(unsigned int num, struct pollfd * fdpage, fdp = fdpage+i; fd = fdp->fd; if (fd >= 0) { - struct file * file = fget(fd); + int fput_needed; + struct file * file = fget_light(fd, &fput_needed); mask = POLLNVAL; if (file != NULL) { mask = DEFAULT_POLLMASK; if (file->f_op && file->f_op->poll) mask = file->f_op->poll(file, *pwait); mask &= fdp->events | POLLERR | POLLHUP; - fput(file); + fput_light(file, fput_needed); } if (mask) { *pwait = NULL; @@ -619,6 +627,9 @@ static int do_poll(unsigned int nfds, struct poll_list *list, return count; } +#define N_STACK_PPS ((sizeof(stack_pps) - sizeof(struct poll_list)) / \ + sizeof(struct pollfd)) + int do_sys_poll(struct pollfd __user *ufds, unsigned int nfds, s64 *timeout) { struct poll_wqueues table; @@ -628,6 +639,9 @@ int do_sys_poll(struct pollfd __user *ufds, unsigned int nfds, s64 *timeout) struct poll_list *walk; struct fdtable *fdt; int max_fdset; + /* Allocate small arguments on the stack to save memory and be faster */ + char stack_pps[POLL_STACK_ALLOC]; + struct poll_list *stack_pp = NULL; /* Do a sanity check on nfds ... 
*/ rcu_read_lock(); @@ -645,14 +659,23 @@ int do_sys_poll(struct pollfd __user *ufds, unsigned int nfds, s64 *timeout) err = -ENOMEM; while(i!=0) { struct poll_list *pp; - pp = kmalloc(sizeof(struct poll_list)+ - sizeof(struct pollfd)* - (i>POLLFD_PER_PAGE?POLLFD_PER_PAGE:i), - GFP_KERNEL); - if(pp==NULL) - goto out_fds; + int num, size; + if (stack_pp == NULL) + num = N_STACK_PPS; + else + num = POLLFD_PER_PAGE; + if (num > i) + num = i; + size = sizeof(struct poll_list) + sizeof(struct pollfd)*num; + if (!stack_pp) + stack_pp = pp = (struct poll_list *)stack_pps; + else { + pp = kmalloc(size, GFP_KERNEL); + if (!pp) + goto out_fds; + } pp->next=NULL; - pp->len = (i>POLLFD_PER_PAGE?POLLFD_PER_PAGE:i); + pp->len = num; if (head == NULL) head = pp; else @@ -660,7 +683,7 @@ int do_sys_poll(struct pollfd __user *ufds, unsigned int nfds, s64 *timeout) walk = pp; if (copy_from_user(pp->entries, ufds + nfds-i, - sizeof(struct pollfd)*pp->len)) { + sizeof(struct pollfd)*num)) { err = -EFAULT; goto out_fds; } @@ -689,7 +712,8 @@ out_fds: walk = head; while(walk!=NULL) { struct poll_list *pp = walk->next; - kfree(walk); + if (walk != stack_pp) + kfree(walk); walk = pp; } poll_freewait(&table); diff --git a/fs/smbfs/dir.c b/fs/smbfs/dir.c index 0424d06b147e..34c7a11d91f0 100644 --- a/fs/smbfs/dir.c +++ b/fs/smbfs/dir.c @@ -34,7 +34,7 @@ static int smb_rename(struct inode *, struct dentry *, static int smb_make_node(struct inode *,struct dentry *,int,dev_t); static int smb_link(struct dentry *, struct inode *, struct dentry *); -struct file_operations smb_dir_operations = +const struct file_operations smb_dir_operations = { .read = generic_read_dir, .readdir = smb_readdir, diff --git a/fs/smbfs/file.c b/fs/smbfs/file.c index 7042e62726a4..c56bd99a9701 100644 --- a/fs/smbfs/file.c +++ b/fs/smbfs/file.c @@ -401,7 +401,7 @@ smb_file_permission(struct inode *inode, int mask, struct nameidata *nd) return error; } -struct file_operations smb_file_operations = +const struct file_operations smb_file_operations = { .llseek = remote_llseek, .read = smb_file_read, diff --git a/fs/smbfs/proto.h b/fs/smbfs/proto.h index e866ec8660d0..47664597e6b1 100644 --- a/fs/smbfs/proto.h +++ b/fs/smbfs/proto.h @@ -35,7 +35,7 @@ extern int smb_proc_symlink(struct smb_sb_info *server, struct dentry *d, const extern int smb_proc_link(struct smb_sb_info *server, struct dentry *dentry, struct dentry *new_dentry); extern void smb_install_null_ops(struct smb_ops *ops); /* dir.c */ -extern struct file_operations smb_dir_operations; +extern const struct file_operations smb_dir_operations; extern struct inode_operations smb_dir_inode_operations; extern struct inode_operations smb_dir_inode_operations_unix; extern void smb_new_dentry(struct dentry *dentry); @@ -64,7 +64,7 @@ extern int smb_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat extern int smb_notify_change(struct dentry *dentry, struct iattr *attr); /* file.c */ extern struct address_space_operations smb_file_aops; -extern struct file_operations smb_file_operations; +extern const struct file_operations smb_file_operations; extern struct inode_operations smb_file_inode_operations; /* ioctl.c */ extern int smb_ioctl(struct inode *inode, struct file *filp, unsigned int cmd, unsigned long arg); diff --git a/fs/super.c b/fs/super.c index 8743e9bbb297..a66f66bb8049 100644 --- a/fs/super.c +++ b/fs/super.c @@ -37,6 +37,7 @@ #include <linux/writeback.h> /* for the emergency remount stuff */ #include <linux/idr.h> #include <linux/kobject.h> +#include <linux/mutex.h> 
#include <asm/uaccess.h> @@ -380,9 +381,9 @@ restart: void sync_filesystems(int wait) { struct super_block *sb; - static DECLARE_MUTEX(mutex); + static DEFINE_MUTEX(mutex); - down(&mutex); /* Could be down_interruptible */ + mutex_lock(&mutex); /* Could be down_interruptible */ spin_lock(&sb_lock); list_for_each_entry(sb, &super_blocks, s_list) { if (!sb->s_op->sync_fs) @@ -411,7 +412,7 @@ restart: goto restart; } spin_unlock(&sb_lock); - up(&mutex); + mutex_unlock(&mutex); } /** diff --git a/fs/sysfs/bin.c b/fs/sysfs/bin.c index 78899eeab974..c16a93c353c0 100644 --- a/fs/sysfs/bin.c +++ b/fs/sysfs/bin.c @@ -163,7 +163,7 @@ static int release(struct inode * inode, struct file * file) return 0; } -struct file_operations bin_fops = { +const struct file_operations bin_fops = { .read = read, .write = write, .mmap = mmap, diff --git a/fs/sysfs/dir.c b/fs/sysfs/dir.c index 9ee956864445..f26880a4785e 100644 --- a/fs/sysfs/dir.c +++ b/fs/sysfs/dir.c @@ -503,7 +503,7 @@ static loff_t sysfs_dir_lseek(struct file * file, loff_t offset, int origin) return offset; } -struct file_operations sysfs_dir_operations = { +const struct file_operations sysfs_dir_operations = { .open = sysfs_dir_open, .release = sysfs_dir_close, .llseek = sysfs_dir_lseek, diff --git a/fs/sysfs/file.c b/fs/sysfs/file.c index 5e83e7246788..830f76fa098c 100644 --- a/fs/sysfs/file.c +++ b/fs/sysfs/file.c @@ -348,7 +348,7 @@ static int sysfs_release(struct inode * inode, struct file * filp) return 0; } -struct file_operations sysfs_file_operations = { +const struct file_operations sysfs_file_operations = { .read = sysfs_read_file, .write = sysfs_write_file, .llseek = generic_file_llseek, diff --git a/fs/sysfs/sysfs.h b/fs/sysfs/sysfs.h index cf11d5b789d9..32958a7c50e9 100644 --- a/fs/sysfs/sysfs.h +++ b/fs/sysfs/sysfs.h @@ -21,9 +21,9 @@ extern int sysfs_setattr(struct dentry *dentry, struct iattr *iattr); extern struct rw_semaphore sysfs_rename_sem; extern struct super_block * sysfs_sb; -extern struct file_operations sysfs_dir_operations; -extern struct file_operations sysfs_file_operations; -extern struct file_operations bin_fops; +extern const struct file_operations sysfs_dir_operations; +extern const struct file_operations sysfs_file_operations; +extern const struct file_operations bin_fops; extern struct inode_operations sysfs_dir_inode_operations; extern struct inode_operations sysfs_symlink_inode_operations; diff --git a/fs/sysv/dir.c b/fs/sysv/dir.c index cce8b05cba5a..8c66e9270dd6 100644 --- a/fs/sysv/dir.c +++ b/fs/sysv/dir.c @@ -20,7 +20,7 @@ static int sysv_readdir(struct file *, void *, filldir_t); -struct file_operations sysv_dir_operations = { +const struct file_operations sysv_dir_operations = { .read = generic_read_dir, .readdir = sysv_readdir, .fsync = sysv_sync_file, diff --git a/fs/sysv/file.c b/fs/sysv/file.c index da69abc06240..a59e303135fa 100644 --- a/fs/sysv/file.c +++ b/fs/sysv/file.c @@ -19,7 +19,7 @@ * We have mostly NULLs here: the current defaults are OK for * the coh filesystem. 
*/ -struct file_operations sysv_file_operations = { +const struct file_operations sysv_file_operations = { .llseek = generic_file_llseek, .read = generic_file_read, .write = generic_file_write, diff --git a/fs/sysv/sysv.h b/fs/sysv/sysv.h index b7f9b4a42aab..393a480e4deb 100644 --- a/fs/sysv/sysv.h +++ b/fs/sysv/sysv.h @@ -159,8 +159,8 @@ extern ino_t sysv_inode_by_name(struct dentry *); extern struct inode_operations sysv_file_inode_operations; extern struct inode_operations sysv_dir_inode_operations; extern struct inode_operations sysv_fast_symlink_inode_operations; -extern struct file_operations sysv_file_operations; -extern struct file_operations sysv_dir_operations; +extern const struct file_operations sysv_file_operations; +extern const struct file_operations sysv_dir_operations; extern struct address_space_operations sysv_aops; extern struct super_operations sysv_sops; extern struct dentry_operations sysv_dentry_operations; diff --git a/fs/udf/dir.c b/fs/udf/dir.c index f5222527fe39..8c28efa3b8ff 100644 --- a/fs/udf/dir.c +++ b/fs/udf/dir.c @@ -42,7 +42,7 @@ static int do_udf_readdir(struct inode *, struct file *, filldir_t, void *); /* readdir and lookup functions */ -struct file_operations udf_dir_operations = { +const struct file_operations udf_dir_operations = { .read = generic_read_dir, .readdir = udf_readdir, .ioctl = udf_ioctl, diff --git a/fs/udf/file.c b/fs/udf/file.c index a6f2acc1f15c..e34b00e303f1 100644 --- a/fs/udf/file.c +++ b/fs/udf/file.c @@ -248,7 +248,7 @@ static int udf_release_file(struct inode * inode, struct file * filp) return 0; } -struct file_operations udf_file_operations = { +const struct file_operations udf_file_operations = { .read = generic_file_read, .ioctl = udf_ioctl, .open = generic_file_open, diff --git a/fs/udf/udfdecl.h b/fs/udf/udfdecl.h index 1d5800e0cbe7..023e19ba5a2e 100644 --- a/fs/udf/udfdecl.h +++ b/fs/udf/udfdecl.h @@ -44,9 +44,9 @@ struct buffer_head; struct super_block; extern struct inode_operations udf_dir_inode_operations; -extern struct file_operations udf_dir_operations; +extern const struct file_operations udf_dir_operations; extern struct inode_operations udf_file_inode_operations; -extern struct file_operations udf_file_operations; +extern const struct file_operations udf_file_operations; extern struct address_space_operations udf_aops; extern struct address_space_operations udf_adinicb_aops; extern struct address_space_operations udf_symlink_aops; diff --git a/fs/ufs/dir.c b/fs/ufs/dir.c index 7c10c68902ae..1a561202d3f4 100644 --- a/fs/ufs/dir.c +++ b/fs/ufs/dir.c @@ -620,7 +620,7 @@ int ufs_empty_dir (struct inode * inode) return 1; } -struct file_operations ufs_dir_operations = { +const struct file_operations ufs_dir_operations = { .read = generic_read_dir, .readdir = ufs_readdir, .fsync = file_fsync, diff --git a/fs/ufs/file.c b/fs/ufs/file.c index 62ad481810ef..312fd3f86313 100644 --- a/fs/ufs/file.c +++ b/fs/ufs/file.c @@ -31,7 +31,7 @@ * the ufs filesystem. 
*/ -struct file_operations ufs_file_operations = { +const struct file_operations ufs_file_operations = { .llseek = generic_file_llseek, .read = generic_file_read, .write = generic_file_write, diff --git a/fs/xfs/linux-2.6/xfs_aops.c b/fs/xfs/linux-2.6/xfs_aops.c index 97fc056130eb..c02f7c5b7462 100644 --- a/fs/xfs/linux-2.6/xfs_aops.c +++ b/fs/xfs/linux-2.6/xfs_aops.c @@ -1310,20 +1310,21 @@ xfs_get_block( struct buffer_head *bh_result, int create) { - return __xfs_get_block(inode, iblock, 0, bh_result, - create, 0, BMAPI_WRITE); + return __xfs_get_block(inode, iblock, + bh_result->b_size >> inode->i_blkbits, + bh_result, create, 0, BMAPI_WRITE); } STATIC int xfs_get_blocks_direct( struct inode *inode, sector_t iblock, - unsigned long max_blocks, struct buffer_head *bh_result, int create) { - return __xfs_get_block(inode, iblock, max_blocks, bh_result, - create, 1, BMAPI_WRITE|BMAPI_DIRECT); + return __xfs_get_block(inode, iblock, + bh_result->b_size >> inode->i_blkbits, + bh_result, create, 1, BMAPI_WRITE|BMAPI_DIRECT); } STATIC void @@ -1442,14 +1443,14 @@ xfs_vm_readpages( return mpage_readpages(mapping, pages, nr_pages, xfs_get_block); } -STATIC int +STATIC void xfs_vm_invalidatepage( struct page *page, unsigned long offset) { xfs_page_trace(XFS_INVALIDPAGE_ENTER, page->mapping->host, page, offset); - return block_invalidatepage(page, offset); + block_invalidatepage(page, offset); } struct address_space_operations xfs_address_space_operations = { diff --git a/fs/xfs/linux-2.6/xfs_file.c b/fs/xfs/linux-2.6/xfs_file.c index 185567a6a561..85997b1205f5 100644 --- a/fs/xfs/linux-2.6/xfs_file.c +++ b/fs/xfs/linux-2.6/xfs_file.c @@ -528,7 +528,7 @@ open_exec_out: } #endif /* HAVE_FOP_OPEN_EXEC */ -struct file_operations xfs_file_operations = { +const struct file_operations xfs_file_operations = { .llseek = generic_file_llseek, .read = do_sync_read, .write = do_sync_write, @@ -550,7 +550,7 @@ struct file_operations xfs_file_operations = { #endif }; -struct file_operations xfs_invis_file_operations = { +const struct file_operations xfs_invis_file_operations = { .llseek = generic_file_llseek, .read = do_sync_read, .write = do_sync_write, @@ -570,7 +570,7 @@ struct file_operations xfs_invis_file_operations = { }; -struct file_operations xfs_dir_file_operations = { +const struct file_operations xfs_dir_file_operations = { .read = generic_read_dir, .readdir = xfs_file_readdir, .unlocked_ioctl = xfs_file_ioctl, diff --git a/fs/xfs/linux-2.6/xfs_iops.h b/fs/xfs/linux-2.6/xfs_iops.h index a8417d7af5f9..ad6173da5678 100644 --- a/fs/xfs/linux-2.6/xfs_iops.h +++ b/fs/xfs/linux-2.6/xfs_iops.h @@ -22,9 +22,9 @@ extern struct inode_operations xfs_inode_operations; extern struct inode_operations xfs_dir_inode_operations; extern struct inode_operations xfs_symlink_inode_operations; -extern struct file_operations xfs_file_operations; -extern struct file_operations xfs_dir_file_operations; -extern struct file_operations xfs_invis_file_operations; +extern const struct file_operations xfs_file_operations; +extern const struct file_operations xfs_dir_file_operations; +extern const struct file_operations xfs_invis_file_operations; extern int xfs_ioctl(struct bhv_desc *, struct inode *, struct file *, int, unsigned int, void __user *); diff --git a/fs/xfs/linux-2.6/xfs_super.c b/fs/xfs/linux-2.6/xfs_super.c index 8355faf8ffde..1884300417e3 100644 --- a/fs/xfs/linux-2.6/xfs_super.c +++ b/fs/xfs/linux-2.6/xfs_super.c @@ -375,9 +375,8 @@ xfs_init_zones(void) if (!xfs_ioend_zone) goto out_destroy_vnode_zone; - 
xfs_ioend_pool = mempool_create(4 * MAX_BUF_PER_PAGE, - mempool_alloc_slab, mempool_free_slab, - xfs_ioend_zone); + xfs_ioend_pool = mempool_create_slab_pool(4 * MAX_BUF_PER_PAGE, + xfs_ioend_zone); if (!xfs_ioend_pool) goto out_free_ioend_zone; return 0; diff --git a/include/asm-alpha/bitops.h b/include/asm-alpha/bitops.h index 302201f1a097..3f88715e811e 100644 --- a/include/asm-alpha/bitops.h +++ b/include/asm-alpha/bitops.h @@ -261,7 +261,7 @@ static inline unsigned long ffz_b(unsigned long x) static inline unsigned long ffz(unsigned long word) { -#if defined(__alpha_cix__) && defined(__alpha_fix__) +#if defined(CONFIG_ALPHA_EV6) && defined(CONFIG_ALPHA_EV67) /* Whee. EV67 can calculate it directly. */ return __kernel_cttz(~word); #else @@ -281,7 +281,7 @@ static inline unsigned long ffz(unsigned long word) */ static inline unsigned long __ffs(unsigned long word) { -#if defined(__alpha_cix__) && defined(__alpha_fix__) +#if defined(CONFIG_ALPHA_EV6) && defined(CONFIG_ALPHA_EV67) /* Whee. EV67 can calculate it directly. */ return __kernel_cttz(word); #else @@ -313,20 +313,20 @@ static inline int ffs(int word) /* * fls: find last bit set. */ -#if defined(__alpha_cix__) && defined(__alpha_fix__) +#if defined(CONFIG_ALPHA_EV6) && defined(CONFIG_ALPHA_EV67) static inline int fls(int word) { return 64 - __kernel_ctlz(word & 0xffffffff); } #else -#define fls generic_fls +#include <asm-generic/bitops/fls.h> #endif -#define fls64 generic_fls64 +#include <asm-generic/bitops/fls64.h> /* Compute powers of two for the given integer. */ static inline long floor_log2(unsigned long word) { -#if defined(__alpha_cix__) && defined(__alpha_fix__) +#if defined(CONFIG_ALPHA_EV6) && defined(CONFIG_ALPHA_EV67) return 63 - __kernel_ctlz(word); #else long bit; @@ -347,7 +347,7 @@ static inline long ceil_log2(unsigned long word) * of bits set) of a N-bit word */ -#if defined(__alpha_cix__) && defined(__alpha_fix__) +#if defined(CONFIG_ALPHA_EV6) && defined(CONFIG_ALPHA_EV67) /* Whee. EV67 can calculate it directly. */ static inline unsigned long hweight64(unsigned long w) { @@ -358,112 +358,12 @@ static inline unsigned long hweight64(unsigned long w) #define hweight16(x) (unsigned int) hweight64((x) & 0xfffful) #define hweight8(x) (unsigned int) hweight64((x) & 0xfful) #else -static inline unsigned long hweight64(unsigned long w) -{ - unsigned long result; - for (result = 0; w ; w >>= 1) - result += (w & 1); - return result; -} - -#define hweight32(x) generic_hweight32(x) -#define hweight16(x) generic_hweight16(x) -#define hweight8(x) generic_hweight8(x) +#include <asm-generic/bitops/hweight.h> #endif #endif /* __KERNEL__ */ -/* - * Find next zero bit in a bitmap reasonably efficiently.. - */ -static inline unsigned long -find_next_zero_bit(const void *addr, unsigned long size, unsigned long offset) -{ - const unsigned long *p = addr; - unsigned long result = offset & ~63UL; - unsigned long tmp; - - p += offset >> 6; - if (offset >= size) - return size; - size -= result; - offset &= 63UL; - if (offset) { - tmp = *(p++); - tmp |= ~0UL >> (64-offset); - if (size < 64) - goto found_first; - if (~tmp) - goto found_middle; - size -= 64; - result += 64; - } - while (size & ~63UL) { - if (~(tmp = *(p++))) - goto found_middle; - result += 64; - size -= 64; - } - if (!size) - return result; - tmp = *p; - found_first: - tmp |= ~0UL << size; - if (tmp == ~0UL) /* Are any bits zero? */ - return result + size; /* Nope. 
*/ - found_middle: - return result + ffz(tmp); -} - -/* - * Find next one bit in a bitmap reasonably efficiently. - */ -static inline unsigned long -find_next_bit(const void * addr, unsigned long size, unsigned long offset) -{ - const unsigned long *p = addr; - unsigned long result = offset & ~63UL; - unsigned long tmp; - - p += offset >> 6; - if (offset >= size) - return size; - size -= result; - offset &= 63UL; - if (offset) { - tmp = *(p++); - tmp &= ~0UL << offset; - if (size < 64) - goto found_first; - if (tmp) - goto found_middle; - size -= 64; - result += 64; - } - while (size & ~63UL) { - if ((tmp = *(p++))) - goto found_middle; - result += 64; - size -= 64; - } - if (!size) - return result; - tmp = *p; - found_first: - tmp &= ~0UL >> (64 - size); - if (!tmp) - return result + size; - found_middle: - return result + __ffs(tmp); -} - -/* - * The optimizer actually does good code for this case. - */ -#define find_first_zero_bit(addr, size) \ - find_next_zero_bit((addr), (size), 0) -#define find_first_bit(addr, size) \ - find_next_bit((addr), (size), 0) +#include <asm-generic/bitops/find.h> #ifdef __KERNEL__ @@ -487,21 +387,12 @@ sched_find_first_bit(unsigned long b[3]) return __ffs(b0) + ofs; } +#include <asm-generic/bitops/ext2-non-atomic.h> -#define ext2_set_bit __test_and_set_bit #define ext2_set_bit_atomic(l,n,a) test_and_set_bit(n,a) -#define ext2_clear_bit __test_and_clear_bit #define ext2_clear_bit_atomic(l,n,a) test_and_clear_bit(n,a) -#define ext2_test_bit test_bit -#define ext2_find_first_zero_bit find_first_zero_bit -#define ext2_find_next_zero_bit find_next_zero_bit - -/* Bitmap functions for the minix filesystem. */ -#define minix_test_and_set_bit(nr,addr) __test_and_set_bit(nr,addr) -#define minix_set_bit(nr,addr) __set_bit(nr,addr) -#define minix_test_and_clear_bit(nr,addr) __test_and_clear_bit(nr,addr) -#define minix_test_bit(nr,addr) test_bit(nr,addr) -#define minix_find_first_zero_bit(addr,size) find_first_zero_bit(addr,size) + +#include <asm-generic/bitops/minix.h> #endif /* __KERNEL__ */ diff --git a/include/asm-alpha/fpu.h b/include/asm-alpha/fpu.h index c203fc2fa5cd..ecb17a72acc3 100644 --- a/include/asm-alpha/fpu.h +++ b/include/asm-alpha/fpu.h @@ -130,7 +130,7 @@ rdfpcr(void) { unsigned long tmp, ret; -#if defined(__alpha_cix__) || defined(__alpha_fix__) +#if defined(CONFIG_ALPHA_EV6) || defined(CONFIG_ALPHA_EV67) __asm__ __volatile__ ( "ftoit $f0,%0\n\t" "mf_fpcr $f0\n\t" @@ -154,7 +154,7 @@ wrfpcr(unsigned long val) { unsigned long tmp; -#if defined(__alpha_cix__) || defined(__alpha_fix__) +#if defined(CONFIG_ALPHA_EV6) || defined(CONFIG_ALPHA_EV67) __asm__ __volatile__ ( "ftoit $f0,%0\n\t" "itoft %1,$f0\n\t" diff --git a/include/asm-alpha/mmzone.h b/include/asm-alpha/mmzone.h index a011ef4cf3d3..192d80c875b0 100644 --- a/include/asm-alpha/mmzone.h +++ b/include/asm-alpha/mmzone.h @@ -59,9 +59,6 @@ PLAT_NODE_DATA_LOCALNR(unsigned long p, int n) #define kvaddr_to_nid(kaddr) pa_to_nid(__pa(kaddr)) #define node_start_pfn(nid) (NODE_DATA(nid)->node_start_pfn) -#define local_mapnr(kvaddr) \ - ((__pa(kvaddr) >> PAGE_SHIFT) - node_start_pfn(kvaddr_to_nid(kvaddr))) - /* * Given a kaddr, LOCAL_BASE_ADDR finds the owning node of the memory * and returns the kaddr corresponding to first physical page in the @@ -86,8 +83,7 @@ PLAT_NODE_DATA_LOCALNR(unsigned long p, int n) pte_t pte; \ unsigned long pfn; \ \ - pfn = ((unsigned long)((page)-page_zone(page)->zone_mem_map)) << 32; \ - pfn += page_zone(page)->zone_start_pfn << 32; \ + pfn = page_to_pfn(page) << 32; \ 
pte_val(pte) = pfn | pgprot_val(pgprot); \ \ pte; \ @@ -104,19 +100,8 @@ PLAT_NODE_DATA_LOCALNR(unsigned long p, int n) __xx; \ }) -#define pfn_to_page(pfn) \ -({ \ - unsigned long kaddr = (unsigned long)__va((pfn) << PAGE_SHIFT); \ - (NODE_DATA(kvaddr_to_nid(kaddr))->node_mem_map + local_mapnr(kaddr)); \ -}) - -#define page_to_pfn(page) \ - ((page) - page_zone(page)->zone_mem_map + \ - (page_zone(page)->zone_start_pfn)) - #define page_to_pa(page) \ - ((( (page) - page_zone(page)->zone_mem_map ) \ - + page_zone(page)->zone_start_pfn) << PAGE_SHIFT) + (page_to_pfn(page) << PAGE_SHIFT) #define pfn_to_nid(pfn) pa_to_nid(((u64)(pfn) << PAGE_SHIFT)) #define pfn_valid(pfn) \ diff --git a/include/asm-alpha/page.h b/include/asm-alpha/page.h index fa0b41b164a7..61bcf70b5eac 100644 --- a/include/asm-alpha/page.h +++ b/include/asm-alpha/page.h @@ -85,8 +85,6 @@ typedef unsigned long pgprot_t; #define __pa(x) ((unsigned long) (x) - PAGE_OFFSET) #define __va(x) ((void *)((unsigned long) (x) + PAGE_OFFSET)) #ifndef CONFIG_DISCONTIGMEM -#define pfn_to_page(pfn) (mem_map + (pfn)) -#define page_to_pfn(page) ((unsigned long)((page) - mem_map)) #define virt_to_page(kaddr) pfn_to_page(__pa(kaddr) >> PAGE_SHIFT) #define pfn_valid(pfn) ((pfn) < max_mapnr) @@ -95,9 +93,9 @@ typedef unsigned long pgprot_t; #define VM_DATA_DEFAULT_FLAGS (VM_READ | VM_WRITE | VM_EXEC | \ VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC) - #endif /* __KERNEL__ */ +#include <asm-generic/memory_model.h> #include <asm-generic/page.h> #endif /* _ALPHA_PAGE_H */ diff --git a/include/asm-alpha/poll.h b/include/asm-alpha/poll.h index 95707182b3ed..76f89356b6a7 100644 --- a/include/asm-alpha/poll.h +++ b/include/asm-alpha/poll.h @@ -12,8 +12,8 @@ #define POLLWRNORM (1 << 8) #define POLLWRBAND (1 << 9) #define POLLMSG (1 << 10) -#define POLLREMOVE (1 << 11) -#define POLLRDHUP (1 << 12) +#define POLLREMOVE (1 << 12) +#define POLLRDHUP (1 << 13) struct pollfd { diff --git a/include/asm-arm/bitops.h b/include/asm-arm/bitops.h index d02de721ecc1..0ac54b1a8bad 100644 --- a/include/asm-arm/bitops.h +++ b/include/asm-arm/bitops.h @@ -117,65 +117,7 @@ ____atomic_test_and_change_bit(unsigned int bit, volatile unsigned long *p) return res & mask; } -/* - * Now the non-atomic variants. We let the compiler handle all - * optimisations for these. These are all _native_ endian. - */ -static inline void __set_bit(int nr, volatile unsigned long *p) -{ - p[nr >> 5] |= (1UL << (nr & 31)); -} - -static inline void __clear_bit(int nr, volatile unsigned long *p) -{ - p[nr >> 5] &= ~(1UL << (nr & 31)); -} - -static inline void __change_bit(int nr, volatile unsigned long *p) -{ - p[nr >> 5] ^= (1UL << (nr & 31)); -} - -static inline int __test_and_set_bit(int nr, volatile unsigned long *p) -{ - unsigned long oldval, mask = 1UL << (nr & 31); - - p += nr >> 5; - - oldval = *p; - *p = oldval | mask; - return oldval & mask; -} - -static inline int __test_and_clear_bit(int nr, volatile unsigned long *p) -{ - unsigned long oldval, mask = 1UL << (nr & 31); - - p += nr >> 5; - - oldval = *p; - *p = oldval & ~mask; - return oldval & mask; -} - -static inline int __test_and_change_bit(int nr, volatile unsigned long *p) -{ - unsigned long oldval, mask = 1UL << (nr & 31); - - p += nr >> 5; - - oldval = *p; - *p = oldval ^ mask; - return oldval & mask; -} - -/* - * This routine doesn't need to be atomic. 
- */ -static inline int __test_bit(int nr, const volatile unsigned long * p) -{ - return (p[nr >> 5] >> (nr & 31)) & 1UL; -} +#include <asm-generic/bitops/non-atomic.h> /* * A note about Endian-ness. @@ -261,7 +203,6 @@ extern int _find_next_bit_be(const unsigned long *p, int size, int offset); #define test_and_set_bit(nr,p) ATOMIC_BITOP_LE(test_and_set_bit,nr,p) #define test_and_clear_bit(nr,p) ATOMIC_BITOP_LE(test_and_clear_bit,nr,p) #define test_and_change_bit(nr,p) ATOMIC_BITOP_LE(test_and_change_bit,nr,p) -#define test_bit(nr,p) __test_bit(nr,p) #define find_first_zero_bit(p,sz) _find_first_zero_bit_le(p,sz) #define find_next_zero_bit(p,sz,off) _find_next_zero_bit_le(p,sz,off) #define find_first_bit(p,sz) _find_first_bit_le(p,sz) @@ -280,7 +221,6 @@ extern int _find_next_bit_be(const unsigned long *p, int size, int offset); #define test_and_set_bit(nr,p) ATOMIC_BITOP_BE(test_and_set_bit,nr,p) #define test_and_clear_bit(nr,p) ATOMIC_BITOP_BE(test_and_clear_bit,nr,p) #define test_and_change_bit(nr,p) ATOMIC_BITOP_BE(test_and_change_bit,nr,p) -#define test_bit(nr,p) __test_bit(nr,p) #define find_first_zero_bit(p,sz) _find_first_zero_bit_be(p,sz) #define find_next_zero_bit(p,sz,off) _find_next_zero_bit_be(p,sz,off) #define find_first_bit(p,sz) _find_first_bit_be(p,sz) @@ -292,57 +232,41 @@ extern int _find_next_bit_be(const unsigned long *p, int size, int offset); #if __LINUX_ARM_ARCH__ < 5 -/* - * ffz = Find First Zero in word. Undefined if no zero exists, - * so code should check against ~0UL first.. - */ -static inline unsigned long ffz(unsigned long word) -{ - int k; - - word = ~word; - k = 31; - if (word & 0x0000ffff) { k -= 16; word <<= 16; } - if (word & 0x00ff0000) { k -= 8; word <<= 8; } - if (word & 0x0f000000) { k -= 4; word <<= 4; } - if (word & 0x30000000) { k -= 2; word <<= 2; } - if (word & 0x40000000) { k -= 1; } - return k; -} - -/* - * ffz = Find First Zero in word. Undefined if no zero exists, - * so code should check against ~0UL first.. - */ -static inline unsigned long __ffs(unsigned long word) -{ - int k; - - k = 31; - if (word & 0x0000ffff) { k -= 16; word <<= 16; } - if (word & 0x00ff0000) { k -= 8; word <<= 8; } - if (word & 0x0f000000) { k -= 4; word <<= 4; } - if (word & 0x30000000) { k -= 2; word <<= 2; } - if (word & 0x40000000) { k -= 1; } - return k; -} - -/* - * fls: find last bit set. - */ +#include <asm-generic/bitops/ffz.h> +#include <asm-generic/bitops/__ffs.h> +#include <asm-generic/bitops/fls.h> +#include <asm-generic/bitops/ffs.h> -#define fls(x) generic_fls(x) -#define fls64(x) generic_fls64(x) - -/* - * ffs: find first bit set. This is defined the same way as - * the libc and compiler builtin ffs routines, therefore - * differs in spirit from the above ffz (man ffs). - */ +#else -#define ffs(x) generic_ffs(x) +static inline int constant_fls(int x) +{ + int r = 32; -#else + if (!x) + return 0; + if (!(x & 0xffff0000u)) { + x <<= 16; + r -= 16; + } + if (!(x & 0xff000000u)) { + x <<= 8; + r -= 8; + } + if (!(x & 0xf0000000u)) { + x <<= 4; + r -= 4; + } + if (!(x & 0xc0000000u)) { + x <<= 2; + r -= 2; + } + if (!(x & 0x80000000u)) { + x <<= 1; + r -= 1; + } + return r; +} /* * On ARMv5 and above those functions can be implemented around @@ -350,39 +274,18 @@ static inline unsigned long __ffs(unsigned long word) */ #define fls(x) \ - ( __builtin_constant_p(x) ? generic_fls(x) : \ + ( __builtin_constant_p(x) ? 
constant_fls(x) : \ ({ int __r; asm("clz\t%0, %1" : "=r"(__r) : "r"(x) : "cc"); 32-__r; }) ) -#define fls64(x) generic_fls64(x) #define ffs(x) ({ unsigned long __t = (x); fls(__t & -__t); }) #define __ffs(x) (ffs(x) - 1) #define ffz(x) __ffs( ~(x) ) #endif -/* - * Find first bit set in a 168-bit bitmap, where the first - * 128 bits are unlikely to be set. - */ -static inline int sched_find_first_bit(const unsigned long *b) -{ - unsigned long v; - unsigned int off; - - for (off = 0; v = b[off], off < 4; off++) { - if (unlikely(v)) - break; - } - return __ffs(v) + off * 32; -} - -/* - * hweightN: returns the hamming weight (i.e. the number - * of bits set) of a N-bit word - */ +#include <asm-generic/bitops/fls64.h> -#define hweight32(x) generic_hweight32(x) -#define hweight16(x) generic_hweight16(x) -#define hweight8(x) generic_hweight8(x) +#include <asm-generic/bitops/sched.h> +#include <asm-generic/bitops/hweight.h> /* * Ext2 is defined to use little-endian byte ordering. @@ -397,7 +300,7 @@ static inline int sched_find_first_bit(const unsigned long *b) #define ext2_clear_bit_atomic(lock,nr,p) \ test_and_clear_bit(WORD_BITOFF_TO_LE(nr), (unsigned long *)(p)) #define ext2_test_bit(nr,p) \ - __test_bit(WORD_BITOFF_TO_LE(nr), (unsigned long *)(p)) + test_bit(WORD_BITOFF_TO_LE(nr), (unsigned long *)(p)) #define ext2_find_first_zero_bit(p,sz) \ _find_first_zero_bit_le(p,sz) #define ext2_find_next_zero_bit(p,sz,off) \ @@ -410,7 +313,7 @@ static inline int sched_find_first_bit(const unsigned long *b) #define minix_set_bit(nr,p) \ __set_bit(WORD_BITOFF_TO_LE(nr), (unsigned long *)(p)) #define minix_test_bit(nr,p) \ - __test_bit(WORD_BITOFF_TO_LE(nr), (unsigned long *)(p)) + test_bit(WORD_BITOFF_TO_LE(nr), (unsigned long *)(p)) #define minix_test_and_set_bit(nr,p) \ __test_and_set_bit(WORD_BITOFF_TO_LE(nr), (unsigned long *)(p)) #define minix_test_and_clear_bit(nr,p) \ diff --git a/include/asm-arm/memory.h b/include/asm-arm/memory.h index b4e1146ab682..afa5c3ea077c 100644 --- a/include/asm-arm/memory.h +++ b/include/asm-arm/memory.h @@ -172,9 +172,7 @@ static inline __deprecated void *bus_to_virt(unsigned long x) * virt_addr_valid(k) indicates whether a virtual address is valid */ #ifndef CONFIG_DISCONTIGMEM - -#define page_to_pfn(page) (((page) - mem_map) + PHYS_PFN_OFFSET) -#define pfn_to_page(pfn) ((mem_map + (pfn)) - PHYS_PFN_OFFSET) +#define ARCH_PFN_OFFSET (PHYS_PFN_OFFSET) #define pfn_valid(pfn) ((pfn) >= PHYS_PFN_OFFSET && (pfn) < (PHYS_PFN_OFFSET + max_mapnr)) #define virt_to_page(kaddr) (pfn_to_page(__pa(kaddr) >> PAGE_SHIFT)) @@ -189,13 +187,8 @@ static inline __deprecated void *bus_to_virt(unsigned long x) * around in memory. 
*/ #include <linux/numa.h> - -#define page_to_pfn(page) \ - (( (page) - page_zone(page)->zone_mem_map) \ - + page_zone(page)->zone_start_pfn) - -#define pfn_to_page(pfn) \ - (PFN_TO_MAPBASE(pfn) + LOCAL_MAP_NR((pfn) << PAGE_SHIFT)) +#define arch_pfn_to_nid(pfn) (PFN_TO_NID(pfn)) +#define arch_local_page_offset(pfn, nid) (LOCAL_MAP_NR((pfn) << PAGE_OFFSET)) #define pfn_valid(pfn) \ ({ \ @@ -243,4 +236,6 @@ static inline __deprecated void *bus_to_virt(unsigned long x) #endif +#include <asm-generic/memory_model.h> + #endif diff --git a/include/asm-arm/rtc.h b/include/asm-arm/rtc.h index 370dfe77589d..1a5c9232a91e 100644 --- a/include/asm-arm/rtc.h +++ b/include/asm-arm/rtc.h @@ -25,9 +25,6 @@ struct rtc_ops { int (*proc)(char *buf); }; -void rtc_time_to_tm(unsigned long, struct rtc_time *); -int rtc_tm_to_time(struct rtc_time *, unsigned long *); -int rtc_valid_tm(struct rtc_time *); void rtc_next_alarm_time(struct rtc_time *, struct rtc_time *, struct rtc_time *); void rtc_update(unsigned long, unsigned long); int register_rtc(struct rtc_ops *); diff --git a/include/asm-arm26/bitops.h b/include/asm-arm26/bitops.h index d87f8634e625..19a69573a654 100644 --- a/include/asm-arm26/bitops.h +++ b/include/asm-arm26/bitops.h @@ -117,65 +117,7 @@ ____atomic_test_and_change_bit(unsigned int bit, volatile unsigned long *p) return res & mask; } -/* - * Now the non-atomic variants. We let the compiler handle all - * optimisations for these. These are all _native_ endian. - */ -static inline void __set_bit(int nr, volatile unsigned long *p) -{ - p[nr >> 5] |= (1UL << (nr & 31)); -} - -static inline void __clear_bit(int nr, volatile unsigned long *p) -{ - p[nr >> 5] &= ~(1UL << (nr & 31)); -} - -static inline void __change_bit(int nr, volatile unsigned long *p) -{ - p[nr >> 5] ^= (1UL << (nr & 31)); -} - -static inline int __test_and_set_bit(int nr, volatile unsigned long *p) -{ - unsigned long oldval, mask = 1UL << (nr & 31); - - p += nr >> 5; - - oldval = *p; - *p = oldval | mask; - return oldval & mask; -} - -static inline int __test_and_clear_bit(int nr, volatile unsigned long *p) -{ - unsigned long oldval, mask = 1UL << (nr & 31); - - p += nr >> 5; - - oldval = *p; - *p = oldval & ~mask; - return oldval & mask; -} - -static inline int __test_and_change_bit(int nr, volatile unsigned long *p) -{ - unsigned long oldval, mask = 1UL << (nr & 31); - - p += nr >> 5; - - oldval = *p; - *p = oldval ^ mask; - return oldval & mask; -} - -/* - * This routine doesn't need to be atomic. - */ -static inline int __test_bit(int nr, const volatile unsigned long * p) -{ - return (p[nr >> 5] >> (nr & 31)) & 1UL; -} +#include <asm-generic/bitops/non-atomic.h> /* * Little endian assembly bitops. nr = 0 -> byte 0 bit 0. @@ -211,7 +153,6 @@ extern int _find_next_bit_le(const unsigned long *p, int size, int offset); #define test_and_set_bit(nr,p) ATOMIC_BITOP_LE(test_and_set_bit,nr,p) #define test_and_clear_bit(nr,p) ATOMIC_BITOP_LE(test_and_clear_bit,nr,p) #define test_and_change_bit(nr,p) ATOMIC_BITOP_LE(test_and_change_bit,nr,p) -#define test_bit(nr,p) __test_bit(nr,p) #define find_first_zero_bit(p,sz) _find_first_zero_bit_le(p,sz) #define find_next_zero_bit(p,sz,off) _find_next_zero_bit_le(p,sz,off) #define find_first_bit(p,sz) _find_first_bit_le(p,sz) @@ -219,80 +160,13 @@ extern int _find_next_bit_le(const unsigned long *p, int size, int offset); #define WORD_BITOFF_TO_LE(x) ((x)) -/* - * ffz = Find First Zero in word. Undefined if no zero exists, - * so code should check against ~0UL first.. 
- */ -static inline unsigned long ffz(unsigned long word) -{ - int k; - - word = ~word; - k = 31; - if (word & 0x0000ffff) { k -= 16; word <<= 16; } - if (word & 0x00ff0000) { k -= 8; word <<= 8; } - if (word & 0x0f000000) { k -= 4; word <<= 4; } - if (word & 0x30000000) { k -= 2; word <<= 2; } - if (word & 0x40000000) { k -= 1; } - return k; -} - -/* - * ffz = Find First Zero in word. Undefined if no zero exists, - * so code should check against ~0UL first.. - */ -static inline unsigned long __ffs(unsigned long word) -{ - int k; - - k = 31; - if (word & 0x0000ffff) { k -= 16; word <<= 16; } - if (word & 0x00ff0000) { k -= 8; word <<= 8; } - if (word & 0x0f000000) { k -= 4; word <<= 4; } - if (word & 0x30000000) { k -= 2; word <<= 2; } - if (word & 0x40000000) { k -= 1; } - return k; -} - -/* - * fls: find last bit set. - */ - -#define fls(x) generic_fls(x) -#define fls64(x) generic_fls64(x) - -/* - * ffs: find first bit set. This is defined the same way as - * the libc and compiler builtin ffs routines, therefore - * differs in spirit from the above ffz (man ffs). - */ - -#define ffs(x) generic_ffs(x) - -/* - * Find first bit set in a 168-bit bitmap, where the first - * 128 bits are unlikely to be set. - */ -static inline int sched_find_first_bit(unsigned long *b) -{ - unsigned long v; - unsigned int off; - - for (off = 0; v = b[off], off < 4; off++) { - if (unlikely(v)) - break; - } - return __ffs(v) + off * 32; -} - -/* - * hweightN: returns the hamming weight (i.e. the number - * of bits set) of a N-bit word - */ - -#define hweight32(x) generic_hweight32(x) -#define hweight16(x) generic_hweight16(x) -#define hweight8(x) generic_hweight8(x) +#include <asm-generic/bitops/ffz.h> +#include <asm-generic/bitops/__ffs.h> +#include <asm-generic/bitops/fls.h> +#include <asm-generic/bitops/fls64.h> +#include <asm-generic/bitops/ffs.h> +#include <asm-generic/bitops/sched.h> +#include <asm-generic/bitops/hweight.h> /* * Ext2 is defined to use little-endian byte ordering. 
@@ -307,7 +181,7 @@ static inline int sched_find_first_bit(unsigned long *b) #define ext2_clear_bit_atomic(lock,nr,p) \ test_and_clear_bit(WORD_BITOFF_TO_LE(nr), (unsigned long *)(p)) #define ext2_test_bit(nr,p) \ - __test_bit(WORD_BITOFF_TO_LE(nr), (unsigned long *)(p)) + test_bit(WORD_BITOFF_TO_LE(nr), (unsigned long *)(p)) #define ext2_find_first_zero_bit(p,sz) \ _find_first_zero_bit_le(p,sz) #define ext2_find_next_zero_bit(p,sz,off) \ @@ -320,7 +194,7 @@ static inline int sched_find_first_bit(unsigned long *b) #define minix_set_bit(nr,p) \ __set_bit(WORD_BITOFF_TO_LE(nr), (unsigned long *)(p)) #define minix_test_bit(nr,p) \ - __test_bit(WORD_BITOFF_TO_LE(nr), (unsigned long *)(p)) + test_bit(WORD_BITOFF_TO_LE(nr), (unsigned long *)(p)) #define minix_test_and_set_bit(nr,p) \ __test_and_set_bit(WORD_BITOFF_TO_LE(nr), (unsigned long *)(p)) #define minix_test_and_clear_bit(nr,p) \ diff --git a/include/asm-arm26/memory.h b/include/asm-arm26/memory.h index 20d78616f650..a65f10b80dfb 100644 --- a/include/asm-arm26/memory.h +++ b/include/asm-arm26/memory.h @@ -81,8 +81,7 @@ static inline void *phys_to_virt(unsigned long x) * virt_to_page(k) convert a _valid_ virtual address to struct page * * virt_addr_valid(k) indicates whether a virtual address is valid */ -#define page_to_pfn(page) (((page) - mem_map) + PHYS_PFN_OFFSET) -#define pfn_to_page(pfn) ((mem_map + (pfn)) - PHYS_PFN_OFFSET) +#define ARCH_PFN_OFFSET (PHYS_PFN_OFFSET) #define pfn_valid(pfn) ((pfn) >= PHYS_PFN_OFFSET && (pfn) < (PHYS_PFN_OFFSET + max_mapnr)) #define virt_to_page(kaddr) (pfn_to_page(__pa(kaddr) >> PAGE_SHIFT)) @@ -98,4 +97,5 @@ static inline void *phys_to_virt(unsigned long x) */ #define page_to_bus(page) (page_address(page)) +#include <asm-generic/memory_model.h> #endif diff --git a/include/asm-cris/bitops.h b/include/asm-cris/bitops.h index b7fef1572dc0..a569065113d9 100644 --- a/include/asm-cris/bitops.h +++ b/include/asm-cris/bitops.h @@ -39,8 +39,6 @@ struct __dummy { unsigned long a[100]; }; #define set_bit(nr, addr) (void)test_and_set_bit(nr, addr) -#define __set_bit(nr, addr) (void)__test_and_set_bit(nr, addr) - /* * clear_bit - Clears a bit in memory * @nr: Bit to clear @@ -54,8 +52,6 @@ struct __dummy { unsigned long a[100]; }; #define clear_bit(nr, addr) (void)test_and_clear_bit(nr, addr) -#define __clear_bit(nr, addr) (void)__test_and_clear_bit(nr, addr) - /* * change_bit - Toggle a bit in memory * @nr: Bit to change @@ -68,18 +64,6 @@ struct __dummy { unsigned long a[100]; }; #define change_bit(nr, addr) (void)test_and_change_bit(nr, addr) -/* - * __change_bit - Toggle a bit in memory - * @nr: the bit to change - * @addr: the address to start counting from - * - * Unlike change_bit(), this function is non-atomic and may be reordered. - * If it's called on the same region of memory simultaneously, the effect - * may be that only one operation succeeds. 
- */ - -#define __change_bit(nr, addr) (void)__test_and_change_bit(nr, addr) - /** * test_and_set_bit - Set a bit and return its old value * @nr: Bit to set @@ -101,19 +85,6 @@ static inline int test_and_set_bit(int nr, volatile unsigned long *addr) retval = (mask & *adr) != 0; *adr |= mask; cris_atomic_restore(addr, flags); - local_irq_restore(flags); - return retval; -} - -static inline int __test_and_set_bit(int nr, volatile unsigned long *addr) -{ - unsigned int mask, retval; - unsigned int *adr = (unsigned int *)addr; - - adr += nr >> 5; - mask = 1 << (nr & 0x1f); - retval = (mask & *adr) != 0; - *adr |= mask; return retval; } @@ -148,27 +119,6 @@ static inline int test_and_clear_bit(int nr, volatile unsigned long *addr) } /** - * __test_and_clear_bit - Clear a bit and return its old value - * @nr: Bit to clear - * @addr: Address to count from - * - * This operation is non-atomic and can be reordered. - * If two examples of this operation race, one can appear to succeed - * but actually fail. You must protect multiple accesses with a lock. - */ - -static inline int __test_and_clear_bit(int nr, volatile unsigned long *addr) -{ - unsigned int mask, retval; - unsigned int *adr = (unsigned int *)addr; - - adr += nr >> 5; - mask = 1 << (nr & 0x1f); - retval = (mask & *adr) != 0; - *adr &= ~mask; - return retval; -} -/** * test_and_change_bit - Change a bit and return its old value * @nr: Bit to change * @addr: Address to count from @@ -191,42 +141,7 @@ static inline int test_and_change_bit(int nr, volatile unsigned long *addr) return retval; } -/* WARNING: non atomic and it can be reordered! */ - -static inline int __test_and_change_bit(int nr, volatile unsigned long *addr) -{ - unsigned int mask, retval; - unsigned int *adr = (unsigned int *)addr; - - adr += nr >> 5; - mask = 1 << (nr & 0x1f); - retval = (mask & *adr) != 0; - *adr ^= mask; - - return retval; -} - -/** - * test_bit - Determine whether a bit is set - * @nr: bit number to test - * @addr: Address to start counting from - * - * This routine doesn't need to be atomic. - */ - -static inline int test_bit(int nr, const volatile unsigned long *addr) -{ - unsigned int mask; - unsigned int *adr = (unsigned int *)addr; - - adr += nr >> 5; - mask = 1 << (nr & 0x1f); - return ((mask & *adr) != 0); -} - -/* - * Find-bit routines.. - */ +#include <asm-generic/bitops/non-atomic.h> /* * Since we define it "external", it collides with the built-in @@ -235,152 +150,18 @@ static inline int test_bit(int nr, const volatile unsigned long *addr) */ #define ffs kernel_ffs -/* - * fls: find last bit set. - */ - -#define fls(x) generic_fls(x) -#define fls64(x) generic_fls64(x) - -/* - * hweightN - returns the hamming weight of a N-bit word - * @x: the word to weigh - * - * The Hamming Weight of a number is the total number of bits set in it. 
- */ +#include <asm-generic/bitops/fls.h> +#include <asm-generic/bitops/fls64.h> +#include <asm-generic/bitops/hweight.h> +#include <asm-generic/bitops/find.h> -#define hweight32(x) generic_hweight32(x) -#define hweight16(x) generic_hweight16(x) -#define hweight8(x) generic_hweight8(x) +#include <asm-generic/bitops/ext2-non-atomic.h> -/** - * find_next_zero_bit - find the first zero bit in a memory region - * @addr: The address to base the search on - * @offset: The bitnumber to start searching at - * @size: The maximum size to search - */ -static inline int find_next_zero_bit (const unsigned long * addr, int size, int offset) -{ - unsigned long *p = ((unsigned long *) addr) + (offset >> 5); - unsigned long result = offset & ~31UL; - unsigned long tmp; - - if (offset >= size) - return size; - size -= result; - offset &= 31UL; - if (offset) { - tmp = *(p++); - tmp |= ~0UL >> (32-offset); - if (size < 32) - goto found_first; - if (~tmp) - goto found_middle; - size -= 32; - result += 32; - } - while (size & ~31UL) { - if (~(tmp = *(p++))) - goto found_middle; - result += 32; - size -= 32; - } - if (!size) - return result; - tmp = *p; - - found_first: - tmp |= ~0UL << size; - found_middle: - return result + ffz(tmp); -} - -/** - * find_next_bit - find the first set bit in a memory region - * @addr: The address to base the search on - * @offset: The bitnumber to start searching at - * @size: The maximum size to search - */ -static __inline__ int find_next_bit(const unsigned long *addr, int size, int offset) -{ - unsigned long *p = ((unsigned long *) addr) + (offset >> 5); - unsigned long result = offset & ~31UL; - unsigned long tmp; - - if (offset >= size) - return size; - size -= result; - offset &= 31UL; - if (offset) { - tmp = *(p++); - tmp &= (~0UL << offset); - if (size < 32) - goto found_first; - if (tmp) - goto found_middle; - size -= 32; - result += 32; - } - while (size & ~31UL) { - if ((tmp = *(p++))) - goto found_middle; - result += 32; - size -= 32; - } - if (!size) - return result; - tmp = *p; - -found_first: - tmp &= (~0UL >> (32 - size)); - if (tmp == 0UL) /* Are any bits set? */ - return result + size; /* Nope. */ -found_middle: - return result + __ffs(tmp); -} - -/** - * find_first_zero_bit - find the first zero bit in a memory region - * @addr: The address to start the search at - * @size: The maximum size to search - * - * Returns the bit-number of the first zero bit, not the number of the byte - * containing a bit. - */ - -#define find_first_zero_bit(addr, size) \ - find_next_zero_bit((addr), (size), 0) -#define find_first_bit(addr, size) \ - find_next_bit((addr), (size), 0) - -#define ext2_set_bit test_and_set_bit #define ext2_set_bit_atomic(l,n,a) test_and_set_bit(n,a) -#define ext2_clear_bit test_and_clear_bit #define ext2_clear_bit_atomic(l,n,a) test_and_clear_bit(n,a) -#define ext2_test_bit test_bit -#define ext2_find_first_zero_bit find_first_zero_bit -#define ext2_find_next_zero_bit find_next_zero_bit - -/* Bitmap functions for the minix filesystem. 
*/ -#define minix_set_bit(nr,addr) test_and_set_bit(nr,addr) -#define minix_clear_bit(nr,addr) test_and_clear_bit(nr,addr) -#define minix_test_bit(nr,addr) test_bit(nr,addr) -#define minix_find_first_zero_bit(addr,size) find_first_zero_bit(addr,size) -static inline int sched_find_first_bit(const unsigned long *b) -{ - if (unlikely(b[0])) - return __ffs(b[0]); - if (unlikely(b[1])) - return __ffs(b[1]) + 32; - if (unlikely(b[2])) - return __ffs(b[2]) + 64; - if (unlikely(b[3])) - return __ffs(b[3]) + 96; - if (b[4]) - return __ffs(b[4]) + 128; - return __ffs(b[5]) + 32 + 128; -} +#include <asm-generic/bitops/minix.h> +#include <asm-generic/bitops/sched.h> #endif /* __KERNEL__ */ diff --git a/include/asm-cris/page.h b/include/asm-cris/page.h index c99c478c482f..3787633e6209 100644 --- a/include/asm-cris/page.h +++ b/include/asm-cris/page.h @@ -43,8 +43,7 @@ typedef struct { unsigned long pgprot; } pgprot_t; /* On CRIS the PFN numbers doesn't start at 0 so we have to compensate */ /* for that before indexing into the page table starting at mem_map */ -#define pfn_to_page(pfn) (mem_map + ((pfn) - (PAGE_OFFSET >> PAGE_SHIFT))) -#define page_to_pfn(page) ((unsigned long)((page) - mem_map) + (PAGE_OFFSET >> PAGE_SHIFT)) +#define ARCH_PFN_OFFSET (PAGE_OFFSET >> PAGE_SHIFT) #define pfn_valid(pfn) (((pfn) - (PAGE_OFFSET >> PAGE_SHIFT)) < max_mapnr) /* to index into the page map. our pages all start at physical addr PAGE_OFFSET so @@ -77,6 +76,7 @@ typedef struct { unsigned long pgprot; } pgprot_t; #endif /* __KERNEL__ */ +#include <asm-generic/memory_model.h> #include <asm-generic/page.h> #endif /* _CRIS_PAGE_H */ diff --git a/include/asm-frv/bitops.h b/include/asm-frv/bitops.h index f686b519878e..6344d06390b9 100644 --- a/include/asm-frv/bitops.h +++ b/include/asm-frv/bitops.h @@ -22,20 +22,7 @@ #ifdef __KERNEL__ -/* - * ffz = Find First Zero in word. Undefined if no zero exists, - * so code should check against ~0UL first.. - */ -static inline unsigned long ffz(unsigned long word) -{ - unsigned long result = 0; - - while (word & 1) { - result++; - word >>= 1; - } - return result; -} +#include <asm-generic/bitops/ffz.h> /* * clear_bit() doesn't provide any barrier for the compiler. @@ -171,51 +158,9 @@ static inline int __test_bit(int nr, const volatile void * addr) __constant_test_bit((nr),(addr)) : \ __test_bit((nr),(addr))) -extern int find_next_bit(const unsigned long *addr, int size, int offset); - -#define find_first_bit(addr, size) find_next_bit(addr, size, 0) - -#define find_first_zero_bit(addr, size) \ - find_next_zero_bit((addr), (size), 0) - -static inline int find_next_zero_bit(const void *addr, int size, int offset) -{ - const unsigned long *p = ((const unsigned long *) addr) + (offset >> 5); - unsigned long result = offset & ~31UL; - unsigned long tmp; - - if (offset >= size) - return size; - size -= result; - offset &= 31UL; - if (offset) { - tmp = *(p++); - tmp |= ~0UL >> (32-offset); - if (size < 32) - goto found_first; - if (~tmp) - goto found_middle; - size -= 32; - result += 32; - } - while (size & ~31UL) { - if (~(tmp = *(p++))) - goto found_middle; - result += 32; - size -= 32; - } - if (!size) - return result; - tmp = *p; - -found_first: - tmp |= ~0UL << size; -found_middle: - return result + ffz(tmp); -} - -#define ffs(x) generic_ffs(x) -#define __ffs(x) (ffs(x) - 1) +#include <asm-generic/bitops/ffs.h> +#include <asm-generic/bitops/__ffs.h> +#include <asm-generic/bitops/find.h> /* * fls: find last bit set. @@ -228,114 +173,17 @@ found_middle: \ bit ? 
33 - bit : bit; \ }) -#define fls64(x) generic_fls64(x) -/* - * Every architecture must define this function. It's the fastest - * way of searching a 140-bit bitmap where the first 100 bits are - * unlikely to be set. It's guaranteed that at least one of the 140 - * bits is cleared. - */ -static inline int sched_find_first_bit(const unsigned long *b) -{ - if (unlikely(b[0])) - return __ffs(b[0]); - if (unlikely(b[1])) - return __ffs(b[1]) + 32; - if (unlikely(b[2])) - return __ffs(b[2]) + 64; - if (b[3]) - return __ffs(b[3]) + 96; - return __ffs(b[4]) + 128; -} +#include <asm-generic/bitops/fls64.h> +#include <asm-generic/bitops/sched.h> +#include <asm-generic/bitops/hweight.h> +#include <asm-generic/bitops/ext2-non-atomic.h> -/* - * hweightN: returns the hamming weight (i.e. the number - * of bits set) of a N-bit word - */ - -#define hweight32(x) generic_hweight32(x) -#define hweight16(x) generic_hweight16(x) -#define hweight8(x) generic_hweight8(x) - -#define ext2_set_bit(nr, addr) test_and_set_bit ((nr) ^ 0x18, (addr)) -#define ext2_clear_bit(nr, addr) test_and_clear_bit((nr) ^ 0x18, (addr)) - -#define ext2_set_bit_atomic(lock,nr,addr) ext2_set_bit((nr), addr) -#define ext2_clear_bit_atomic(lock,nr,addr) ext2_clear_bit((nr), addr) - -static inline int ext2_test_bit(int nr, const volatile void * addr) -{ - const volatile unsigned char *ADDR = (const unsigned char *) addr; - int mask; - - ADDR += nr >> 3; - mask = 1 << (nr & 0x07); - return ((mask & *ADDR) != 0); -} - -#define ext2_find_first_zero_bit(addr, size) \ - ext2_find_next_zero_bit((addr), (size), 0) - -static inline unsigned long ext2_find_next_zero_bit(const void *addr, - unsigned long size, - unsigned long offset) -{ - const unsigned long *p = ((const unsigned long *) addr) + (offset >> 5); - unsigned long result = offset & ~31UL; - unsigned long tmp; - - if (offset >= size) - return size; - size -= result; - offset &= 31UL; - if(offset) { - /* We hold the little endian value in tmp, but then the - * shift is illegal. So we could keep a big endian value - * in tmp, like this: - * - * tmp = __swab32(*(p++)); - * tmp |= ~0UL >> (32-offset); - * - * but this would decrease preformance, so we change the - * shift: - */ - tmp = *(p++); - tmp |= __swab32(~0UL >> (32-offset)); - if(size < 32) - goto found_first; - if(~tmp) - goto found_middle; - size -= 32; - result += 32; - } - while(size & ~31UL) { - if(~(tmp = *(p++))) - goto found_middle; - result += 32; - size -= 32; - } - if(!size) - return result; - tmp = *p; - -found_first: - /* tmp is little endian, so we would have to swab the shift, - * see above. But then we have to swab tmp below for ffz, so - * we might as well do this here. - */ - return result + ffz(__swab32(tmp) | (~0UL << size)); -found_middle: - return result + ffz(__swab32(tmp)); -} +#define ext2_set_bit_atomic(lock,nr,addr) test_and_set_bit ((nr) ^ 0x18, (addr)) +#define ext2_clear_bit_atomic(lock,nr,addr) test_and_clear_bit((nr) ^ 0x18, (addr)) -/* Bitmap functions for the minix filesystem. 
*/ -#define minix_test_and_set_bit(nr,addr) ext2_set_bit(nr,addr) -#define minix_set_bit(nr,addr) ext2_set_bit(nr,addr) -#define minix_test_and_clear_bit(nr,addr) ext2_clear_bit(nr,addr) -#define minix_test_bit(nr,addr) ext2_test_bit(nr,addr) -#define minix_find_first_zero_bit(addr,size) ext2_find_first_zero_bit(addr,size) +#include <asm-generic/bitops/minix-le.h> #endif /* __KERNEL__ */ diff --git a/include/asm-frv/futex.h b/include/asm-frv/futex.h index fca9d90e32c9..08b3d1da3583 100644 --- a/include/asm-frv/futex.h +++ b/include/asm-frv/futex.h @@ -9,5 +9,11 @@ extern int futex_atomic_op_inuser(int encoded_op, int __user *uaddr); +static inline int +futex_atomic_cmpxchg_inatomic(int __user *uaddr, int oldval, int newval) +{ + return -ENOSYS; +} + #endif #endif diff --git a/include/asm-frv/page.h b/include/asm-frv/page.h index b8221b611b5c..dc0f7e08a4c2 100644 --- a/include/asm-frv/page.h +++ b/include/asm-frv/page.h @@ -57,13 +57,9 @@ extern unsigned long min_low_pfn; extern unsigned long max_pfn; #ifdef CONFIG_MMU -#define pfn_to_page(pfn) (mem_map + (pfn)) -#define page_to_pfn(page) ((unsigned long) ((page) - mem_map)) #define pfn_valid(pfn) ((pfn) < max_mapnr) - #else -#define pfn_to_page(pfn) (&mem_map[(pfn) - (PAGE_OFFSET >> PAGE_SHIFT)]) -#define page_to_pfn(page) ((PAGE_OFFSET >> PAGE_SHIFT) + (unsigned long) ((page) - mem_map)) +#define ARCH_PFN_OFFSET (PAGE_OFFSET >> PAGE_SHIFT) #define pfn_valid(pfn) ((pfn) >= min_low_pfn && (pfn) < max_low_pfn) #endif @@ -87,6 +83,7 @@ extern unsigned long max_pfn; #define WANT_PAGE_VIRTUAL 1 #endif +#include <asm-generic/memory_model.h> #include <asm-generic/page.h> #endif /* _ASM_PAGE_H */ diff --git a/include/asm-generic/bitops.h b/include/asm-generic/bitops.h index 0e6d9852008c..1f9d99193df8 100644 --- a/include/asm-generic/bitops.h +++ b/include/asm-generic/bitops.h @@ -5,77 +5,27 @@ * For the benefit of those who are trying to port Linux to another * architecture, here are some C-language equivalents. You should * recode these in the native assembly language, if at all possible. - * To guarantee atomicity, these routines call cli() and sti() to - * disable interrupts while they operate. (You have to provide inline - * routines to cli() and sti().) - * - * Also note, these routines assume that you have 32 bit longs. - * You will have to change this if you are trying to port Linux to the - * Alpha architecture or to a Cray. :-) * * C language equivalents written by Theodore Ts'o, 9/26/92 */ -extern __inline__ int set_bit(int nr,long * addr) -{ - int mask, retval; - - addr += nr >> 5; - mask = 1 << (nr & 0x1f); - cli(); - retval = (mask & *addr) != 0; - *addr |= mask; - sti(); - return retval; -} - -extern __inline__ int clear_bit(int nr, long * addr) -{ - int mask, retval; - - addr += nr >> 5; - mask = 1 << (nr & 0x1f); - cli(); - retval = (mask & *addr) != 0; - *addr &= ~mask; - sti(); - return retval; -} - -extern __inline__ int test_bit(int nr, const unsigned long * addr) -{ - int mask; - - addr += nr >> 5; - mask = 1 << (nr & 0x1f); - return ((mask & *addr) != 0); -} - -/* - * fls: find last bit set. - */ - -#define fls(x) generic_fls(x) -#define fls64(x) generic_fls64(x) +#include <asm-generic/bitops/atomic.h> +#include <asm-generic/bitops/non-atomic.h> +#include <asm-generic/bitops/__ffs.h> +#include <asm-generic/bitops/ffz.h> +#include <asm-generic/bitops/fls.h> +#include <asm-generic/bitops/fls64.h> +#include <asm-generic/bitops/find.h> #ifdef __KERNEL__ -/* - * ffs: find first bit set. 
This is defined the same way as - * the libc and compiler builtin ffs routines, therefore - * differs in spirit from the above ffz (man ffs). - */ - -#define ffs(x) generic_ffs(x) - -/* - * hweightN: returns the hamming weight (i.e. the number - * of bits set) of a N-bit word - */ +#include <asm-generic/bitops/sched.h> +#include <asm-generic/bitops/ffs.h> +#include <asm-generic/bitops/hweight.h> -#define hweight32(x) generic_hweight32(x) -#define hweight16(x) generic_hweight16(x) -#define hweight8(x) generic_hweight8(x) +#include <asm-generic/bitops/ext2-non-atomic.h> +#include <asm-generic/bitops/ext2-atomic.h> +#include <asm-generic/bitops/minix.h> #endif /* __KERNEL__ */ diff --git a/include/asm-generic/bitops/__ffs.h b/include/asm-generic/bitops/__ffs.h new file mode 100644 index 000000000000..9a3274aecf83 --- /dev/null +++ b/include/asm-generic/bitops/__ffs.h @@ -0,0 +1,43 @@ +#ifndef _ASM_GENERIC_BITOPS___FFS_H_ +#define _ASM_GENERIC_BITOPS___FFS_H_ + +#include <asm/types.h> + +/** + * __ffs - find first bit in word. + * @word: The word to search + * + * Undefined if no bit exists, so code should check against 0 first. + */ +static inline unsigned long __ffs(unsigned long word) +{ + int num = 0; + +#if BITS_PER_LONG == 64 + if ((word & 0xffffffff) == 0) { + num += 32; + word >>= 32; + } +#endif + if ((word & 0xffff) == 0) { + num += 16; + word >>= 16; + } + if ((word & 0xff) == 0) { + num += 8; + word >>= 8; + } + if ((word & 0xf) == 0) { + num += 4; + word >>= 4; + } + if ((word & 0x3) == 0) { + num += 2; + word >>= 2; + } + if ((word & 0x1) == 0) + num += 1; + return num; +} + +#endif /* _ASM_GENERIC_BITOPS___FFS_H_ */ diff --git a/include/asm-generic/bitops/atomic.h b/include/asm-generic/bitops/atomic.h new file mode 100644 index 000000000000..78339319ba02 --- /dev/null +++ b/include/asm-generic/bitops/atomic.h @@ -0,0 +1,191 @@ +#ifndef _ASM_GENERIC_BITOPS_ATOMIC_H_ +#define _ASM_GENERIC_BITOPS_ATOMIC_H_ + +#include <asm/types.h> + +#define BITOP_MASK(nr) (1UL << ((nr) % BITS_PER_LONG)) +#define BITOP_WORD(nr) ((nr) / BITS_PER_LONG) + +#ifdef CONFIG_SMP +#include <asm/spinlock.h> +#include <asm/cache.h> /* we use L1_CACHE_BYTES */ + +/* Use an array of spinlocks for our atomic_ts. + * Hash function to index into a different SPINLOCK. + * Since "a" is usually an address, use one spinlock per cacheline. + */ +# define ATOMIC_HASH_SIZE 4 +# define ATOMIC_HASH(a) (&(__atomic_hash[ (((unsigned long) a)/L1_CACHE_BYTES) & (ATOMIC_HASH_SIZE-1) ])) + +extern raw_spinlock_t __atomic_hash[ATOMIC_HASH_SIZE] __lock_aligned; + +/* Can't use raw_spin_lock_irq because of #include problems, so + * this is the substitute */ +#define _atomic_spin_lock_irqsave(l,f) do { \ + raw_spinlock_t *s = ATOMIC_HASH(l); \ + local_irq_save(f); \ + __raw_spin_lock(s); \ +} while(0) + +#define _atomic_spin_unlock_irqrestore(l,f) do { \ + raw_spinlock_t *s = ATOMIC_HASH(l); \ + __raw_spin_unlock(s); \ + local_irq_restore(f); \ +} while(0) + + +#else +# define _atomic_spin_lock_irqsave(l,f) do { local_irq_save(f); } while (0) +# define _atomic_spin_unlock_irqrestore(l,f) do { local_irq_restore(f); } while (0) +#endif + +/* + * NMI events can occur at any time, including when interrupts have been + * disabled by *_irqsave(). So you can get NMI events occurring while a + * *_bit function is holding a spin lock. If the NMI handler also wants + * to do bit manipulation (and they do) then you can get a deadlock + * between the original caller of *_bit() and the NMI handler. 
+ * + * by Keith Owens + */ + +/** + * set_bit - Atomically set a bit in memory + * @nr: the bit to set + * @addr: the address to start counting from + * + * This function is atomic and may not be reordered. See __set_bit() + * if you do not require the atomic guarantees. + * + * Note: there are no guarantees that this function will not be reordered + * on non x86 architectures, so if you are writting portable code, + * make sure not to rely on its reordering guarantees. + * + * Note that @nr may be almost arbitrarily large; this function is not + * restricted to acting on a single-word quantity. + */ +static inline void set_bit(int nr, volatile unsigned long *addr) +{ + unsigned long mask = BITOP_MASK(nr); + unsigned long *p = ((unsigned long *)addr) + BITOP_WORD(nr); + unsigned long flags; + + _atomic_spin_lock_irqsave(p, flags); + *p |= mask; + _atomic_spin_unlock_irqrestore(p, flags); +} + +/** + * clear_bit - Clears a bit in memory + * @nr: Bit to clear + * @addr: Address to start counting from + * + * clear_bit() is atomic and may not be reordered. However, it does + * not contain a memory barrier, so if it is used for locking purposes, + * you should call smp_mb__before_clear_bit() and/or smp_mb__after_clear_bit() + * in order to ensure changes are visible on other processors. + */ +static inline void clear_bit(int nr, volatile unsigned long *addr) +{ + unsigned long mask = BITOP_MASK(nr); + unsigned long *p = ((unsigned long *)addr) + BITOP_WORD(nr); + unsigned long flags; + + _atomic_spin_lock_irqsave(p, flags); + *p &= ~mask; + _atomic_spin_unlock_irqrestore(p, flags); +} + +/** + * change_bit - Toggle a bit in memory + * @nr: Bit to change + * @addr: Address to start counting from + * + * change_bit() is atomic and may not be reordered. It may be + * reordered on other architectures than x86. + * Note that @nr may be almost arbitrarily large; this function is not + * restricted to acting on a single-word quantity. + */ +static inline void change_bit(int nr, volatile unsigned long *addr) +{ + unsigned long mask = BITOP_MASK(nr); + unsigned long *p = ((unsigned long *)addr) + BITOP_WORD(nr); + unsigned long flags; + + _atomic_spin_lock_irqsave(p, flags); + *p ^= mask; + _atomic_spin_unlock_irqrestore(p, flags); +} + +/** + * test_and_set_bit - Set a bit and return its old value + * @nr: Bit to set + * @addr: Address to count from + * + * This operation is atomic and cannot be reordered. + * It may be reordered on other architectures than x86. + * It also implies a memory barrier. + */ +static inline int test_and_set_bit(int nr, volatile unsigned long *addr) +{ + unsigned long mask = BITOP_MASK(nr); + unsigned long *p = ((unsigned long *)addr) + BITOP_WORD(nr); + unsigned long old; + unsigned long flags; + + _atomic_spin_lock_irqsave(p, flags); + old = *p; + *p = old | mask; + _atomic_spin_unlock_irqrestore(p, flags); + + return (old & mask) != 0; +} + +/** + * test_and_clear_bit - Clear a bit and return its old value + * @nr: Bit to clear + * @addr: Address to count from + * + * This operation is atomic and cannot be reordered. + * It can be reorderdered on other architectures other than x86. + * It also implies a memory barrier. 
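[Illustrative sketch, not part of the patch: one way a caller might use the spinlock-backed atomic bitops defined above. The driver name, flag bit numbers and function are invented for illustration; real users simply include <linux/bitops.h> and pick this implementation up on architectures that have no native atomic bitops.]

#include <linux/bitops.h>
#include <linux/errno.h>

#define MYDRV_BUSY	0	/* hypothetical flag bit numbers */
#define MYDRV_ERROR	1

static unsigned long mydrv_flags;	/* all flags live in one word */

static int mydrv_try_start(void)
{
	/* atomically set BUSY; the returned old value detects a race */
	if (test_and_set_bit(MYDRV_BUSY, &mydrv_flags))
		return -EBUSY;
	clear_bit(MYDRV_ERROR, &mydrv_flags);
	return 0;
}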
+ */ +static inline int test_and_clear_bit(int nr, volatile unsigned long *addr) +{ + unsigned long mask = BITOP_MASK(nr); + unsigned long *p = ((unsigned long *)addr) + BITOP_WORD(nr); + unsigned long old; + unsigned long flags; + + _atomic_spin_lock_irqsave(p, flags); + old = *p; + *p = old & ~mask; + _atomic_spin_unlock_irqrestore(p, flags); + + return (old & mask) != 0; +} + +/** + * test_and_change_bit - Change a bit and return its old value + * @nr: Bit to change + * @addr: Address to count from + * + * This operation is atomic and cannot be reordered. + * It also implies a memory barrier. + */ +static inline int test_and_change_bit(int nr, volatile unsigned long *addr) +{ + unsigned long mask = BITOP_MASK(nr); + unsigned long *p = ((unsigned long *)addr) + BITOP_WORD(nr); + unsigned long old; + unsigned long flags; + + _atomic_spin_lock_irqsave(p, flags); + old = *p; + *p = old ^ mask; + _atomic_spin_unlock_irqrestore(p, flags); + + return (old & mask) != 0; +} + +#endif /* _ASM_GENERIC_BITOPS_ATOMIC_H */ diff --git a/include/asm-generic/bitops/ext2-atomic.h b/include/asm-generic/bitops/ext2-atomic.h new file mode 100644 index 000000000000..ab1c875efb74 --- /dev/null +++ b/include/asm-generic/bitops/ext2-atomic.h @@ -0,0 +1,22 @@ +#ifndef _ASM_GENERIC_BITOPS_EXT2_ATOMIC_H_ +#define _ASM_GENERIC_BITOPS_EXT2_ATOMIC_H_ + +#define ext2_set_bit_atomic(lock, nr, addr) \ + ({ \ + int ret; \ + spin_lock(lock); \ + ret = ext2_set_bit((nr), (unsigned long *)(addr)); \ + spin_unlock(lock); \ + ret; \ + }) + +#define ext2_clear_bit_atomic(lock, nr, addr) \ + ({ \ + int ret; \ + spin_lock(lock); \ + ret = ext2_clear_bit((nr), (unsigned long *)(addr)); \ + spin_unlock(lock); \ + ret; \ + }) + +#endif /* _ASM_GENERIC_BITOPS_EXT2_ATOMIC_H_ */ diff --git a/include/asm-generic/bitops/ext2-non-atomic.h b/include/asm-generic/bitops/ext2-non-atomic.h new file mode 100644 index 000000000000..1697404afa05 --- /dev/null +++ b/include/asm-generic/bitops/ext2-non-atomic.h @@ -0,0 +1,18 @@ +#ifndef _ASM_GENERIC_BITOPS_EXT2_NON_ATOMIC_H_ +#define _ASM_GENERIC_BITOPS_EXT2_NON_ATOMIC_H_ + +#include <asm-generic/bitops/le.h> + +#define ext2_set_bit(nr,addr) \ + generic___test_and_set_le_bit((nr),(unsigned long *)(addr)) +#define ext2_clear_bit(nr,addr) \ + generic___test_and_clear_le_bit((nr),(unsigned long *)(addr)) + +#define ext2_test_bit(nr,addr) \ + generic_test_le_bit((nr),(unsigned long *)(addr)) +#define ext2_find_first_zero_bit(addr, size) \ + generic_find_first_zero_le_bit((unsigned long *)(addr), (size)) +#define ext2_find_next_zero_bit(addr, size, off) \ + generic_find_next_zero_le_bit((unsigned long *)(addr), (size), (off)) + +#endif /* _ASM_GENERIC_BITOPS_EXT2_NON_ATOMIC_H_ */ diff --git a/include/asm-generic/bitops/ffs.h b/include/asm-generic/bitops/ffs.h new file mode 100644 index 000000000000..fbbb43af7dc0 --- /dev/null +++ b/include/asm-generic/bitops/ffs.h @@ -0,0 +1,41 @@ +#ifndef _ASM_GENERIC_BITOPS_FFS_H_ +#define _ASM_GENERIC_BITOPS_FFS_H_ + +/** + * ffs - find first bit set + * @x: the word to search + * + * This is defined the same way as + * the libc and compiler builtin ffs routines, therefore + * differs in spirit from the above ffz (man ffs). 
+ */ +static inline int ffs(int x) +{ + int r = 1; + + if (!x) + return 0; + if (!(x & 0xffff)) { + x >>= 16; + r += 16; + } + if (!(x & 0xff)) { + x >>= 8; + r += 8; + } + if (!(x & 0xf)) { + x >>= 4; + r += 4; + } + if (!(x & 3)) { + x >>= 2; + r += 2; + } + if (!(x & 1)) { + x >>= 1; + r += 1; + } + return r; +} + +#endif /* _ASM_GENERIC_BITOPS_FFS_H_ */ diff --git a/include/asm-generic/bitops/ffz.h b/include/asm-generic/bitops/ffz.h new file mode 100644 index 000000000000..6744bd4cdf46 --- /dev/null +++ b/include/asm-generic/bitops/ffz.h @@ -0,0 +1,12 @@ +#ifndef _ASM_GENERIC_BITOPS_FFZ_H_ +#define _ASM_GENERIC_BITOPS_FFZ_H_ + +/* + * ffz - find first zero in word. + * @word: The word to search + * + * Undefined if no zero exists, so code should check against ~0UL first. + */ +#define ffz(x) __ffs(~(x)) + +#endif /* _ASM_GENERIC_BITOPS_FFZ_H_ */ diff --git a/include/asm-generic/bitops/find.h b/include/asm-generic/bitops/find.h new file mode 100644 index 000000000000..72a51e5a12ef --- /dev/null +++ b/include/asm-generic/bitops/find.h @@ -0,0 +1,13 @@ +#ifndef _ASM_GENERIC_BITOPS_FIND_H_ +#define _ASM_GENERIC_BITOPS_FIND_H_ + +extern unsigned long find_next_bit(const unsigned long *addr, unsigned long + size, unsigned long offset); + +extern unsigned long find_next_zero_bit(const unsigned long *addr, unsigned + long size, unsigned long offset); + +#define find_first_bit(addr, size) find_next_bit((addr), (size), 0) +#define find_first_zero_bit(addr, size) find_next_zero_bit((addr), (size), 0) + +#endif /*_ASM_GENERIC_BITOPS_FIND_H_ */ diff --git a/include/asm-generic/bitops/fls.h b/include/asm-generic/bitops/fls.h new file mode 100644 index 000000000000..850859bc5069 --- /dev/null +++ b/include/asm-generic/bitops/fls.h @@ -0,0 +1,41 @@ +#ifndef _ASM_GENERIC_BITOPS_FLS_H_ +#define _ASM_GENERIC_BITOPS_FLS_H_ + +/** + * fls - find last (most-significant) bit set + * @x: the word to search + * + * This is defined the same way as ffs. + * Note fls(0) = 0, fls(1) = 1, fls(0x80000000) = 32. 
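[Illustrative sketch, not part of the patch: how the generic ffs(), __ffs(), ffz() and fls() above relate to each other. The function name is invented; BUG_ON() and the headers are assumed to be the usual kernel facilities.]

#include <linux/kernel.h>
#include <linux/bitops.h>

static void bitops_sanity_check(void)
{
	BUG_ON(ffs(0x18) != 4);		/* lowest set bit is bit 3; ffs() is 1-based */
	BUG_ON(__ffs(0x18) != 3);	/* __ffs() is 0-based, i.e. ffs(x) - 1 */
	BUG_ON(fls(0x18) != 5);		/* highest set bit is bit 4; fls() is 1-based */
	BUG_ON(ffz(0x07) != 3);		/* first zero bit; ffz(x) == __ffs(~x) */
	BUG_ON(ffs(0) != 0 || fls(0) != 0);
}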
+ */ + +static inline int fls(int x) +{ + int r = 32; + + if (!x) + return 0; + if (!(x & 0xffff0000u)) { + x <<= 16; + r -= 16; + } + if (!(x & 0xff000000u)) { + x <<= 8; + r -= 8; + } + if (!(x & 0xf0000000u)) { + x <<= 4; + r -= 4; + } + if (!(x & 0xc0000000u)) { + x <<= 2; + r -= 2; + } + if (!(x & 0x80000000u)) { + x <<= 1; + r -= 1; + } + return r; +} + +#endif /* _ASM_GENERIC_BITOPS_FLS_H_ */ diff --git a/include/asm-generic/bitops/fls64.h b/include/asm-generic/bitops/fls64.h new file mode 100644 index 000000000000..1b6b17ce2428 --- /dev/null +++ b/include/asm-generic/bitops/fls64.h @@ -0,0 +1,14 @@ +#ifndef _ASM_GENERIC_BITOPS_FLS64_H_ +#define _ASM_GENERIC_BITOPS_FLS64_H_ + +#include <asm/types.h> + +static inline int fls64(__u64 x) +{ + __u32 h = x >> 32; + if (h) + return fls(h) + 32; + return fls(x); +} + +#endif /* _ASM_GENERIC_BITOPS_FLS64_H_ */ diff --git a/include/asm-generic/bitops/hweight.h b/include/asm-generic/bitops/hweight.h new file mode 100644 index 000000000000..fbbc383771da --- /dev/null +++ b/include/asm-generic/bitops/hweight.h @@ -0,0 +1,11 @@ +#ifndef _ASM_GENERIC_BITOPS_HWEIGHT_H_ +#define _ASM_GENERIC_BITOPS_HWEIGHT_H_ + +#include <asm/types.h> + +extern unsigned int hweight32(unsigned int w); +extern unsigned int hweight16(unsigned int w); +extern unsigned int hweight8(unsigned int w); +extern unsigned long hweight64(__u64 w); + +#endif /* _ASM_GENERIC_BITOPS_HWEIGHT_H_ */ diff --git a/include/asm-generic/bitops/le.h b/include/asm-generic/bitops/le.h new file mode 100644 index 000000000000..b9c7e5d2d2ad --- /dev/null +++ b/include/asm-generic/bitops/le.h @@ -0,0 +1,53 @@ +#ifndef _ASM_GENERIC_BITOPS_LE_H_ +#define _ASM_GENERIC_BITOPS_LE_H_ + +#include <asm/types.h> +#include <asm/byteorder.h> + +#define BITOP_WORD(nr) ((nr) / BITS_PER_LONG) +#define BITOP_LE_SWIZZLE ((BITS_PER_LONG-1) & ~0x7) + +#if defined(__LITTLE_ENDIAN) + +#define generic_test_le_bit(nr, addr) test_bit(nr, addr) +#define generic___set_le_bit(nr, addr) __set_bit(nr, addr) +#define generic___clear_le_bit(nr, addr) __clear_bit(nr, addr) + +#define generic_test_and_set_le_bit(nr, addr) test_and_set_bit(nr, addr) +#define generic_test_and_clear_le_bit(nr, addr) test_and_clear_bit(nr, addr) + +#define generic___test_and_set_le_bit(nr, addr) __test_and_set_bit(nr, addr) +#define generic___test_and_clear_le_bit(nr, addr) __test_and_clear_bit(nr, addr) + +#define generic_find_next_zero_le_bit(addr, size, offset) find_next_zero_bit(addr, size, offset) + +#elif defined(__BIG_ENDIAN) + +#define generic_test_le_bit(nr, addr) \ + test_bit((nr) ^ BITOP_LE_SWIZZLE, (addr)) +#define generic___set_le_bit(nr, addr) \ + __set_bit((nr) ^ BITOP_LE_SWIZZLE, (addr)) +#define generic___clear_le_bit(nr, addr) \ + __clear_bit((nr) ^ BITOP_LE_SWIZZLE, (addr)) + +#define generic_test_and_set_le_bit(nr, addr) \ + test_and_set_bit((nr) ^ BITOP_LE_SWIZZLE, (addr)) +#define generic_test_and_clear_le_bit(nr, addr) \ + test_and_clear_bit((nr) ^ BITOP_LE_SWIZZLE, (addr)) + +#define generic___test_and_set_le_bit(nr, addr) \ + __test_and_set_bit((nr) ^ BITOP_LE_SWIZZLE, (addr)) +#define generic___test_and_clear_le_bit(nr, addr) \ + __test_and_clear_bit((nr) ^ BITOP_LE_SWIZZLE, (addr)) + +extern unsigned long generic_find_next_zero_le_bit(const unsigned long *addr, + unsigned long size, unsigned long offset); + +#else +#error "Please fix <asm/byteorder.h>" +#endif + +#define generic_find_first_zero_le_bit(addr, size) \ + generic_find_next_zero_le_bit((addr), (size), 0) + +#endif /* _ASM_GENERIC_BITOPS_LE_H_ */ diff --git 
a/include/asm-generic/bitops/minix-le.h b/include/asm-generic/bitops/minix-le.h new file mode 100644 index 000000000000..4a981c1bb1ae --- /dev/null +++ b/include/asm-generic/bitops/minix-le.h @@ -0,0 +1,17 @@ +#ifndef _ASM_GENERIC_BITOPS_MINIX_LE_H_ +#define _ASM_GENERIC_BITOPS_MINIX_LE_H_ + +#include <asm-generic/bitops/le.h> + +#define minix_test_and_set_bit(nr,addr) \ + generic___test_and_set_le_bit((nr),(unsigned long *)(addr)) +#define minix_set_bit(nr,addr) \ + generic___set_le_bit((nr),(unsigned long *)(addr)) +#define minix_test_and_clear_bit(nr,addr) \ + generic___test_and_clear_le_bit((nr),(unsigned long *)(addr)) +#define minix_test_bit(nr,addr) \ + generic_test_le_bit((nr),(unsigned long *)(addr)) +#define minix_find_first_zero_bit(addr,size) \ + generic_find_first_zero_le_bit((unsigned long *)(addr),(size)) + +#endif /* _ASM_GENERIC_BITOPS_MINIX_LE_H_ */ diff --git a/include/asm-generic/bitops/minix.h b/include/asm-generic/bitops/minix.h new file mode 100644 index 000000000000..91f42e87aa51 --- /dev/null +++ b/include/asm-generic/bitops/minix.h @@ -0,0 +1,15 @@ +#ifndef _ASM_GENERIC_BITOPS_MINIX_H_ +#define _ASM_GENERIC_BITOPS_MINIX_H_ + +#define minix_test_and_set_bit(nr,addr) \ + __test_and_set_bit((nr),(unsigned long *)(addr)) +#define minix_set_bit(nr,addr) \ + __set_bit((nr),(unsigned long *)(addr)) +#define minix_test_and_clear_bit(nr,addr) \ + __test_and_clear_bit((nr),(unsigned long *)(addr)) +#define minix_test_bit(nr,addr) \ + test_bit((nr),(unsigned long *)(addr)) +#define minix_find_first_zero_bit(addr,size) \ + find_first_zero_bit((unsigned long *)(addr),(size)) + +#endif /* _ASM_GENERIC_BITOPS_MINIX_H_ */ diff --git a/include/asm-generic/bitops/non-atomic.h b/include/asm-generic/bitops/non-atomic.h new file mode 100644 index 000000000000..46a825cf2ae1 --- /dev/null +++ b/include/asm-generic/bitops/non-atomic.h @@ -0,0 +1,111 @@ +#ifndef _ASM_GENERIC_BITOPS_NON_ATOMIC_H_ +#define _ASM_GENERIC_BITOPS_NON_ATOMIC_H_ + +#include <asm/types.h> + +#define BITOP_MASK(nr) (1UL << ((nr) % BITS_PER_LONG)) +#define BITOP_WORD(nr) ((nr) / BITS_PER_LONG) + +/** + * __set_bit - Set a bit in memory + * @nr: the bit to set + * @addr: the address to start counting from + * + * Unlike set_bit(), this function is non-atomic and may be reordered. + * If it's called on the same region of memory simultaneously, the effect + * may be that only one operation succeeds. + */ +static inline void __set_bit(int nr, volatile unsigned long *addr) +{ + unsigned long mask = BITOP_MASK(nr); + unsigned long *p = ((unsigned long *)addr) + BITOP_WORD(nr); + + *p |= mask; +} + +static inline void __clear_bit(int nr, volatile unsigned long *addr) +{ + unsigned long mask = BITOP_MASK(nr); + unsigned long *p = ((unsigned long *)addr) + BITOP_WORD(nr); + + *p &= ~mask; +} + +/** + * __change_bit - Toggle a bit in memory + * @nr: the bit to change + * @addr: the address to start counting from + * + * Unlike change_bit(), this function is non-atomic and may be reordered. + * If it's called on the same region of memory simultaneously, the effect + * may be that only one operation succeeds. + */ +static inline void __change_bit(int nr, volatile unsigned long *addr) +{ + unsigned long mask = BITOP_MASK(nr); + unsigned long *p = ((unsigned long *)addr) + BITOP_WORD(nr); + + *p ^= mask; +} + +/** + * __test_and_set_bit - Set a bit and return its old value + * @nr: Bit to set + * @addr: Address to count from + * + * This operation is non-atomic and can be reordered. 
+ * If two examples of this operation race, one can appear to succeed + * but actually fail. You must protect multiple accesses with a lock. + */ +static inline int __test_and_set_bit(int nr, volatile unsigned long *addr) +{ + unsigned long mask = BITOP_MASK(nr); + unsigned long *p = ((unsigned long *)addr) + BITOP_WORD(nr); + unsigned long old = *p; + + *p = old | mask; + return (old & mask) != 0; +} + +/** + * __test_and_clear_bit - Clear a bit and return its old value + * @nr: Bit to clear + * @addr: Address to count from + * + * This operation is non-atomic and can be reordered. + * If two examples of this operation race, one can appear to succeed + * but actually fail. You must protect multiple accesses with a lock. + */ +static inline int __test_and_clear_bit(int nr, volatile unsigned long *addr) +{ + unsigned long mask = BITOP_MASK(nr); + unsigned long *p = ((unsigned long *)addr) + BITOP_WORD(nr); + unsigned long old = *p; + + *p = old & ~mask; + return (old & mask) != 0; +} + +/* WARNING: non atomic and it can be reordered! */ +static inline int __test_and_change_bit(int nr, + volatile unsigned long *addr) +{ + unsigned long mask = BITOP_MASK(nr); + unsigned long *p = ((unsigned long *)addr) + BITOP_WORD(nr); + unsigned long old = *p; + + *p = old ^ mask; + return (old & mask) != 0; +} + +/** + * test_bit - Determine whether a bit is set + * @nr: bit number to test + * @addr: Address to start counting from + */ +static inline int test_bit(int nr, const volatile unsigned long *addr) +{ + return 1UL & (addr[BITOP_WORD(nr)] >> (nr & (BITS_PER_LONG-1))); +} + +#endif /* _ASM_GENERIC_BITOPS_NON_ATOMIC_H_ */ diff --git a/include/asm-generic/bitops/sched.h b/include/asm-generic/bitops/sched.h new file mode 100644 index 000000000000..5ef93a4d009f --- /dev/null +++ b/include/asm-generic/bitops/sched.h @@ -0,0 +1,36 @@ +#ifndef _ASM_GENERIC_BITOPS_SCHED_H_ +#define _ASM_GENERIC_BITOPS_SCHED_H_ + +#include <linux/compiler.h> /* unlikely() */ +#include <asm/types.h> + +/* + * Every architecture must define this function. It's the fastest + * way of searching a 140-bit bitmap where the first 100 bits are + * unlikely to be set. It's guaranteed that at least one of the 140 + * bits is cleared. 
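[Illustrative sketch, not part of the patch: what sched_find_first_bit() below is used for, shown on a scheduler-style 140-bit priority bitmap. The identifiers are invented; DECLARE_BITMAP() and __set_bit() are existing kernel helpers.]

#include <linux/types.h>
#include <linux/bitops.h>

static DECLARE_BITMAP(prio_bitmap, 140);	/* 140 priorities, 5 words on 32-bit */

static int pick_next_prio(void)
{
	__set_bit(120, prio_bitmap);			/* non-atomic: caller serializes */
	return sched_find_first_bit(prio_bitmap);	/* finds bit 120 in word 3 */
}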
+ */ +static inline int sched_find_first_bit(const unsigned long *b) +{ +#if BITS_PER_LONG == 64 + if (unlikely(b[0])) + return __ffs(b[0]); + if (unlikely(b[1])) + return __ffs(b[1]) + 64; + return __ffs(b[2]) + 128; +#elif BITS_PER_LONG == 32 + if (unlikely(b[0])) + return __ffs(b[0]); + if (unlikely(b[1])) + return __ffs(b[1]) + 32; + if (unlikely(b[2])) + return __ffs(b[2]) + 64; + if (b[3]) + return __ffs(b[3]) + 96; + return __ffs(b[4]) + 128; +#else +#error BITS_PER_LONG not defined +#endif +} + +#endif /* _ASM_GENERIC_BITOPS_SCHED_H_ */ diff --git a/include/asm-generic/futex.h b/include/asm-generic/futex.h index 3ae2c7347549..df893c160318 100644 --- a/include/asm-generic/futex.h +++ b/include/asm-generic/futex.h @@ -49,5 +49,11 @@ futex_atomic_op_inuser (int encoded_op, int __user *uaddr) return ret; } +static inline int +futex_atomic_cmpxchg_inatomic(int __user *uaddr, int oldval, int newval) +{ + return -ENOSYS; +} + #endif #endif diff --git a/include/asm-generic/local.h b/include/asm-generic/local.h index 16fc00360f75..de4614840c2c 100644 --- a/include/asm-generic/local.h +++ b/include/asm-generic/local.h @@ -4,28 +4,28 @@ #include <linux/config.h> #include <linux/percpu.h> #include <linux/hardirq.h> +#include <asm/atomic.h> #include <asm/types.h> /* An unsigned long type for operations which are atomic for a single * CPU. Usually used in combination with per-cpu variables. */ -#if BITS_PER_LONG == 32 /* Implement in terms of atomics. */ /* Don't use typedef: don't want them to be mixed with atomic_t's. */ typedef struct { - atomic_t a; + atomic_long_t a; } local_t; -#define LOCAL_INIT(i) { ATOMIC_INIT(i) } +#define LOCAL_INIT(i) { ATOMIC_LONG_INIT(i) } -#define local_read(l) ((unsigned long)atomic_read(&(l)->a)) -#define local_set(l,i) atomic_set((&(l)->a),(i)) -#define local_inc(l) atomic_inc(&(l)->a) -#define local_dec(l) atomic_dec(&(l)->a) -#define local_add(i,l) atomic_add((i),(&(l)->a)) -#define local_sub(i,l) atomic_sub((i),(&(l)->a)) +#define local_read(l) ((unsigned long)atomic_long_read(&(l)->a)) +#define local_set(l,i) atomic_long_set((&(l)->a),(i)) +#define local_inc(l) atomic_long_inc(&(l)->a) +#define local_dec(l) atomic_long_dec(&(l)->a) +#define local_add(i,l) atomic_long_add((i),(&(l)->a)) +#define local_sub(i,l) atomic_long_sub((i),(&(l)->a)) /* Non-atomic variants, ie. preemption disabled and won't be touched * in interrupt, etc. Some archs can optimize this case well. */ @@ -34,68 +34,6 @@ typedef struct #define __local_add(i,l) local_set((l), local_read(l) + (i)) #define __local_sub(i,l) local_set((l), local_read(l) - (i)) -#else /* ... can't use atomics. */ -/* Implement in terms of three variables. - Another option would be to use local_irq_save/restore. */ - -typedef struct -{ - /* 0 = in hardirq, 1 = in softirq, 2 = usermode. 
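[Illustrative sketch, not part of the patch: with local_t now wrapping atomic_long_t, a plain counter looks like the following. The names are invented; the per-cpu wrappers further down in this header are the more typical way to use local_t.]

#include <asm/local.h>

static local_t mydrv_events = LOCAL_INIT(0);

static void mydrv_note_event(void)
{
	local_inc(&mydrv_events);		/* cheap, CPU-local atomic increment */
}

static unsigned long mydrv_event_count(void)
{
	return local_read(&mydrv_events);
}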
*/ - unsigned long v[3]; -} local_t; - -#define _LOCAL_VAR(l) ((l)->v[!in_interrupt() + !in_irq()]) - -#define LOCAL_INIT(i) { { (i), 0, 0 } } - -static inline unsigned long local_read(local_t *l) -{ - return l->v[0] + l->v[1] + l->v[2]; -} - -static inline void local_set(local_t *l, unsigned long v) -{ - l->v[0] = v; - l->v[1] = l->v[2] = 0; -} - -static inline void local_inc(local_t *l) -{ - preempt_disable(); - _LOCAL_VAR(l)++; - preempt_enable(); -} - -static inline void local_dec(local_t *l) -{ - preempt_disable(); - _LOCAL_VAR(l)--; - preempt_enable(); -} - -static inline void local_add(unsigned long v, local_t *l) -{ - preempt_disable(); - _LOCAL_VAR(l) += v; - preempt_enable(); -} - -static inline void local_sub(unsigned long v, local_t *l) -{ - preempt_disable(); - _LOCAL_VAR(l) -= v; - preempt_enable(); -} - -/* Non-atomic variants, ie. preemption disabled and won't be touched - * in interrupt, etc. Some archs can optimize this case well. */ -#define __local_inc(l) ((l)->v[0]++) -#define __local_dec(l) ((l)->v[0]--) -#define __local_add(i,l) ((l)->v[0] += (i)) -#define __local_sub(i,l) ((l)->v[0] -= (i)) - -#endif /* Non-atomic implementation */ - /* Use these for per-cpu local_t variables: on some archs they are * much more efficient than these naive implementations. Note they take * a variable (eg. mystruct.foo), not an address. diff --git a/include/asm-generic/memory_model.h b/include/asm-generic/memory_model.h new file mode 100644 index 000000000000..0cfb086dd373 --- /dev/null +++ b/include/asm-generic/memory_model.h @@ -0,0 +1,77 @@ +#ifndef __ASM_MEMORY_MODEL_H +#define __ASM_MEMORY_MODEL_H + +#ifdef __KERNEL__ +#ifndef __ASSEMBLY__ + +#if defined(CONFIG_FLATMEM) + +#ifndef ARCH_PFN_OFFSET +#define ARCH_PFN_OFFSET (0UL) +#endif + +#elif defined(CONFIG_DISCONTIGMEM) + +#ifndef arch_pfn_to_nid +#define arch_pfn_to_nid(pfn) pfn_to_nid(pfn) +#endif + +#ifndef arch_local_page_offset +#define arch_local_page_offset(pfn, nid) \ + ((pfn) - NODE_DATA(nid)->node_start_pfn) +#endif + +#endif /* CONFIG_DISCONTIGMEM */ + +#ifdef CONFIG_OUT_OF_LINE_PFN_TO_PAGE +struct page; +/* this is useful when inlined pfn_to_page is too big */ +extern struct page *pfn_to_page(unsigned long pfn); +extern unsigned long page_to_pfn(struct page *page); +#else +/* + * supports 3 memory models. + */ +#if defined(CONFIG_FLATMEM) + +#define pfn_to_page(pfn) (mem_map + ((pfn) - ARCH_PFN_OFFSET)) +#define page_to_pfn(page) ((unsigned long)((page) - mem_map) + \ + ARCH_PFN_OFFSET) +#elif defined(CONFIG_DISCONTIGMEM) + +#define pfn_to_page(pfn) \ +({ unsigned long __pfn = (pfn); \ + unsigned long __nid = arch_pfn_to_nid(pfn); \ + NODE_DATA(__nid)->node_mem_map + arch_local_page_offset(__pfn, __nid);\ +}) + +#define page_to_pfn(pg) \ +({ struct page *__pg = (pg); \ + struct pglist_data *__pgdat = NODE_DATA(page_to_nid(__pg)); \ + (unsigned long)(__pg - __pgdat->node_mem_map) + \ + __pgdat->node_start_pfn; \ +}) + +#elif defined(CONFIG_SPARSEMEM) +/* + * Note: section's mem_map is encorded to reflect its start_pfn. 
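[Illustrative sketch, not part of the patch: whichever memory model is selected above, the consolidated pfn_to_page()/page_to_pfn() pair must remain inverses of each other; under FLATMEM they reduce to mem_map arithmetic offset by ARCH_PFN_OFFSET. The check function is invented.]

#include <linux/kernel.h>
#include <linux/mm.h>

static void check_pfn_roundtrip(unsigned long pfn)
{
	struct page *page;

	BUG_ON(!pfn_valid(pfn));		/* each arch still defines pfn_valid() */
	page = pfn_to_page(pfn);		/* FLATMEM: mem_map + (pfn - ARCH_PFN_OFFSET) */
	BUG_ON(page_to_pfn(page) != pfn);	/* (page - mem_map) + ARCH_PFN_OFFSET */
}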
+ * section[i].section_mem_map == mem_map's address - start_pfn; + */ +#define page_to_pfn(pg) \ +({ struct page *__pg = (pg); \ + int __sec = page_to_section(__pg); \ + __pg - __section_mem_map_addr(__nr_to_section(__sec)); \ +}) + +#define pfn_to_page(pfn) \ +({ unsigned long __pfn = (pfn); \ + struct mem_section *__sec = __pfn_to_section(__pfn); \ + __section_mem_map_addr(__sec) + __pfn; \ +}) +#endif /* CONFIG_FLATMEM/DISCONTIGMEM/SPARSEMEM */ +#endif /* CONFIG_OUT_OF_LINE_PFN_TO_PAGE */ + +#endif /* __ASSEMBLY__ */ +#endif /* __KERNEL__ */ + +#endif diff --git a/include/asm-generic/percpu.h b/include/asm-generic/percpu.h index 78cf45547e31..c0caf433a7d7 100644 --- a/include/asm-generic/percpu.h +++ b/include/asm-generic/percpu.h @@ -19,7 +19,7 @@ extern unsigned long __per_cpu_offset[NR_CPUS]; #define percpu_modcopy(pcpudst, src, size) \ do { \ unsigned int __i; \ - for_each_cpu(__i) \ + for_each_possible_cpu(__i) \ memcpy((pcpudst)+__per_cpu_offset[__i], \ (src), (size)); \ } while (0) diff --git a/include/asm-h8300/bitops.h b/include/asm-h8300/bitops.h index ff7c2b721594..574f57b6c4d1 100644 --- a/include/asm-h8300/bitops.h +++ b/include/asm-h8300/bitops.h @@ -8,7 +8,6 @@ #include <linux/config.h> #include <linux/compiler.h> -#include <asm/byteorder.h> /* swab32 */ #include <asm/system.h> #ifdef __KERNEL__ @@ -177,10 +176,7 @@ H8300_GEN_TEST_BITOP(test_and_change_bit,"bnot") #undef H8300_GEN_TEST_BITOP_CONST_INT #undef H8300_GEN_TEST_BITOP -#define find_first_zero_bit(addr, size) \ - find_next_zero_bit((addr), (size), 0) - -#define ffs(x) generic_ffs(x) +#include <asm-generic/bitops/ffs.h> static __inline__ unsigned long __ffs(unsigned long word) { @@ -196,216 +192,16 @@ static __inline__ unsigned long __ffs(unsigned long word) return result; } -static __inline__ int find_next_zero_bit (const unsigned long * addr, int size, int offset) -{ - unsigned long *p = (unsigned long *)(((unsigned long)addr + (offset >> 3)) & ~3); - unsigned long result = offset & ~31UL; - unsigned long tmp; - - if (offset >= size) - return size; - size -= result; - offset &= 31UL; - if (offset) { - tmp = *(p++); - tmp |= ~0UL >> (32-offset); - if (size < 32) - goto found_first; - if (~tmp) - goto found_middle; - size -= 32; - result += 32; - } - while (size & ~31UL) { - if (~(tmp = *(p++))) - goto found_middle; - result += 32; - size -= 32; - } - if (!size) - return result; - tmp = *p; - -found_first: - tmp |= ~0UL << size; -found_middle: - return result + ffz(tmp); -} - -static __inline__ unsigned long find_next_bit(const unsigned long *addr, - unsigned long size, unsigned long offset) -{ - unsigned long *p = (unsigned long *)(((unsigned long)addr + (offset >> 3)) & ~3); - unsigned int result = offset & ~31UL; - unsigned int tmp; - - if (offset >= size) - return size; - size -= result; - offset &= 31UL; - if (offset) { - tmp = *(p++); - tmp &= ~0UL << offset; - if (size < 32) - goto found_first; - if (tmp) - goto found_middle; - size -= 32; - result += 32; - } - while (size >= 32) { - if ((tmp = *p++) != 0) - goto found_middle; - result += 32; - size -= 32; - } - if (!size) - return result; - tmp = *p; - -found_first: - tmp &= ~0UL >> (32 - size); - if (tmp == 0UL) - return result + size; -found_middle: - return result + __ffs(tmp); -} - -#define find_first_bit(addr, size) find_next_bit(addr, size, 0) - -/* - * Every architecture must define this function. It's the fastest - * way of searching a 140-bit bitmap where the first 100 bits are - * unlikely to be set. 
It's guaranteed that at least one of the 140 - * bits is cleared. - */ -static inline int sched_find_first_bit(unsigned long *b) -{ - if (unlikely(b[0])) - return __ffs(b[0]); - if (unlikely(b[1])) - return __ffs(b[1]) + 32; - if (unlikely(b[2])) - return __ffs(b[2]) + 64; - if (b[3]) - return __ffs(b[3]) + 96; - return __ffs(b[4]) + 128; -} - -/* - * hweightN: returns the hamming weight (i.e. the number - * of bits set) of a N-bit word - */ - -#define hweight32(x) generic_hweight32(x) -#define hweight16(x) generic_hweight16(x) -#define hweight8(x) generic_hweight8(x) - -static __inline__ int ext2_set_bit(int nr, volatile void * addr) -{ - int mask, retval; - unsigned long flags; - volatile unsigned char *ADDR = (unsigned char *) addr; - - ADDR += nr >> 3; - mask = 1 << (nr & 0x07); - local_irq_save(flags); - retval = (mask & *ADDR) != 0; - *ADDR |= mask; - local_irq_restore(flags); - return retval; -} -#define ext2_set_bit_atomic(lock, nr, addr) ext2_set_bit(nr, addr) - -static __inline__ int ext2_clear_bit(int nr, volatile void * addr) -{ - int mask, retval; - unsigned long flags; - volatile unsigned char *ADDR = (unsigned char *) addr; - - ADDR += nr >> 3; - mask = 1 << (nr & 0x07); - local_irq_save(flags); - retval = (mask & *ADDR) != 0; - *ADDR &= ~mask; - local_irq_restore(flags); - return retval; -} -#define ext2_clear_bit_atomic(lock, nr, addr) ext2_set_bit(nr, addr) - -static __inline__ int ext2_test_bit(int nr, const volatile void * addr) -{ - int mask; - const volatile unsigned char *ADDR = (const unsigned char *) addr; - - ADDR += nr >> 3; - mask = 1 << (nr & 0x07); - return ((mask & *ADDR) != 0); -} - -#define ext2_find_first_zero_bit(addr, size) \ - ext2_find_next_zero_bit((addr), (size), 0) - -static __inline__ unsigned long ext2_find_next_zero_bit(void *addr, unsigned long size, unsigned long offset) -{ - unsigned long *p = ((unsigned long *) addr) + (offset >> 5); - unsigned long result = offset & ~31UL; - unsigned long tmp; - - if (offset >= size) - return size; - size -= result; - offset &= 31UL; - if(offset) { - /* We hold the little endian value in tmp, but then the - * shift is illegal. So we could keep a big endian value - * in tmp, like this: - * - * tmp = __swab32(*(p++)); - * tmp |= ~0UL >> (32-offset); - * - * but this would decrease performance, so we change the - * shift: - */ - tmp = *(p++); - tmp |= __swab32(~0UL >> (32-offset)); - if(size < 32) - goto found_first; - if(~tmp) - goto found_middle; - size -= 32; - result += 32; - } - while(size & ~31UL) { - if(~(tmp = *(p++))) - goto found_middle; - result += 32; - size -= 32; - } - if(!size) - return result; - tmp = *p; - -found_first: - /* tmp is little endian, so we would have to swab the shift, - * see above. But then we have to swab tmp below for ffz, so - * we might as well do this here. - */ - return result + ffz(__swab32(tmp) | (~0UL << size)); -found_middle: - return result + ffz(__swab32(tmp)); -} - -/* Bitmap functions for the minix filesystem. 
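The __swab32() dance in the ext2 helpers above exists because ext2 bitmaps are defined in little-endian byte order while the word-at-a-time helpers treat the bitmap as native (big-endian on h8300) 32-bit words. The property being preserved is simply "ext2 bit n lives in byte n/8, bit n%8 of that byte". A small host-side sketch of that byte-addressed numbering, with accessors written from scratch for illustration:

    #include <assert.h>

    /* ext2 bit numbering: bit n is bit (n % 8) of byte (n / 8), independent of
     * the host word size or endianness. */
    static int ext2_test_bit8(unsigned nr, const unsigned char *map)
    {
        return (map[nr >> 3] >> (nr & 7)) & 1;
    }

    static void ext2_set_bit8(unsigned nr, unsigned char *map)
    {
        map[nr >> 3] |= 1u << (nr & 7);
    }

    int main(void)
    {
        unsigned char map[8] = { 0 };

        ext2_set_bit8(35, map);          /* byte 4, bit 3 */
        assert(map[4] == 0x08);
        assert(ext2_test_bit8(35, map));
        return 0;
    }

Working a byte at a time sidesteps endianness entirely; the word-at-a-time versions only have to swab when they fall back to ffz() on a whole 32-bit word, which is exactly what the removed code does.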
*/ -#define minix_test_and_set_bit(nr,addr) test_and_set_bit(nr,addr) -#define minix_set_bit(nr,addr) set_bit(nr,addr) -#define minix_test_and_clear_bit(nr,addr) test_and_clear_bit(nr,addr) -#define minix_test_bit(nr,addr) test_bit(nr,addr) -#define minix_find_first_zero_bit(addr,size) find_first_zero_bit(addr,size) +#include <asm-generic/bitops/find.h> +#include <asm-generic/bitops/sched.h> +#include <asm-generic/bitops/hweight.h> +#include <asm-generic/bitops/ext2-non-atomic.h> +#include <asm-generic/bitops/ext2-atomic.h> +#include <asm-generic/bitops/minix.h> #endif /* __KERNEL__ */ -#define fls(x) generic_fls(x) -#define fls64(x) generic_fls64(x) +#include <asm-generic/bitops/fls.h> +#include <asm-generic/bitops/fls64.h> #endif /* _H8300_BITOPS_H */ diff --git a/include/asm-h8300/page.h b/include/asm-h8300/page.h index cd35b1cc6cde..6472c9f88227 100644 --- a/include/asm-h8300/page.h +++ b/include/asm-h8300/page.h @@ -71,8 +71,7 @@ extern unsigned long memory_end; #define page_to_virt(page) ((((page) - mem_map) << PAGE_SHIFT) + PAGE_OFFSET) #define pfn_valid(page) (page < max_mapnr) -#define pfn_to_page(pfn) virt_to_page(pfn_to_virt(pfn)) -#define page_to_pfn(page) virt_to_pfn(page_to_virt(page)) +#define ARCH_PFN_OFFSET (PAGE_OFFSET >> PAGE_SHIFT) #define virt_addr_valid(kaddr) (((void *)(kaddr) >= (void *)PAGE_OFFSET) && \ ((void *)(kaddr) < (void *)memory_end)) @@ -81,6 +80,7 @@ extern unsigned long memory_end; #endif /* __KERNEL__ */ +#include <asm-generic/memory_model.h> #include <asm-generic/page.h> #endif /* _H8300_PAGE_H */ diff --git a/include/asm-h8300/types.h b/include/asm-h8300/types.h index bf91e0d4dde7..da2402b86540 100644 --- a/include/asm-h8300/types.h +++ b/include/asm-h8300/types.h @@ -58,6 +58,9 @@ typedef u32 dma_addr_t; #define HAVE_SECTOR_T typedef u64 sector_t; +#define HAVE_BLKCNT_T +typedef u64 blkcnt_t; + #endif /* __KERNEL__ */ #endif /* __ASSEMBLY__ */ diff --git a/include/asm-i386/bitops.h b/include/asm-i386/bitops.h index 7d20b95edb3b..08deaeee6be9 100644 --- a/include/asm-i386/bitops.h +++ b/include/asm-i386/bitops.h @@ -362,28 +362,9 @@ static inline unsigned long ffz(unsigned long word) return word; } -#define fls64(x) generic_fls64(x) - #ifdef __KERNEL__ -/* - * Every architecture must define this function. It's the fastest - * way of searching a 140-bit bitmap where the first 100 bits are - * unlikely to be set. It's guaranteed that at least one of the 140 - * bits is cleared. - */ -static inline int sched_find_first_bit(const unsigned long *b) -{ - if (unlikely(b[0])) - return __ffs(b[0]); - if (unlikely(b[1])) - return __ffs(b[1]) + 32; - if (unlikely(b[2])) - return __ffs(b[2]) + 64; - if (b[3]) - return __ffs(b[3]) + 96; - return __ffs(b[4]) + 128; -} +#include <asm-generic/bitops/sched.h> /** * ffs - find first bit set @@ -421,42 +402,22 @@ static inline int fls(int x) return r+1; } -/** - * hweightN - returns the hamming weight of a N-bit word - * @x: the word to weigh - * - * The Hamming Weight of a number is the total number of bits set in it. 
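The generic_hweight32() family, now supplied by asm-generic/bitops/hweight.h wherever these defines are dropped, is a plain population count. A stand-alone sketch of a 32-bit popcount using the usual parallel-sum trick (not the kernel's exact implementation):

    #include <assert.h>
    #include <stdint.h>

    static unsigned hweight32_sketch(uint32_t w)
    {
        /* add adjacent bits pairwise, then nibbles, then bytes */
        w = w - ((w >> 1) & 0x55555555u);
        w = (w & 0x33333333u) + ((w >> 2) & 0x33333333u);
        w = (w + (w >> 4)) & 0x0f0f0f0fu;
        return (w * 0x01010101u) >> 24;
    }

    int main(void)
    {
        assert(hweight32_sketch(0) == 0);
        assert(hweight32_sketch(0xffffffffu) == 32);
        assert(hweight32_sketch(0x80000001u) == 2);
        return 0;
    }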
- */ - -#define hweight32(x) generic_hweight32(x) -#define hweight16(x) generic_hweight16(x) -#define hweight8(x) generic_hweight8(x) +#include <asm-generic/bitops/hweight.h> #endif /* __KERNEL__ */ +#include <asm-generic/bitops/fls64.h> + #ifdef __KERNEL__ -#define ext2_set_bit(nr,addr) \ - __test_and_set_bit((nr),(unsigned long*)addr) +#include <asm-generic/bitops/ext2-non-atomic.h> + #define ext2_set_bit_atomic(lock,nr,addr) \ test_and_set_bit((nr),(unsigned long*)addr) -#define ext2_clear_bit(nr, addr) \ - __test_and_clear_bit((nr),(unsigned long*)addr) #define ext2_clear_bit_atomic(lock,nr, addr) \ test_and_clear_bit((nr),(unsigned long*)addr) -#define ext2_test_bit(nr, addr) test_bit((nr),(unsigned long*)addr) -#define ext2_find_first_zero_bit(addr, size) \ - find_first_zero_bit((unsigned long*)addr, size) -#define ext2_find_next_zero_bit(addr, size, off) \ - find_next_zero_bit((unsigned long*)addr, size, off) - -/* Bitmap functions for the minix filesystem. */ -#define minix_test_and_set_bit(nr,addr) __test_and_set_bit(nr,(void*)addr) -#define minix_set_bit(nr,addr) __set_bit(nr,(void*)addr) -#define minix_test_and_clear_bit(nr,addr) __test_and_clear_bit(nr,(void*)addr) -#define minix_test_bit(nr,addr) test_bit(nr,(void*)addr) -#define minix_find_first_zero_bit(addr,size) \ - find_first_zero_bit((void*)addr,size) + +#include <asm-generic/bitops/minix.h> #endif /* __KERNEL__ */ diff --git a/include/asm-i386/futex.h b/include/asm-i386/futex.h index 44b9db806474..7b8ceefd010f 100644 --- a/include/asm-i386/futex.h +++ b/include/asm-i386/futex.h @@ -104,5 +104,32 @@ futex_atomic_op_inuser (int encoded_op, int __user *uaddr) return ret; } +static inline int +futex_atomic_cmpxchg_inatomic(int __user *uaddr, int oldval, int newval) +{ + if (!access_ok(VERIFY_WRITE, uaddr, sizeof(int))) + return -EFAULT; + + __asm__ __volatile__( + "1: " LOCK_PREFIX "cmpxchgl %3, %1 \n" + + "2: .section .fixup, \"ax\" \n" + "3: mov %2, %0 \n" + " jmp 2b \n" + " .previous \n" + + " .section __ex_table, \"a\" \n" + " .align 8 \n" + " .long 1b,3b \n" + " .previous \n" + + : "=a" (oldval), "=m" (*uaddr) + : "i" (-EFAULT), "r" (newval), "0" (oldval) + : "memory" + ); + + return oldval; +} + #endif #endif diff --git a/include/asm-i386/kdebug.h b/include/asm-i386/kdebug.h index 316138e89910..96d0828ce096 100644 --- a/include/asm-i386/kdebug.h +++ b/include/asm-i386/kdebug.h @@ -17,11 +17,9 @@ struct die_args { int signr; }; -/* Note - you should never unregister because that can race with NMIs. - If you really want to do it first unregister - then synchronize_sched - then free. - */ -int register_die_notifier(struct notifier_block *nb); -extern struct notifier_block *i386die_chain; +extern int register_die_notifier(struct notifier_block *); +extern int unregister_die_notifier(struct notifier_block *); +extern struct atomic_notifier_head i386die_chain; /* Grossly misnamed. 
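The futex_atomic_cmpxchg_inatomic() helper added above is a user-memory compare-and-exchange: it stores newval at *uaddr only if the current value equals oldval, and hands back the value actually found so the caller can tell whether it won. In ordinary user-space C the same contract looks roughly like this, using the GCC __sync builtin as a stand-in for the LOCK CMPXCHG sequence; the -EFAULT/exception-table handling has no analogue here:

    #include <assert.h>

    /* Compare-and-exchange with the same calling convention as the helper above:
     * returns the value found at *uaddr; the store happened iff that equals oldval. */
    static int cmpxchg_int(int *uaddr, int oldval, int newval)
    {
        return __sync_val_compare_and_swap(uaddr, oldval, newval);
    }

    int main(void)
    {
        int futex = 0;

        assert(cmpxchg_int(&futex, 0, 1) == 0);   /* won the race, futex now 1 */
        assert(futex == 1);
        assert(cmpxchg_int(&futex, 0, 2) == 1);   /* lost: value was already 1 */
        assert(futex == 1);
        return 0;
    }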
*/ @@ -51,7 +49,7 @@ static inline int notify_die(enum die_val val, const char *str, .trapnr = trap, .signr = sig }; - return notifier_call_chain(&i386die_chain, val, &args); + return atomic_notifier_call_chain(&i386die_chain, val, &args); } #endif diff --git a/include/asm-i386/kprobes.h b/include/asm-i386/kprobes.h index a0d2d74a7dda..57d157c5cf89 100644 --- a/include/asm-i386/kprobes.h +++ b/include/asm-i386/kprobes.h @@ -34,6 +34,7 @@ struct pt_regs; typedef u8 kprobe_opcode_t; #define BREAKPOINT_INSTRUCTION 0xcc +#define RELATIVEJUMP_INSTRUCTION 0xe9 #define MAX_INSN_SIZE 16 #define MAX_STACK_SIZE 64 #define MIN_STACK_SIZE(ADDR) (((MAX_STACK_SIZE) < \ @@ -51,6 +52,11 @@ void kretprobe_trampoline(void); struct arch_specific_insn { /* copy of the original instruction */ kprobe_opcode_t *insn; + /* + * If this flag is not 0, this kprobe can be boost when its + * post_handler and break_handler is not set. + */ + int boostable; }; struct prev_kprobe { diff --git a/include/asm-i386/mach-default/mach_time.h b/include/asm-i386/mach-default/mach_time.h index b749aa44a86f..31eb5de6f3dc 100644 --- a/include/asm-i386/mach-default/mach_time.h +++ b/include/asm-i386/mach-default/mach_time.h @@ -82,21 +82,8 @@ static inline int mach_set_rtc_mmss(unsigned long nowtime) static inline unsigned long mach_get_cmos_time(void) { unsigned int year, mon, day, hour, min, sec; - int i; - /* The Linux interpretation of the CMOS clock register contents: - * When the Update-In-Progress (UIP) flag goes from 1 to 0, the - * RTC registers show the second which has precisely just started. - * Let's hope other operating systems interpret the RTC the same way. - */ - /* read RTC exactly on falling edge of update flag */ - for (i = 0 ; i < 1000000 ; i++) /* may take up to 1 second... */ - if (CMOS_READ(RTC_FREQ_SELECT) & RTC_UIP) - break; - for (i = 0 ; i < 1000000 ; i++) /* must try at least 2.228 ms */ - if (!(CMOS_READ(RTC_FREQ_SELECT) & RTC_UIP)) - break; - do { /* Isn't this overkill ? UIP above should guarantee consistency */ + do { sec = CMOS_READ(RTC_SECONDS); min = CMOS_READ(RTC_MINUTES); hour = CMOS_READ(RTC_HOURS); @@ -104,16 +91,18 @@ static inline unsigned long mach_get_cmos_time(void) mon = CMOS_READ(RTC_MONTH); year = CMOS_READ(RTC_YEAR); } while (sec != CMOS_READ(RTC_SECONDS)); - if (!(CMOS_READ(RTC_CONTROL) & RTC_DM_BINARY) || RTC_ALWAYS_BCD) - { - BCD_TO_BIN(sec); - BCD_TO_BIN(min); - BCD_TO_BIN(hour); - BCD_TO_BIN(day); - BCD_TO_BIN(mon); - BCD_TO_BIN(year); - } - if ((year += 1900) < 1970) + + if (!(CMOS_READ(RTC_CONTROL) & RTC_DM_BINARY) || RTC_ALWAYS_BCD) { + BCD_TO_BIN(sec); + BCD_TO_BIN(min); + BCD_TO_BIN(hour); + BCD_TO_BIN(day); + BCD_TO_BIN(mon); + BCD_TO_BIN(year); + } + + year += 1900; + if (year < 1970) year += 100; return mktime(year, mon, day, hour, min, sec); diff --git a/include/asm-i386/mmzone.h b/include/asm-i386/mmzone.h index 74f595d80579..e33e9f9e4c66 100644 --- a/include/asm-i386/mmzone.h +++ b/include/asm-i386/mmzone.h @@ -70,8 +70,6 @@ static inline int pfn_to_nid(unsigned long pfn) #endif } -#define node_localnr(pfn, nid) ((pfn) - node_data[nid]->node_start_pfn) - /* * Following are macros that each numa implmentation must define. 
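The reworked mach_get_cmos_time() above keeps two ideas: re-read the registers until RTC_SECONDS is stable across a pass, so the whole set is consistent, and convert from BCD unless the RTC is in binary mode. The BCD step is simple enough to show in isolation; bcd_to_bin() here is written out for illustration of what the BCD_TO_BIN conversion does:

    #include <assert.h>

    /* Each byte holds two decimal digits: high nibble = tens, low nibble = ones. */
    static unsigned bcd_to_bin(unsigned char bcd)
    {
        return (bcd & 0x0f) + (bcd >> 4) * 10;
    }

    int main(void)
    {
        assert(bcd_to_bin(0x00) == 0);
        assert(bcd_to_bin(0x59) == 59);   /* e.g. RTC_SECONDS reading "59" */
        assert(bcd_to_bin(0x23) == 23);   /* e.g. RTC_HOURS reading "23"   */
        return 0;
    }

The year handling is the usual two-digit pivot: after adding 1900, anything below 1970 gets pushed forward a century, so 00..69 become 2000..2069.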
*/ @@ -86,21 +84,6 @@ static inline int pfn_to_nid(unsigned long pfn) /* XXX: FIXME -- wli */ #define kern_addr_valid(kaddr) (0) -#define pfn_to_page(pfn) \ -({ \ - unsigned long __pfn = pfn; \ - int __node = pfn_to_nid(__pfn); \ - &NODE_DATA(__node)->node_mem_map[node_localnr(__pfn,__node)]; \ -}) - -#define page_to_pfn(pg) \ -({ \ - struct page *__page = pg; \ - struct zone *__zone = page_zone(__page); \ - (unsigned long)(__page - __zone->zone_mem_map) \ - + __zone->zone_start_pfn; \ -}) - #ifdef CONFIG_X86_NUMAQ /* we have contiguous memory on NUMA-Q */ #define pfn_valid(pfn) ((pfn) < num_physpages) #else diff --git a/include/asm-i386/page.h b/include/asm-i386/page.h index 997ca5d17876..30f52a2263ba 100644 --- a/include/asm-i386/page.h +++ b/include/asm-i386/page.h @@ -126,8 +126,6 @@ extern int page_is_ram(unsigned long pagenr); #define __va(x) ((void *)((unsigned long)(x)+PAGE_OFFSET)) #define pfn_to_kaddr(pfn) __va((pfn) << PAGE_SHIFT) #ifdef CONFIG_FLATMEM -#define pfn_to_page(pfn) (mem_map + (pfn)) -#define page_to_pfn(page) ((unsigned long)((page) - mem_map)) #define pfn_valid(pfn) ((pfn) < max_mapnr) #endif /* CONFIG_FLATMEM */ #define virt_to_page(kaddr) pfn_to_page(__pa(kaddr) >> PAGE_SHIFT) @@ -141,6 +139,7 @@ extern int page_is_ram(unsigned long pagenr); #endif /* __KERNEL__ */ +#include <asm-generic/memory_model.h> #include <asm-generic/page.h> #endif /* _I386_PAGE_H */ diff --git a/include/asm-i386/processor.h b/include/asm-i386/processor.h index feca5d961e2b..805f0dcda468 100644 --- a/include/asm-i386/processor.h +++ b/include/asm-i386/processor.h @@ -20,6 +20,7 @@ #include <linux/config.h> #include <linux/threads.h> #include <asm/percpu.h> +#include <linux/cpumask.h> /* flag for disabling the tsc */ extern int tsc_disable; @@ -67,6 +68,9 @@ struct cpuinfo_x86 { char pad0; int x86_power; unsigned long loops_per_jiffy; +#ifdef CONFIG_SMP + cpumask_t llc_shared_map; /* cpus sharing the last level cache */ +#endif unsigned char x86_max_cores; /* cpuid returned max cores value */ unsigned char booted_cores; /* number of cores as seen by OS */ unsigned char apicid; @@ -103,6 +107,7 @@ extern struct cpuinfo_x86 cpu_data[]; extern int phys_proc_id[NR_CPUS]; extern int cpu_core_id[NR_CPUS]; +extern int cpu_llc_id[NR_CPUS]; extern char ignore_fpu_irq; extern void identify_cpu(struct cpuinfo_x86 *); @@ -616,8 +621,6 @@ struct extended_sigtable { unsigned int reserved[3]; struct extended_signature sigs[0]; }; -/* '6' because it used to be for P6 only (but now covers Pentium 4 as well) */ -#define MICROCODE_IOCFREE _IO('6',0) /* REP NOP (PAUSE) is a good thing to insert into busy-wait loops. */ static inline void rep_nop(void) diff --git a/include/asm-i386/setup.h b/include/asm-i386/setup.h index 826a8ca50ac8..ee941457b55d 100644 --- a/include/asm-i386/setup.h +++ b/include/asm-i386/setup.h @@ -6,9 +6,7 @@ #ifndef _i386_SETUP_H #define _i386_SETUP_H -#define PFN_UP(x) (((x) + PAGE_SIZE-1) >> PAGE_SHIFT) -#define PFN_DOWN(x) ((x) >> PAGE_SHIFT) -#define PFN_PHYS(x) ((x) << PAGE_SHIFT) +#include <linux/pfn.h> /* * Reserved space for vmalloc and iomap - defined in asm/page.h diff --git a/include/asm-i386/stat.h b/include/asm-i386/stat.h index b464f8020ec4..67eae78323ba 100644 --- a/include/asm-i386/stat.h +++ b/include/asm-i386/stat.h @@ -58,8 +58,7 @@ struct stat64 { long long st_size; unsigned long st_blksize; - unsigned long st_blocks; /* Number 512-byte blocks allocated. 
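The PFN_UP/PFN_DOWN/PFN_PHYS trio that i386 setup.h now picks up via linux/pfn.h is pure shift arithmetic: round a byte address up or down to a page frame number, or turn a pfn back into a physical byte address. A quick sanity check of those identities with an assumed 4 KiB page size; the macro bodies below mirror the ones removed above:

    #include <assert.h>

    #define PAGE_SHIFT 12                       /* assume 4 KiB pages */
    #define PAGE_SIZE  (1UL << PAGE_SHIFT)

    #define PFN_UP(x)   (((x) + PAGE_SIZE - 1) >> PAGE_SHIFT)
    #define PFN_DOWN(x) ((x) >> PAGE_SHIFT)
    #define PFN_PHYS(x) ((x) << PAGE_SHIFT)

    int main(void)
    {
        assert(PFN_DOWN(0x5fff) == 5);          /* address inside frame 5      */
        assert(PFN_UP(0x5001)   == 6);          /* rounds up to the next frame */
        assert(PFN_UP(0x5000)   == 5);          /* already page aligned        */
        assert(PFN_PHYS(5)      == 0x5000);
        return 0;
    }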
*/ - unsigned long __pad4; /* future possible st_blocks high bits */ + unsigned long long st_blocks; /* Number 512-byte blocks allocated. */ unsigned long st_atime; unsigned long st_atime_nsec; diff --git a/include/asm-i386/topology.h b/include/asm-i386/topology.h index aa958c6ee83e..b94e5eeef917 100644 --- a/include/asm-i386/topology.h +++ b/include/asm-i386/topology.h @@ -112,4 +112,6 @@ extern unsigned long node_remap_size[]; #endif /* CONFIG_NUMA */ +extern cpumask_t cpu_coregroup_map(int cpu); + #endif /* _ASM_I386_TOPOLOGY_H */ diff --git a/include/asm-i386/types.h b/include/asm-i386/types.h index ced00fe8fe61..e50a08bd7ced 100644 --- a/include/asm-i386/types.h +++ b/include/asm-i386/types.h @@ -63,6 +63,11 @@ typedef u64 sector_t; #define HAVE_SECTOR_T #endif +#ifdef CONFIG_LSF +typedef u64 blkcnt_t; +#define HAVE_BLKCNT_T +#endif + #endif /* __ASSEMBLY__ */ #endif /* __KERNEL__ */ diff --git a/include/asm-i386/unistd.h b/include/asm-i386/unistd.h index d8afd0e3b81a..014e3562895b 100644 --- a/include/asm-i386/unistd.h +++ b/include/asm-i386/unistd.h @@ -316,8 +316,10 @@ #define __NR_pselect6 308 #define __NR_ppoll 309 #define __NR_unshare 310 +#define __NR_set_robust_list 311 +#define __NR_get_robust_list 312 -#define NR_syscalls 311 +#define NR_syscalls 313 /* * user-visible error numbers are in the range -1 - -128: see diff --git a/include/asm-ia64/bitops.h b/include/asm-ia64/bitops.h index 36d0fb95ea89..90921e162793 100644 --- a/include/asm-ia64/bitops.h +++ b/include/asm-ia64/bitops.h @@ -5,8 +5,8 @@ * Copyright (C) 1998-2003 Hewlett-Packard Co * David Mosberger-Tang <davidm@hpl.hp.com> * - * 02/06/02 find_next_bit() and find_first_bit() added from Erich Focht's ia64 O(1) - * scheduler patch + * 02/06/02 find_next_bit() and find_first_bit() added from Erich Focht's ia64 + * O(1) scheduler patch */ #include <linux/compiler.h> @@ -25,9 +25,9 @@ * restricted to acting on a single-word quantity. * * The address must be (at least) "long" aligned. - * Note that there are driver (e.g., eepro100) which use these operations to operate on - * hw-defined data-structures, so we can't easily change these operations to force a - * bigger alignment. + * Note that there are driver (e.g., eepro100) which use these operations to + * operate on hw-defined data-structures, so we can't easily change these + * operations to force a bigger alignment. * * bit 0 is the LSB of addr; bit 32 is the LSB of (addr+1). */ @@ -284,8 +284,8 @@ test_bit (int nr, const volatile void *addr) * ffz - find the first zero bit in a long word * @x: The long word to find the bit in * - * Returns the bit-number (0..63) of the first (least significant) zero bit. Undefined if - * no zero exists, so code should check against ~0UL first... + * Returns the bit-number (0..63) of the first (least significant) zero bit. + * Undefined if no zero exists, so code should check against ~0UL first... */ static inline unsigned long ffz (unsigned long x) @@ -345,13 +345,14 @@ fls (int t) x |= x >> 16; return ia64_popcnt(x); } -#define fls64(x) generic_fls64(x) + +#include <asm-generic/bitops/fls64.h> /* - * ffs: find first bit set. This is defined the same way as the libc and compiler builtin - * ffs routines, therefore differs in spirit from the above ffz (man ffs): it operates on - * "int" values only and the result value is the bit number + 1. ffs(0) is defined to - * return zero. + * ffs: find first bit set. 
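Three closely related helpers keep appearing in these headers with slightly different conventions: __ffs() returns the 0-based index of the lowest set bit and is undefined for 0, ffs() follows the libc convention of index plus one with ffs(0) == 0, and ffz() is the 0-based index of the lowest clear bit and is undefined for ~0UL. A small host-side check of those conventions, using the POSIX ffs() and a GCC builtin as stand-ins:

    #include <assert.h>
    #include <strings.h>            /* ffs() */

    int main(void)
    {
        unsigned long word = 0x48;  /* bits 3 and 6 set */

        assert(__builtin_ctzl(word) == 3);     /* __ffs(): 0-based index */
        assert(ffs(0x48) == 4);                /* ffs(): index + 1       */
        assert(ffs(0) == 0);                   /* ffs(0) defined as 0    */
        assert(__builtin_ctzl(~0x7UL) == 3);   /* ffz(x) == __ffs(~x)    */
        return 0;
    }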
This is defined the same way as the libc and + * compiler builtin ffs routines, therefore differs in spirit from the above + * ffz (man ffs): it operates on "int" values only and the result value is the + * bit number + 1. ffs(0) is defined to return zero. */ #define ffs(x) __builtin_ffs(x) @@ -373,51 +374,17 @@ hweight64 (unsigned long x) #endif /* __KERNEL__ */ -extern int __find_next_zero_bit (const void *addr, unsigned long size, - unsigned long offset); -extern int __find_next_bit(const void *addr, unsigned long size, - unsigned long offset); - -#define find_next_zero_bit(addr, size, offset) \ - __find_next_zero_bit((addr), (size), (offset)) -#define find_next_bit(addr, size, offset) \ - __find_next_bit((addr), (size), (offset)) - -/* - * The optimizer actually does good code for this case.. - */ -#define find_first_zero_bit(addr, size) find_next_zero_bit((addr), (size), 0) - -#define find_first_bit(addr, size) find_next_bit((addr), (size), 0) +#include <asm-generic/bitops/find.h> #ifdef __KERNEL__ -#define __clear_bit(nr, addr) clear_bit(nr, addr) +#include <asm-generic/bitops/ext2-non-atomic.h> -#define ext2_set_bit test_and_set_bit #define ext2_set_bit_atomic(l,n,a) test_and_set_bit(n,a) -#define ext2_clear_bit test_and_clear_bit #define ext2_clear_bit_atomic(l,n,a) test_and_clear_bit(n,a) -#define ext2_test_bit test_bit -#define ext2_find_first_zero_bit find_first_zero_bit -#define ext2_find_next_zero_bit find_next_zero_bit - -/* Bitmap functions for the minix filesystem. */ -#define minix_test_and_set_bit(nr,addr) test_and_set_bit(nr,addr) -#define minix_set_bit(nr,addr) set_bit(nr,addr) -#define minix_test_and_clear_bit(nr,addr) test_and_clear_bit(nr,addr) -#define minix_test_bit(nr,addr) test_bit(nr,addr) -#define minix_find_first_zero_bit(addr,size) find_first_zero_bit(addr,size) -static inline int -sched_find_first_bit (unsigned long *b) -{ - if (unlikely(b[0])) - return __ffs(b[0]); - if (unlikely(b[1])) - return 64 + __ffs(b[1]); - return __ffs(b[2]) + 128; -} +#include <asm-generic/bitops/minix.h> +#include <asm-generic/bitops/sched.h> #endif /* __KERNEL__ */ diff --git a/include/asm-ia64/compat.h b/include/asm-ia64/compat.h index c0b19106665c..40d01d80610d 100644 --- a/include/asm-ia64/compat.h +++ b/include/asm-ia64/compat.h @@ -189,6 +189,12 @@ compat_ptr (compat_uptr_t uptr) return (void __user *) (unsigned long) uptr; } +static inline compat_uptr_t +ptr_to_compat(void __user *uptr) +{ + return (u32)(unsigned long)uptr; +} + static __inline__ void __user * compat_alloc_user_space (long len) { diff --git a/include/asm-ia64/dmi.h b/include/asm-ia64/dmi.h new file mode 100644 index 000000000000..f3efaa229525 --- /dev/null +++ b/include/asm-ia64/dmi.h @@ -0,0 +1,6 @@ +#ifndef _ASM_DMI_H +#define _ASM_DMI_H 1 + +#include <asm/io.h> + +#endif diff --git a/include/asm-ia64/io.h b/include/asm-ia64/io.h index b64fdb985494..c2e3742108bb 100644 --- a/include/asm-ia64/io.h +++ b/include/asm-ia64/io.h @@ -88,8 +88,8 @@ phys_to_virt (unsigned long address) } #define ARCH_HAS_VALID_PHYS_ADDR_RANGE -extern int valid_phys_addr_range (unsigned long addr, size_t *count); /* efi.c */ -extern int valid_mmap_phys_addr_range (unsigned long addr, size_t *count); +extern int valid_phys_addr_range (unsigned long addr, size_t count); /* efi.c */ +extern int valid_mmap_phys_addr_range (unsigned long addr, size_t count); /* * The following two macros are deprecated and scheduled for removal. 
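The ptr_to_compat() added above is the inverse of compat_ptr(): a 64-bit kernel narrows a 32-bit task's user pointer to a compat_uptr_t (u32) when filling in compat structures, and widens it back before use. The round trip only works because a 32-bit task's pointers fit in 32 bits to begin with. A host-side sketch of the two casts, with types simplified and no __user annotations:

    #include <assert.h>
    #include <stdint.h>

    typedef uint32_t compat_uptr_t;

    static void *compat_ptr(compat_uptr_t uptr)
    {
        return (void *)(unsigned long)uptr;
    }

    static compat_uptr_t ptr_to_compat(void *uptr)
    {
        return (compat_uptr_t)(unsigned long)uptr;
    }

    int main(void)
    {
        compat_uptr_t user = 0x0804a000u;      /* a typical 32-bit user address */

        assert(ptr_to_compat(compat_ptr(user)) == user);
        return 0;
    }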
@@ -416,24 +416,18 @@ __writeq (unsigned long val, volatile void __iomem *addr) # define outl_p outl #endif -/* - * An "address" in IO memory space is not clearly either an integer or a pointer. We will - * accept both, thus the casts. - * - * On ia-64, we access the physical I/O memory space through the uncached kernel region. - */ -static inline void __iomem * -ioremap (unsigned long offset, unsigned long size) -{ - return (void __iomem *) (__IA64_UNCACHED_OFFSET | (offset)); -} +extern void __iomem * ioremap(unsigned long offset, unsigned long size); +extern void __iomem * ioremap_nocache (unsigned long offset, unsigned long size); static inline void iounmap (volatile void __iomem *addr) { } -#define ioremap_nocache(o,s) ioremap(o,s) +/* Use normal IO mappings for DMI */ +#define dmi_ioremap ioremap +#define dmi_iounmap(x,l) iounmap(x) +#define dmi_alloc(l) kmalloc(l, GFP_ATOMIC) # ifdef __KERNEL__ diff --git a/include/asm-ia64/kdebug.h b/include/asm-ia64/kdebug.h index 8b01a083dde6..218c458ab60c 100644 --- a/include/asm-ia64/kdebug.h +++ b/include/asm-ia64/kdebug.h @@ -40,7 +40,7 @@ struct die_args { extern int register_die_notifier(struct notifier_block *); extern int unregister_die_notifier(struct notifier_block *); -extern struct notifier_block *ia64die_chain; +extern struct atomic_notifier_head ia64die_chain; enum die_val { DIE_BREAK = 1, @@ -81,7 +81,7 @@ static inline int notify_die(enum die_val val, char *str, struct pt_regs *regs, .signr = sig }; - return notifier_call_chain(&ia64die_chain, val, &args); + return atomic_notifier_call_chain(&ia64die_chain, val, &args); } #endif diff --git a/include/asm-ia64/page.h b/include/asm-ia64/page.h index 6e9aa23250c4..2087825eefa4 100644 --- a/include/asm-ia64/page.h +++ b/include/asm-ia64/page.h @@ -106,17 +106,25 @@ extern int ia64_pfn_valid (unsigned long pfn); # define ia64_pfn_valid(pfn) 1 #endif +#ifdef CONFIG_VIRTUAL_MEM_MAP +extern struct page *vmem_map; +#ifdef CONFIG_DISCONTIGMEM +# define page_to_pfn(page) ((unsigned long) (page - vmem_map)) +# define pfn_to_page(pfn) (vmem_map + (pfn)) +#endif +#endif + +#if defined(CONFIG_FLATMEM) || defined(CONFIG_SPARSEMEM) +/* FLATMEM always configures mem_map (mem_map = vmem_map if necessary) */ +#include <asm-generic/memory_model.h> +#endif + #ifdef CONFIG_FLATMEM # define pfn_valid(pfn) (((pfn) < max_mapnr) && ia64_pfn_valid(pfn)) -# define page_to_pfn(page) ((unsigned long) (page - mem_map)) -# define pfn_to_page(pfn) (mem_map + (pfn)) #elif defined(CONFIG_DISCONTIGMEM) -extern struct page *vmem_map; extern unsigned long min_low_pfn; extern unsigned long max_low_pfn; # define pfn_valid(pfn) (((pfn) >= min_low_pfn) && ((pfn) < max_low_pfn) && ia64_pfn_valid(pfn)) -# define page_to_pfn(page) ((unsigned long) (page - vmem_map)) -# define pfn_to_page(pfn) (vmem_map + (pfn)) #endif #define page_to_phys(page) (page_to_pfn(page) << PAGE_SHIFT) diff --git a/include/asm-ia64/sn/sn_sal.h b/include/asm-ia64/sn/sn_sal.h index 244449df7411..bf4cc867a698 100644 --- a/include/asm-ia64/sn/sn_sal.h +++ b/include/asm-ia64/sn/sn_sal.h @@ -159,7 +159,7 @@ static inline u32 sn_sal_rev(void) { - struct ia64_sal_systab *systab = efi.sal_systab; + struct ia64_sal_systab *systab = __va(efi.sal_systab); return (u32)(systab->sal_b_rev_major << 8 | systab->sal_b_rev_minor); } diff --git a/include/asm-m32r/bitops.h b/include/asm-m32r/bitops.h index abea2fdd8689..902a366101a5 100644 --- a/include/asm-m32r/bitops.h +++ b/include/asm-m32r/bitops.h @@ -63,25 +63,6 @@ static __inline__ void set_bit(int nr, volatile 
void * addr) } /** - * __set_bit - Set a bit in memory - * @nr: the bit to set - * @addr: the address to start counting from - * - * Unlike set_bit(), this function is non-atomic and may be reordered. - * If it's called on the same region of memory simultaneously, the effect - * may be that only one operation succeeds. - */ -static __inline__ void __set_bit(int nr, volatile void * addr) -{ - __u32 mask; - volatile __u32 *a = addr; - - a += (nr >> 5); - mask = (1 << (nr & 0x1F)); - *a |= mask; -} - -/** * clear_bit - Clears a bit in memory * @nr: Bit to clear * @addr: Address to start counting from @@ -118,39 +99,10 @@ static __inline__ void clear_bit(int nr, volatile void * addr) local_irq_restore(flags); } -static __inline__ void __clear_bit(int nr, volatile unsigned long * addr) -{ - unsigned long mask; - volatile unsigned long *a = addr; - - a += (nr >> 5); - mask = (1 << (nr & 0x1F)); - *a &= ~mask; -} - #define smp_mb__before_clear_bit() barrier() #define smp_mb__after_clear_bit() barrier() /** - * __change_bit - Toggle a bit in memory - * @nr: the bit to set - * @addr: the address to start counting from - * - * Unlike change_bit(), this function is non-atomic and may be reordered. - * If it's called on the same region of memory simultaneously, the effect - * may be that only one operation succeeds. - */ -static __inline__ void __change_bit(int nr, volatile void * addr) -{ - __u32 mask; - volatile __u32 *a = addr; - - a += (nr >> 5); - mask = (1 << (nr & 0x1F)); - *a ^= mask; -} - -/** * change_bit - Toggle a bit in memory * @nr: Bit to clear * @addr: Address to start counting from @@ -221,28 +173,6 @@ static __inline__ int test_and_set_bit(int nr, volatile void * addr) } /** - * __test_and_set_bit - Set a bit and return its old value - * @nr: Bit to set - * @addr: Address to count from - * - * This operation is non-atomic and can be reordered. - * If two examples of this operation race, one can appear to succeed - * but actually fail. You must protect multiple accesses with a lock. - */ -static __inline__ int __test_and_set_bit(int nr, volatile void * addr) -{ - __u32 mask, oldbit; - volatile __u32 *a = addr; - - a += (nr >> 5); - mask = (1 << (nr & 0x1F)); - oldbit = (*a & mask); - *a |= mask; - - return (oldbit != 0); -} - -/** * test_and_clear_bit - Clear a bit and return its old value * @nr: Bit to set * @addr: Address to count from @@ -280,42 +210,6 @@ static __inline__ int test_and_clear_bit(int nr, volatile void * addr) } /** - * __test_and_clear_bit - Clear a bit and return its old value - * @nr: Bit to set - * @addr: Address to count from - * - * This operation is non-atomic and can be reordered. - * If two examples of this operation race, one can appear to succeed - * but actually fail. You must protect multiple accesses with a lock. - */ -static __inline__ int __test_and_clear_bit(int nr, volatile void * addr) -{ - __u32 mask, oldbit; - volatile __u32 *a = addr; - - a += (nr >> 5); - mask = (1 << (nr & 0x1F)); - oldbit = (*a & mask); - *a &= ~mask; - - return (oldbit != 0); -} - -/* WARNING: non atomic and it can be reordered! 
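All of the __set_bit()/__test_and_set_bit()-style helpers being deleted here, and replaced by asm-generic/bitops/non-atomic.h further down, share one shape: index into the bitmap a word at a time, build a mask, and read-modify-write without any locking, which is why they may only be used under external serialization. A plain C sketch of that shape, assuming 32-bit words (the generic header works on unsigned long):

    #include <assert.h>
    #include <stdint.h>

    /* Non-atomic: caller must guarantee no concurrent access to the bitmap. */
    static int test_and_set_bit32(unsigned nr, uint32_t *addr)
    {
        uint32_t *word = addr + (nr >> 5);     /* which 32-bit word         */
        uint32_t mask = 1u << (nr & 31);       /* which bit in that word    */
        int old = (*word & mask) != 0;

        *word |= mask;                         /* plain RMW, no lock prefix */
        return old;
    }

    int main(void)
    {
        uint32_t map[4] = { 0 };

        assert(test_and_set_bit32(70, map) == 0);  /* word 2, bit 6 */
        assert(map[2] == (1u << 6));
        assert(test_and_set_bit32(70, map) == 1);  /* already set   */
        return 0;
    }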
*/ -static __inline__ int __test_and_change_bit(int nr, volatile void * addr) -{ - __u32 mask, oldbit; - volatile __u32 *a = addr; - - a += (nr >> 5); - mask = (1 << (nr & 0x1F)); - oldbit = (*a & mask); - *a ^= mask; - - return (oldbit != 0); -} - -/** * test_and_change_bit - Change a bit and return its old value * @nr: Bit to set * @addr: Address to count from @@ -350,353 +244,26 @@ static __inline__ int test_and_change_bit(int nr, volatile void * addr) return (oldbit != 0); } -/** - * test_bit - Determine whether a bit is set - * @nr: bit number to test - * @addr: Address to start counting from - */ -static __inline__ int test_bit(int nr, const volatile void * addr) -{ - __u32 mask; - const volatile __u32 *a = addr; - - a += (nr >> 5); - mask = (1 << (nr & 0x1F)); - - return ((*a & mask) != 0); -} - -/** - * ffz - find first zero in word. - * @word: The word to search - * - * Undefined if no zero exists, so code should check against ~0UL first. - */ -static __inline__ unsigned long ffz(unsigned long word) -{ - int k; - - word = ~word; - k = 0; - if (!(word & 0x0000ffff)) { k += 16; word >>= 16; } - if (!(word & 0x000000ff)) { k += 8; word >>= 8; } - if (!(word & 0x0000000f)) { k += 4; word >>= 4; } - if (!(word & 0x00000003)) { k += 2; word >>= 2; } - if (!(word & 0x00000001)) { k += 1; } - - return k; -} - -/** - * find_first_zero_bit - find the first zero bit in a memory region - * @addr: The address to start the search at - * @size: The maximum size to search - * - * Returns the bit-number of the first zero bit, not the number of the byte - * containing a bit. - */ - -#define find_first_zero_bit(addr, size) \ - find_next_zero_bit((addr), (size), 0) - -/** - * find_next_zero_bit - find the first zero bit in a memory region - * @addr: The address to base the search on - * @offset: The bitnumber to start searching at - * @size: The maximum size to search - */ -static __inline__ int find_next_zero_bit(const unsigned long *addr, - int size, int offset) -{ - const unsigned long *p = addr + (offset >> 5); - unsigned long result = offset & ~31UL; - unsigned long tmp; - - if (offset >= size) - return size; - size -= result; - offset &= 31UL; - if (offset) { - tmp = *(p++); - tmp |= ~0UL >> (32-offset); - if (size < 32) - goto found_first; - if (~tmp) - goto found_middle; - size -= 32; - result += 32; - } - while (size & ~31UL) { - if (~(tmp = *(p++))) - goto found_middle; - result += 32; - size -= 32; - } - if (!size) - return result; - tmp = *p; - -found_first: - tmp |= ~0UL << size; -found_middle: - return result + ffz(tmp); -} - -/** - * __ffs - find first bit in word. - * @word: The word to search - * - * Undefined if no bit exists, so code should check against 0 first. - */ -static __inline__ unsigned long __ffs(unsigned long word) -{ - int k = 0; - - if (!(word & 0x0000ffff)) { k += 16; word >>= 16; } - if (!(word & 0x000000ff)) { k += 8; word >>= 8; } - if (!(word & 0x0000000f)) { k += 4; word >>= 4; } - if (!(word & 0x00000003)) { k += 2; word >>= 2; } - if (!(word & 0x00000001)) { k += 1;} - - return k; -} - -/* - * fls: find last bit set. - */ -#define fls(x) generic_fls(x) -#define fls64(x) generic_fls64(x) +#include <asm-generic/bitops/non-atomic.h> +#include <asm-generic/bitops/ffz.h> +#include <asm-generic/bitops/__ffs.h> +#include <asm-generic/bitops/fls.h> +#include <asm-generic/bitops/fls64.h> #ifdef __KERNEL__ -/* - * Every architecture must define this function. It's the fastest - * way of searching a 140-bit bitmap where the first 100 bits are - * unlikely to be set. 
It's guaranteed that at least one of the 140 - * bits is cleared. - */ -static inline int sched_find_first_bit(unsigned long *b) -{ - if (unlikely(b[0])) - return __ffs(b[0]); - if (unlikely(b[1])) - return __ffs(b[1]) + 32; - if (unlikely(b[2])) - return __ffs(b[2]) + 64; - if (b[3]) - return __ffs(b[3]) + 96; - return __ffs(b[4]) + 128; -} - -/** - * find_next_bit - find the first set bit in a memory region - * @addr: The address to base the search on - * @offset: The bitnumber to start searching at - * @size: The maximum size to search - */ -static inline unsigned long find_next_bit(const unsigned long *addr, - unsigned long size, unsigned long offset) -{ - unsigned int *p = ((unsigned int *) addr) + (offset >> 5); - unsigned int result = offset & ~31UL; - unsigned int tmp; - - if (offset >= size) - return size; - size -= result; - offset &= 31UL; - if (offset) { - tmp = *p++; - tmp &= ~0UL << offset; - if (size < 32) - goto found_first; - if (tmp) - goto found_middle; - size -= 32; - result += 32; - } - while (size >= 32) { - if ((tmp = *p++) != 0) - goto found_middle; - result += 32; - size -= 32; - } - if (!size) - return result; - tmp = *p; - -found_first: - tmp &= ~0UL >> (32 - size); - if (tmp == 0UL) /* Are any bits set? */ - return result + size; /* Nope. */ -found_middle: - return result + __ffs(tmp); -} - -/** - * find_first_bit - find the first set bit in a memory region - * @addr: The address to start the search at - * @size: The maximum size to search - * - * Returns the bit-number of the first set bit, not the number of the byte - * containing a bit. - */ -#define find_first_bit(addr, size) \ - find_next_bit((addr), (size), 0) - -/** - * ffs - find first bit set - * @x: the word to search - * - * This is defined the same way as - * the libc and compiler builtin ffs routines, therefore - * differs in spirit from the above ffz (man ffs). - */ -#define ffs(x) generic_ffs(x) - -/** - * hweightN - returns the hamming weight of a N-bit word - * @x: the word to weigh - * - * The Hamming Weight of a number is the total number of bits set in it. 
- */ - -#define hweight32(x) generic_hweight32(x) -#define hweight16(x) generic_hweight16(x) -#define hweight8(x) generic_hweight8(x) +#include <asm-generic/bitops/sched.h> +#include <asm-generic/bitops/find.h> +#include <asm-generic/bitops/ffs.h> +#include <asm-generic/bitops/hweight.h> #endif /* __KERNEL__ */ #ifdef __KERNEL__ -/* - * ext2_XXXX function - * orig: include/asm-sh/bitops.h - */ - -#ifdef __LITTLE_ENDIAN__ -#define ext2_set_bit test_and_set_bit -#define ext2_clear_bit __test_and_clear_bit -#define ext2_test_bit test_bit -#define ext2_find_first_zero_bit find_first_zero_bit -#define ext2_find_next_zero_bit find_next_zero_bit -#else -static inline int ext2_set_bit(int nr, volatile void * addr) -{ - __u8 mask, oldbit; - volatile __u8 *a = addr; - - a += (nr >> 3); - mask = (1 << (nr & 0x07)); - oldbit = (*a & mask); - *a |= mask; - - return (oldbit != 0); -} - -static inline int ext2_clear_bit(int nr, volatile void * addr) -{ - __u8 mask, oldbit; - volatile __u8 *a = addr; - - a += (nr >> 3); - mask = (1 << (nr & 0x07)); - oldbit = (*a & mask); - *a &= ~mask; - - return (oldbit != 0); -} - -static inline int ext2_test_bit(int nr, const volatile void * addr) -{ - __u32 mask; - const volatile __u8 *a = addr; - - a += (nr >> 3); - mask = (1 << (nr & 0x07)); - - return ((mask & *a) != 0); -} - -#define ext2_find_first_zero_bit(addr, size) \ - ext2_find_next_zero_bit((addr), (size), 0) - -static inline unsigned long ext2_find_next_zero_bit(void *addr, - unsigned long size, unsigned long offset) -{ - unsigned long *p = ((unsigned long *) addr) + (offset >> 5); - unsigned long result = offset & ~31UL; - unsigned long tmp; - - if (offset >= size) - return size; - size -= result; - offset &= 31UL; - if(offset) { - /* We hold the little endian value in tmp, but then the - * shift is illegal. So we could keep a big endian value - * in tmp, like this: - * - * tmp = __swab32(*(p++)); - * tmp |= ~0UL >> (32-offset); - * - * but this would decrease preformance, so we change the - * shift: - */ - tmp = *(p++); - tmp |= __swab32(~0UL >> (32-offset)); - if(size < 32) - goto found_first; - if(~tmp) - goto found_middle; - size -= 32; - result += 32; - } - while(size & ~31UL) { - if(~(tmp = *(p++))) - goto found_middle; - result += 32; - size -= 32; - } - if(!size) - return result; - tmp = *p; - -found_first: - /* tmp is little endian, so we would have to swab the shift, - * see above. But then we have to swab tmp below for ffz, so - * we might as well do this here. - */ - return result + ffz(__swab32(tmp) | (~0UL << size)); -found_middle: - return result + ffz(__swab32(tmp)); -} -#endif - -#define ext2_set_bit_atomic(lock, nr, addr) \ - ({ \ - int ret; \ - spin_lock(lock); \ - ret = ext2_set_bit((nr), (addr)); \ - spin_unlock(lock); \ - ret; \ - }) - -#define ext2_clear_bit_atomic(lock, nr, addr) \ - ({ \ - int ret; \ - spin_lock(lock); \ - ret = ext2_clear_bit((nr), (addr)); \ - spin_unlock(lock); \ - ret; \ - }) - -/* Bitmap functions for the minix filesystem. 
*/ -#define minix_test_and_set_bit(nr,addr) __test_and_set_bit(nr,addr) -#define minix_set_bit(nr,addr) __set_bit(nr,addr) -#define minix_test_and_clear_bit(nr,addr) __test_and_clear_bit(nr,addr) -#define minix_test_bit(nr,addr) test_bit(nr,addr) -#define minix_find_first_zero_bit(addr,size) find_first_zero_bit(addr,size) +#include <asm-generic/bitops/ext2-non-atomic.h> +#include <asm-generic/bitops/ext2-atomic.h> +#include <asm-generic/bitops/minix.h> #endif /* __KERNEL__ */ diff --git a/include/asm-m32r/mmzone.h b/include/asm-m32r/mmzone.h index adc7970a77ec..9f3b5accda88 100644 --- a/include/asm-m32r/mmzone.h +++ b/include/asm-m32r/mmzone.h @@ -21,20 +21,6 @@ extern struct pglist_data *node_data[]; __pgdat->node_start_pfn + __pgdat->node_spanned_pages - 1; \ }) -#define pfn_to_page(pfn) \ -({ \ - unsigned long __pfn = pfn; \ - int __node = pfn_to_nid(__pfn); \ - &NODE_DATA(__node)->node_mem_map[node_localnr(__pfn,__node)]; \ -}) - -#define page_to_pfn(pg) \ -({ \ - struct page *__page = pg; \ - struct zone *__zone = page_zone(__page); \ - (unsigned long)(__page - __zone->zone_mem_map) \ - + __zone->zone_start_pfn; \ -}) #define pmd_page(pmd) (pfn_to_page(pmd_val(pmd) >> PAGE_SHIFT)) /* * pfn_valid should be made as fast as possible, and the current definition diff --git a/include/asm-m32r/page.h b/include/asm-m32r/page.h index 4ab578876361..9ddbc087dbc5 100644 --- a/include/asm-m32r/page.h +++ b/include/asm-m32r/page.h @@ -76,9 +76,7 @@ typedef struct { unsigned long pgprot; } pgprot_t; #ifndef CONFIG_DISCONTIGMEM #define PFN_BASE (CONFIG_MEMORY_START >> PAGE_SHIFT) -#define pfn_to_page(pfn) (mem_map + ((pfn) - PFN_BASE)) -#define page_to_pfn(page) \ - ((unsigned long)((page) - mem_map) + PFN_BASE) +#define ARCH_PFN_OFFSET PFN_BASE #define pfn_valid(pfn) (((pfn) - PFN_BASE) < max_mapnr) #endif /* !CONFIG_DISCONTIGMEM */ @@ -92,6 +90,7 @@ typedef struct { unsigned long pgprot; } pgprot_t; #endif /* __KERNEL__ */ +#include <asm-generic/memory_model.h> #include <asm-generic/page.h> #endif /* _ASM_M32R_PAGE_H */ diff --git a/include/asm-m32r/setup.h b/include/asm-m32r/setup.h index 5f028dc26a9b..52f4fa29abfc 100644 --- a/include/asm-m32r/setup.h +++ b/include/asm-m32r/setup.h @@ -24,10 +24,6 @@ #define RAMDISK_PROMPT_FLAG (0x8000) #define RAMDISK_LOAD_FLAG (0x4000) -#define PFN_UP(x) (((x) + PAGE_SIZE-1) >> PAGE_SHIFT) -#define PFN_DOWN(x) ((x) >> PAGE_SHIFT) -#define PFN_PHYS(x) ((x) << PAGE_SHIFT) - extern unsigned long memory_start; extern unsigned long memory_end; diff --git a/include/asm-m68k/bitops.h b/include/asm-m68k/bitops.h index 13f4c0048463..1a61fdb56aaf 100644 --- a/include/asm-m68k/bitops.h +++ b/include/asm-m68k/bitops.h @@ -310,36 +310,10 @@ static inline int fls(int x) return 32 - cnt; } -#define fls64(x) generic_fls64(x) -/* - * Every architecture must define this function. It's the fastest - * way of searching a 140-bit bitmap where the first 100 bits are - * unlikely to be set. It's guaranteed that at least one of the 140 - * bits is cleared. - */ -static inline int sched_find_first_bit(const unsigned long *b) -{ - if (unlikely(b[0])) - return __ffs(b[0]); - if (unlikely(b[1])) - return __ffs(b[1]) + 32; - if (unlikely(b[2])) - return __ffs(b[2]) + 64; - if (b[3]) - return __ffs(b[3]) + 96; - return __ffs(b[4]) + 128; -} - - -/* - * hweightN: returns the hamming weight (i.e. 
the number - * of bits set) of a N-bit word - */ - -#define hweight32(x) generic_hweight32(x) -#define hweight16(x) generic_hweight16(x) -#define hweight8(x) generic_hweight8(x) +#include <asm-generic/bitops/fls64.h> +#include <asm-generic/bitops/sched.h> +#include <asm-generic/bitops/hweight.h> /* Bitmap functions for the minix filesystem */ @@ -365,9 +339,9 @@ static inline int minix_find_first_zero_bit(const void *vaddr, unsigned size) return ((p - addr) << 4) + (res ^ 31); } -#define minix_test_and_set_bit(nr, addr) test_and_set_bit((nr) ^ 16, (unsigned long *)(addr)) -#define minix_set_bit(nr,addr) set_bit((nr) ^ 16, (unsigned long *)(addr)) -#define minix_test_and_clear_bit(nr, addr) test_and_clear_bit((nr) ^ 16, (unsigned long *)(addr)) +#define minix_test_and_set_bit(nr, addr) __test_and_set_bit((nr) ^ 16, (unsigned long *)(addr)) +#define minix_set_bit(nr,addr) __set_bit((nr) ^ 16, (unsigned long *)(addr)) +#define minix_test_and_clear_bit(nr, addr) __test_and_clear_bit((nr) ^ 16, (unsigned long *)(addr)) static inline int minix_test_bit(int nr, const void *vaddr) { @@ -377,9 +351,9 @@ static inline int minix_test_bit(int nr, const void *vaddr) /* Bitmap functions for the ext2 filesystem. */ -#define ext2_set_bit(nr, addr) test_and_set_bit((nr) ^ 24, (unsigned long *)(addr)) +#define ext2_set_bit(nr, addr) __test_and_set_bit((nr) ^ 24, (unsigned long *)(addr)) #define ext2_set_bit_atomic(lock, nr, addr) test_and_set_bit((nr) ^ 24, (unsigned long *)(addr)) -#define ext2_clear_bit(nr, addr) test_and_clear_bit((nr) ^ 24, (unsigned long *)(addr)) +#define ext2_clear_bit(nr, addr) __test_and_clear_bit((nr) ^ 24, (unsigned long *)(addr)) #define ext2_clear_bit_atomic(lock, nr, addr) test_and_clear_bit((nr) ^ 24, (unsigned long *)(addr)) static inline int ext2_test_bit(int nr, const void *vaddr) diff --git a/include/asm-m68k/stat.h b/include/asm-m68k/stat.h index c4c402a45e21..dd38bc2e9f98 100644 --- a/include/asm-m68k/stat.h +++ b/include/asm-m68k/stat.h @@ -60,8 +60,7 @@ struct stat64 { long long st_size; unsigned long st_blksize; - unsigned long __pad4; /* future possible st_blocks high bits */ - unsigned long st_blocks; /* Number 512-byte blocks allocated. */ + unsigned long long st_blocks; /* Number 512-byte blocks allocated. */ unsigned long st_atime; unsigned long st_atime_nsec; diff --git a/include/asm-m68knommu/bitops.h b/include/asm-m68knommu/bitops.h index 25d8a3cfef90..0b68ccd327f7 100644 --- a/include/asm-m68knommu/bitops.h +++ b/include/asm-m68knommu/bitops.h @@ -12,104 +12,10 @@ #ifdef __KERNEL__ -/* - * Generic ffs(). - */ -static inline int ffs(int x) -{ - int r = 1; - - if (!x) - return 0; - if (!(x & 0xffff)) { - x >>= 16; - r += 16; - } - if (!(x & 0xff)) { - x >>= 8; - r += 8; - } - if (!(x & 0xf)) { - x >>= 4; - r += 4; - } - if (!(x & 3)) { - x >>= 2; - r += 2; - } - if (!(x & 1)) { - x >>= 1; - r += 1; - } - return r; -} - -/* - * Generic __ffs(). - */ -static inline int __ffs(int x) -{ - int r = 0; - - if (!x) - return 0; - if (!(x & 0xffff)) { - x >>= 16; - r += 16; - } - if (!(x & 0xff)) { - x >>= 8; - r += 8; - } - if (!(x & 0xf)) { - x >>= 4; - r += 4; - } - if (!(x & 3)) { - x >>= 2; - r += 2; - } - if (!(x & 1)) { - x >>= 1; - r += 1; - } - return r; -} - -/* - * Every architecture must define this function. It's the fastest - * way of searching a 140-bit bitmap where the first 100 bits are - * unlikely to be set. It's guaranteed that at least one of the 140 - * bits is cleared. 
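The (nr) ^ 16 and (nr) ^ 24 adjustments in the m68k minix/ext2 macros above compensate for big-endian word layout: minix and ext2 bitmaps are little-endian (16-bit and byte granular respectively), so the byte a bit lives in has to be mirrored within the 16- or 32-bit word the native bitops operate on, while the bit position inside that byte stays put. XORing the bit number does exactly that mirroring; a quick check of the index arithmetic on its own, with no bitmap involved:

    #include <assert.h>

    int main(void)
    {
        /* little-endian bit nr -> (byte, bit-in-byte) */
        unsigned nr = 35, byte = nr >> 3, bit = nr & 7;   /* byte 4, bit 3 */

        /* ^ 24 mirrors the byte index within each 4-byte group (0<->3, 1<->2),
         * which is where that byte sits in a big-endian 32-bit word. */
        assert(((nr ^ 24) >> 3) == (byte ^ 3));
        assert(((nr ^ 24) & 7) == bit);

        /* ^ 16 does the same within 2-byte groups, for the 16-bit minix bitmaps */
        assert(((nr ^ 16) >> 3) == (byte ^ 2));
        assert(((nr ^ 16) & 7) == bit);
        return 0;
    }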
- */ -static inline int sched_find_first_bit(unsigned long *b) -{ - if (unlikely(b[0])) - return __ffs(b[0]); - if (unlikely(b[1])) - return __ffs(b[1]) + 32; - if (unlikely(b[2])) - return __ffs(b[2]) + 64; - if (b[3]) - return __ffs(b[3]) + 96; - return __ffs(b[4]) + 128; -} - -/* - * ffz = Find First Zero in word. Undefined if no zero exists, - * so code should check against ~0UL first.. - */ -static __inline__ unsigned long ffz(unsigned long word) -{ - unsigned long result = 0; - - while(word & 1) { - result++; - word >>= 1; - } - return result; -} - +#include <asm-generic/bitops/ffs.h> +#include <asm-generic/bitops/__ffs.h> +#include <asm-generic/bitops/sched.h> +#include <asm-generic/bitops/ffz.h> static __inline__ void set_bit(int nr, volatile unsigned long * addr) { @@ -254,98 +160,8 @@ static __inline__ int __test_bit(int nr, const volatile unsigned long * addr) __constant_test_bit((nr),(addr)) : \ __test_bit((nr),(addr))) -#define find_first_zero_bit(addr, size) \ - find_next_zero_bit((addr), (size), 0) -#define find_first_bit(addr, size) \ - find_next_bit((addr), (size), 0) - -static __inline__ int find_next_zero_bit (const void * addr, int size, int offset) -{ - unsigned long *p = ((unsigned long *) addr) + (offset >> 5); - unsigned long result = offset & ~31UL; - unsigned long tmp; - - if (offset >= size) - return size; - size -= result; - offset &= 31UL; - if (offset) { - tmp = *(p++); - tmp |= ~0UL >> (32-offset); - if (size < 32) - goto found_first; - if (~tmp) - goto found_middle; - size -= 32; - result += 32; - } - while (size & ~31UL) { - if (~(tmp = *(p++))) - goto found_middle; - result += 32; - size -= 32; - } - if (!size) - return result; - tmp = *p; - -found_first: - tmp |= ~0UL << size; -found_middle: - return result + ffz(tmp); -} - -/* - * Find next one bit in a bitmap reasonably efficiently. - */ -static __inline__ unsigned long find_next_bit(const unsigned long *addr, - unsigned long size, unsigned long offset) -{ - unsigned int *p = ((unsigned int *) addr) + (offset >> 5); - unsigned int result = offset & ~31UL; - unsigned int tmp; - - if (offset >= size) - return size; - size -= result; - offset &= 31UL; - if (offset) { - tmp = *p++; - tmp &= ~0UL << offset; - if (size < 32) - goto found_first; - if (tmp) - goto found_middle; - size -= 32; - result += 32; - } - while (size >= 32) { - if ((tmp = *p++) != 0) - goto found_middle; - result += 32; - size -= 32; - } - if (!size) - return result; - tmp = *p; - -found_first: - tmp &= ~0UL >> (32 - size); - if (tmp == 0UL) /* Are any bits set? */ - return result + size; /* Nope. */ -found_middle: - return result + __ffs(tmp); -} - -/* - * hweightN: returns the hamming weight (i.e. the number - * of bits set) of a N-bit word - */ - -#define hweight32(x) generic_hweight32(x) -#define hweight16(x) generic_hweight16(x) -#define hweight8(x) generic_hweight8(x) - +#include <asm-generic/bitops/find.h> +#include <asm-generic/bitops/hweight.h> static __inline__ int ext2_set_bit(int nr, volatile void * addr) { @@ -475,30 +291,11 @@ found_middle: return result + ffz(__swab32(tmp)); } -/* Bitmap functions for the minix filesystem. 
*/ -#define minix_test_and_set_bit(nr,addr) test_and_set_bit(nr,addr) -#define minix_set_bit(nr,addr) set_bit(nr,addr) -#define minix_test_and_clear_bit(nr,addr) test_and_clear_bit(nr,addr) -#define minix_test_bit(nr,addr) test_bit(nr,addr) -#define minix_find_first_zero_bit(addr,size) find_first_zero_bit(addr,size) - -/** - * hweightN - returns the hamming weight of a N-bit word - * @x: the word to weigh - * - * The Hamming Weight of a number is the total number of bits set in it. - */ - -#define hweight32(x) generic_hweight32(x) -#define hweight16(x) generic_hweight16(x) -#define hweight8(x) generic_hweight8(x) +#include <asm-generic/bitops/minix.h> #endif /* __KERNEL__ */ -/* - * fls: find last bit set. - */ -#define fls(x) generic_fls(x) -#define fls64(x) generic_fls64(x) +#include <asm-generic/bitops/fls.h> +#include <asm-generic/bitops/fls64.h> #endif /* _M68KNOMMU_BITOPS_H */ diff --git a/include/asm-mips/bitops.h b/include/asm-mips/bitops.h index 8e802059fe67..a1728f8c0705 100644 --- a/include/asm-mips/bitops.h +++ b/include/asm-mips/bitops.h @@ -105,22 +105,6 @@ static inline void set_bit(unsigned long nr, volatile unsigned long *addr) } /* - * __set_bit - Set a bit in memory - * @nr: the bit to set - * @addr: the address to start counting from - * - * Unlike set_bit(), this function is non-atomic and may be reordered. - * If it's called on the same region of memory simultaneously, the effect - * may be that only one operation succeeds. - */ -static inline void __set_bit(unsigned long nr, volatile unsigned long * addr) -{ - unsigned long * m = ((unsigned long *) addr) + (nr >> SZLONG_LOG); - - *m |= 1UL << (nr & SZLONG_MASK); -} - -/* * clear_bit - Clears a bit in memory * @nr: Bit to clear * @addr: Address to start counting from @@ -169,22 +153,6 @@ static inline void clear_bit(unsigned long nr, volatile unsigned long *addr) } /* - * __clear_bit - Clears a bit in memory - * @nr: Bit to clear - * @addr: Address to start counting from - * - * Unlike clear_bit(), this function is non-atomic and may be reordered. - * If it's called on the same region of memory simultaneously, the effect - * may be that only one operation succeeds. - */ -static inline void __clear_bit(unsigned long nr, volatile unsigned long * addr) -{ - unsigned long * m = ((unsigned long *) addr) + (nr >> SZLONG_LOG); - - *m &= ~(1UL << (nr & SZLONG_MASK)); -} - -/* * change_bit - Toggle a bit in memory * @nr: Bit to change * @addr: Address to start counting from @@ -235,22 +203,6 @@ static inline void change_bit(unsigned long nr, volatile unsigned long *addr) } /* - * __change_bit - Toggle a bit in memory - * @nr: the bit to change - * @addr: the address to start counting from - * - * Unlike change_bit(), this function is non-atomic and may be reordered. - * If it's called on the same region of memory simultaneously, the effect - * may be that only one operation succeeds. - */ -static inline void __change_bit(unsigned long nr, volatile unsigned long * addr) -{ - unsigned long * m = ((unsigned long *) addr) + (nr >> SZLONG_LOG); - - *m ^= 1UL << (nr & SZLONG_MASK); -} - -/* * test_and_set_bit - Set a bit and return its old value * @nr: Bit to set * @addr: Address to count from @@ -321,30 +273,6 @@ static inline int test_and_set_bit(unsigned long nr, } /* - * __test_and_set_bit - Set a bit and return its old value - * @nr: Bit to set - * @addr: Address to count from - * - * This operation is non-atomic and can be reordered. - * If two examples of this operation race, one can appear to succeed - * but actually fail. 
You must protect multiple accesses with a lock. - */ -static inline int __test_and_set_bit(unsigned long nr, - volatile unsigned long *addr) -{ - volatile unsigned long *a = addr; - unsigned long mask; - int retval; - - a += nr >> SZLONG_LOG; - mask = 1UL << (nr & SZLONG_MASK); - retval = (mask & *a) != 0; - *a |= mask; - - return retval; -} - -/* * test_and_clear_bit - Clear a bit and return its old value * @nr: Bit to clear * @addr: Address to count from @@ -417,30 +345,6 @@ static inline int test_and_clear_bit(unsigned long nr, } /* - * __test_and_clear_bit - Clear a bit and return its old value - * @nr: Bit to clear - * @addr: Address to count from - * - * This operation is non-atomic and can be reordered. - * If two examples of this operation race, one can appear to succeed - * but actually fail. You must protect multiple accesses with a lock. - */ -static inline int __test_and_clear_bit(unsigned long nr, - volatile unsigned long * addr) -{ - volatile unsigned long *a = addr; - unsigned long mask; - int retval; - - a += (nr >> SZLONG_LOG); - mask = 1UL << (nr & SZLONG_MASK); - retval = ((mask & *a) != 0); - *a &= ~mask; - - return retval; -} - -/* * test_and_change_bit - Change a bit and return its old value * @nr: Bit to change * @addr: Address to count from @@ -509,43 +413,11 @@ static inline int test_and_change_bit(unsigned long nr, } } -/* - * __test_and_change_bit - Change a bit and return its old value - * @nr: Bit to change - * @addr: Address to count from - * - * This operation is non-atomic and can be reordered. - * If two examples of this operation race, one can appear to succeed - * but actually fail. You must protect multiple accesses with a lock. - */ -static inline int __test_and_change_bit(unsigned long nr, - volatile unsigned long *addr) -{ - volatile unsigned long *a = addr; - unsigned long mask; - int retval; - - a += (nr >> SZLONG_LOG); - mask = 1UL << (nr & SZLONG_MASK); - retval = ((mask & *a) != 0); - *a ^= mask; - - return retval; -} - #undef __bi_flags #undef __bi_local_irq_save #undef __bi_local_irq_restore -/* - * test_bit - Determine whether a bit is set - * @nr: bit number to test - * @addr: Address to start counting from - */ -static inline int test_bit(unsigned long nr, const volatile unsigned long *addr) -{ - return 1UL & (addr[nr >> SZLONG_LOG] >> (nr & SZLONG_MASK)); -} +#include <asm-generic/bitops/non-atomic.h> /* * Return the bit position (0..63) of the most significant 1 bit in a word @@ -580,6 +452,8 @@ static inline int __ilog2(unsigned long x) return 63 - lz; } +#if defined(CONFIG_CPU_MIPS32) || defined(CONFIG_CPU_MIPS64) + /* * __ffs - find first bit in word. 
* @word: The word to search @@ -589,31 +463,7 @@ static inline int __ilog2(unsigned long x) */ static inline unsigned long __ffs(unsigned long word) { -#if defined(CONFIG_CPU_MIPS32) || defined(CONFIG_CPU_MIPS64) return __ilog2(word & -word); -#else - int b = 0, s; - -#ifdef CONFIG_32BIT - s = 16; if (word << 16 != 0) s = 0; b += s; word >>= s; - s = 8; if (word << 24 != 0) s = 0; b += s; word >>= s; - s = 4; if (word << 28 != 0) s = 0; b += s; word >>= s; - s = 2; if (word << 30 != 0) s = 0; b += s; word >>= s; - s = 1; if (word << 31 != 0) s = 0; b += s; - - return b; -#endif -#ifdef CONFIG_64BIT - s = 32; if (word << 32 != 0) s = 0; b += s; word >>= s; - s = 16; if (word << 48 != 0) s = 0; b += s; word >>= s; - s = 8; if (word << 56 != 0) s = 0; b += s; word >>= s; - s = 4; if (word << 60 != 0) s = 0; b += s; word >>= s; - s = 2; if (word << 62 != 0) s = 0; b += s; word >>= s; - s = 1; if (word << 63 != 0) s = 0; b += s; - - return b; -#endif -#endif } /* @@ -652,321 +502,38 @@ static inline unsigned long ffz(unsigned long word) */ static inline unsigned long fls(unsigned long word) { -#ifdef CONFIG_32BIT #ifdef CONFIG_CPU_MIPS32 __asm__ ("clz %0, %1" : "=r" (word) : "r" (word)); return 32 - word; -#else - { - int r = 32, s; - - if (word == 0) - return 0; - - s = 16; if ((word & 0xffff0000)) s = 0; r -= s; word <<= s; - s = 8; if ((word & 0xff000000)) s = 0; r -= s; word <<= s; - s = 4; if ((word & 0xf0000000)) s = 0; r -= s; word <<= s; - s = 2; if ((word & 0xc0000000)) s = 0; r -= s; word <<= s; - s = 1; if ((word & 0x80000000)) s = 0; r -= s; - - return r; - } #endif -#endif /* CONFIG_32BIT */ -#ifdef CONFIG_64BIT #ifdef CONFIG_CPU_MIPS64 - __asm__ ("dclz %0, %1" : "=r" (word) : "r" (word)); return 64 - word; -#else - { - int r = 64, s; - - if (word == 0) - return 0; - - s = 32; if ((word & 0xffffffff00000000UL)) s = 0; r -= s; word <<= s; - s = 16; if ((word & 0xffff000000000000UL)) s = 0; r -= s; word <<= s; - s = 8; if ((word & 0xff00000000000000UL)) s = 0; r -= s; word <<= s; - s = 4; if ((word & 0xf000000000000000UL)) s = 0; r -= s; word <<= s; - s = 2; if ((word & 0xc000000000000000UL)) s = 0; r -= s; word <<= s; - s = 1; if ((word & 0x8000000000000000UL)) s = 0; r -= s; - - return r; - } #endif -#endif /* CONFIG_64BIT */ } -#define fls64(x) generic_fls64(x) - -/* - * find_next_zero_bit - find the first zero bit in a memory region - * @addr: The address to base the search on - * @offset: The bitnumber to start searching at - * @size: The maximum size to search - */ -static inline unsigned long find_next_zero_bit(const unsigned long *addr, - unsigned long size, unsigned long offset) -{ - const unsigned long *p = addr + (offset >> SZLONG_LOG); - unsigned long result = offset & ~SZLONG_MASK; - unsigned long tmp; - - if (offset >= size) - return size; - size -= result; - offset &= SZLONG_MASK; - if (offset) { - tmp = *(p++); - tmp |= ~0UL >> (_MIPS_SZLONG-offset); - if (size < _MIPS_SZLONG) - goto found_first; - if (~tmp) - goto found_middle; - size -= _MIPS_SZLONG; - result += _MIPS_SZLONG; - } - while (size & ~SZLONG_MASK) { - if (~(tmp = *(p++))) - goto found_middle; - result += _MIPS_SZLONG; - size -= _MIPS_SZLONG; - } - if (!size) - return result; - tmp = *p; - -found_first: - tmp |= ~0UL << size; - if (tmp == ~0UL) /* Are any bits zero? */ - return result + size; /* Nope. 
*/ -found_middle: - return result + ffz(tmp); -} +#else -#define find_first_zero_bit(addr, size) \ - find_next_zero_bit((addr), (size), 0) +#include <asm-generic/bitops/__ffs.h> +#include <asm-generic/bitops/ffs.h> +#include <asm-generic/bitops/ffz.h> +#include <asm-generic/bitops/fls.h> -/* - * find_next_bit - find the next set bit in a memory region - * @addr: The address to base the search on - * @offset: The bitnumber to start searching at - * @size: The maximum size to search - */ -static inline unsigned long find_next_bit(const unsigned long *addr, - unsigned long size, unsigned long offset) -{ - const unsigned long *p = addr + (offset >> SZLONG_LOG); - unsigned long result = offset & ~SZLONG_MASK; - unsigned long tmp; - - if (offset >= size) - return size; - size -= result; - offset &= SZLONG_MASK; - if (offset) { - tmp = *(p++); - tmp &= ~0UL << offset; - if (size < _MIPS_SZLONG) - goto found_first; - if (tmp) - goto found_middle; - size -= _MIPS_SZLONG; - result += _MIPS_SZLONG; - } - while (size & ~SZLONG_MASK) { - if ((tmp = *(p++))) - goto found_middle; - result += _MIPS_SZLONG; - size -= _MIPS_SZLONG; - } - if (!size) - return result; - tmp = *p; - -found_first: - tmp &= ~0UL >> (_MIPS_SZLONG - size); - if (tmp == 0UL) /* Are any bits set? */ - return result + size; /* Nope. */ -found_middle: - return result + __ffs(tmp); -} +#endif /*defined(CONFIG_CPU_MIPS32) || defined(CONFIG_CPU_MIPS64) */ -/* - * find_first_bit - find the first set bit in a memory region - * @addr: The address to start the search at - * @size: The maximum size to search - * - * Returns the bit-number of the first set bit, not the number of the byte - * containing a bit. - */ -#define find_first_bit(addr, size) \ - find_next_bit((addr), (size), 0) +#include <asm-generic/bitops/fls64.h> +#include <asm-generic/bitops/find.h> #ifdef __KERNEL__ -/* - * Every architecture must define this function. It's the fastest - * way of searching a 140-bit bitmap where the first 100 bits are - * unlikely to be set. It's guaranteed that at least one of the 140 - * bits is cleared. - */ -static inline int sched_find_first_bit(const unsigned long *b) -{ -#ifdef CONFIG_32BIT - if (unlikely(b[0])) - return __ffs(b[0]); - if (unlikely(b[1])) - return __ffs(b[1]) + 32; - if (unlikely(b[2])) - return __ffs(b[2]) + 64; - if (b[3]) - return __ffs(b[3]) + 96; - return __ffs(b[4]) + 128; -#endif -#ifdef CONFIG_64BIT - if (unlikely(b[0])) - return __ffs(b[0]); - if (unlikely(b[1])) - return __ffs(b[1]) + 64; - return __ffs(b[2]) + 128; -#endif -} - -/* - * hweightN - returns the hamming weight of a N-bit word - * @x: the word to weigh - * - * The Hamming Weight of a number is the total number of bits set in it. 
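
[The per-architecture hweightN macros all reduce to a population count, now shared through asm-generic/bitops/hweight.h. A throwaway 32-bit sketch of the usual parallel bit-summing popcount (not necessarily the exact generic_hweight32 form the kernel uses):]

#include <stdio.h>
#include <stdint.h>
#include <assert.h>

/* Parallel popcount: sum adjacent 1-, 2-, 4-bit fields, then fold. */
static unsigned int hweight32_sketch(uint32_t w)
{
    w = w - ((w >> 1) & 0x55555555u);
    w = (w & 0x33333333u) + ((w >> 2) & 0x33333333u);
    w = (w + (w >> 4)) & 0x0f0f0f0fu;
    return (w * 0x01010101u) >> 24;
}

int main(void)
{
    assert(hweight32_sketch(0x00000000u) == 0);
    assert(hweight32_sketch(0xffffffffu) == 32);
    assert(hweight32_sketch(0x80000001u) == 2);
    printf("hweight32(0xdeadbeef) = %u\n", hweight32_sketch(0xdeadbeefu));
    return 0;
}
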
- */ - -#define hweight64(x) generic_hweight64(x) -#define hweight32(x) generic_hweight32(x) -#define hweight16(x) generic_hweight16(x) -#define hweight8(x) generic_hweight8(x) - -static inline int __test_and_set_le_bit(unsigned long nr, unsigned long *addr) -{ - unsigned char *ADDR = (unsigned char *) addr; - int mask, retval; - - ADDR += nr >> 3; - mask = 1 << (nr & 0x07); - retval = (mask & *ADDR) != 0; - *ADDR |= mask; - - return retval; -} - -static inline int __test_and_clear_le_bit(unsigned long nr, unsigned long *addr) -{ - unsigned char *ADDR = (unsigned char *) addr; - int mask, retval; - - ADDR += nr >> 3; - mask = 1 << (nr & 0x07); - retval = (mask & *ADDR) != 0; - *ADDR &= ~mask; - - return retval; -} - -static inline int test_le_bit(unsigned long nr, const unsigned long * addr) -{ - const unsigned char *ADDR = (const unsigned char *) addr; - int mask; - - ADDR += nr >> 3; - mask = 1 << (nr & 0x07); - - return ((mask & *ADDR) != 0); -} - -static inline unsigned long find_next_zero_le_bit(unsigned long *addr, - unsigned long size, unsigned long offset) -{ - unsigned long *p = ((unsigned long *) addr) + (offset >> SZLONG_LOG); - unsigned long result = offset & ~SZLONG_MASK; - unsigned long tmp; - - if (offset >= size) - return size; - size -= result; - offset &= SZLONG_MASK; - if (offset) { - tmp = cpu_to_lelongp(p++); - tmp |= ~0UL >> (_MIPS_SZLONG-offset); /* bug or feature ? */ - if (size < _MIPS_SZLONG) - goto found_first; - if (~tmp) - goto found_middle; - size -= _MIPS_SZLONG; - result += _MIPS_SZLONG; - } - while (size & ~SZLONG_MASK) { - if (~(tmp = cpu_to_lelongp(p++))) - goto found_middle; - result += _MIPS_SZLONG; - size -= _MIPS_SZLONG; - } - if (!size) - return result; - tmp = cpu_to_lelongp(p); - -found_first: - tmp |= ~0UL << size; - if (tmp == ~0UL) /* Are any bits zero? */ - return result + size; /* Nope. */ - -found_middle: - return result + ffz(tmp); -} - -#define find_first_zero_le_bit(addr, size) \ - find_next_zero_le_bit((addr), (size), 0) - -#define ext2_set_bit(nr,addr) \ - __test_and_set_le_bit((nr),(unsigned long*)addr) -#define ext2_clear_bit(nr, addr) \ - __test_and_clear_le_bit((nr),(unsigned long*)addr) - #define ext2_set_bit_atomic(lock, nr, addr) \ -({ \ - int ret; \ - spin_lock(lock); \ - ret = ext2_set_bit((nr), (addr)); \ - spin_unlock(lock); \ - ret; \ -}) - -#define ext2_clear_bit_atomic(lock, nr, addr) \ -({ \ - int ret; \ - spin_lock(lock); \ - ret = ext2_clear_bit((nr), (addr)); \ - spin_unlock(lock); \ - ret; \ -}) -#define ext2_test_bit(nr, addr) test_le_bit((nr),(unsigned long*)addr) -#define ext2_find_first_zero_bit(addr, size) \ - find_first_zero_le_bit((unsigned long*)addr, size) -#define ext2_find_next_zero_bit(addr, size, off) \ - find_next_zero_le_bit((unsigned long*)addr, size, off) - -/* - * Bitmap functions for the minix filesystem. - * - * FIXME: These assume that Minix uses the native byte/bitorder. - * This limits the Minix filesystem's value for data exchange very much. 
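
[ext2_set_bit_atomic()/ext2_clear_bit_atomic() above are nothing more than the non-atomic bit operations bracketed by the caller-supplied spinlock. A userspace illustration of the same wrap-a-plain-op-in-a-lock shape, with a pthread mutex standing in for the kernel spinlock:]

#include <pthread.h>
#include <stdio.h>

#define BITS_PER_LONG (sizeof(unsigned long) * 8)

static unsigned long bitmap[4];
static pthread_mutex_t bitmap_lock = PTHREAD_MUTEX_INITIALIZER;

/* Plain read-modify-write; safe only while bitmap_lock is held. */
static int set_bit_plain(unsigned long nr, unsigned long *addr)
{
    unsigned long mask = 1UL << (nr % BITS_PER_LONG);
    unsigned long *p   = addr + nr / BITS_PER_LONG;
    int old = (*p & mask) != 0;

    *p |= mask;
    return old;
}

/* The "atomic" flavour: same operation, serialised by the lock. */
static int set_bit_locked(unsigned long nr, unsigned long *addr)
{
    int old;

    pthread_mutex_lock(&bitmap_lock);
    old = set_bit_plain(nr, addr);
    pthread_mutex_unlock(&bitmap_lock);
    return old;
}

int main(void)
{
    printf("was set: %d\n", set_bit_locked(5, bitmap));
    printf("was set: %d\n", set_bit_locked(5, bitmap));
    return 0;
}
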
- */ -#define minix_test_and_set_bit(nr,addr) test_and_set_bit(nr,addr) -#define minix_set_bit(nr,addr) set_bit(nr,addr) -#define minix_test_and_clear_bit(nr,addr) test_and_clear_bit(nr,addr) -#define minix_test_bit(nr,addr) test_bit(nr,addr) -#define minix_find_first_zero_bit(addr,size) find_first_zero_bit(addr,size) +#include <asm-generic/bitops/sched.h> +#include <asm-generic/bitops/hweight.h> +#include <asm-generic/bitops/ext2-non-atomic.h> +#include <asm-generic/bitops/ext2-atomic.h> +#include <asm-generic/bitops/minix.h> #endif /* __KERNEL__ */ diff --git a/include/asm-mips/compat.h b/include/asm-mips/compat.h index 0012bd804d2d..986511db54a6 100644 --- a/include/asm-mips/compat.h +++ b/include/asm-mips/compat.h @@ -133,6 +133,11 @@ static inline void __user *compat_ptr(compat_uptr_t uptr) return (void __user *)(long)uptr; } +static inline compat_uptr_t ptr_to_compat(void __user *uptr) +{ + return (u32)(unsigned long)uptr; +} + static inline void __user *compat_alloc_user_space(long len) { struct pt_regs *regs = (struct pt_regs *) diff --git a/include/asm-mips/futex.h b/include/asm-mips/futex.h index 2454c44a8f54..a554089991f2 100644 --- a/include/asm-mips/futex.h +++ b/include/asm-mips/futex.h @@ -99,5 +99,11 @@ futex_atomic_op_inuser (int encoded_op, int __user *uaddr) return ret; } +static inline int +futex_atomic_cmpxchg_inatomic(int __user *uaddr, int oldval, int newval) +{ + return -ENOSYS; +} + #endif #endif diff --git a/include/asm-mips/mc146818-time.h b/include/asm-mips/mc146818-time.h index 47214861093b..41ac8d363c67 100644 --- a/include/asm-mips/mc146818-time.h +++ b/include/asm-mips/mc146818-time.h @@ -86,43 +86,14 @@ static inline int mc146818_set_rtc_mmss(unsigned long nowtime) return retval; } -/* - * Returns true if a clock update is in progress - */ -static inline unsigned char rtc_is_updating(void) -{ - unsigned char uip; - unsigned long flags; - - spin_lock_irqsave(&rtc_lock, flags); - uip = (CMOS_READ(RTC_FREQ_SELECT) & RTC_UIP); - spin_unlock_irqrestore(&rtc_lock, flags); - return uip; -} - static inline unsigned long mc146818_get_cmos_time(void) { unsigned int year, mon, day, hour, min, sec; - int i; unsigned long flags; - /* - * The Linux interpretation of the CMOS clock register contents: - * When the Update-In-Progress (UIP) flag goes from 1 to 0, the - * RTC registers show the second which has precisely just started. - * Let's hope other operating systems interpret the RTC the same way. - */ - - /* read RTC exactly on falling edge of update flag */ - for (i = 0 ; i < 1000000 ; i++) /* may take up to 1 second... */ - if (rtc_is_updating()) - break; - for (i = 0 ; i < 1000000 ; i++) /* must try at least 2.228 ms */ - if (!rtc_is_updating()) - break; - spin_lock_irqsave(&rtc_lock, flags); - do { /* Isn't this overkill ? 
UIP above should guarantee consistency */ + + do { sec = CMOS_READ(RTC_SECONDS); min = CMOS_READ(RTC_MINUTES); hour = CMOS_READ(RTC_HOURS); diff --git a/include/asm-mips/mmzone.h b/include/asm-mips/mmzone.h index 011caebac369..7bde4432092b 100644 --- a/include/asm-mips/mmzone.h +++ b/include/asm-mips/mmzone.h @@ -22,20 +22,6 @@ NODE_DATA(__n)->node_spanned_pages) : 0);\ }) -#define pfn_to_page(pfn) \ -({ \ - unsigned long __pfn = (pfn); \ - pg_data_t *__pg = NODE_DATA(pfn_to_nid(__pfn)); \ - __pg->node_mem_map + (__pfn - __pg->node_start_pfn); \ -}) - -#define page_to_pfn(p) \ -({ \ - struct page *__p = (p); \ - struct zone *__z = page_zone(__p); \ - ((__p - __z->zone_mem_map) + __z->zone_start_pfn); \ -}) - /* XXX: FIXME -- wli */ #define kern_addr_valid(addr) (0) diff --git a/include/asm-mips/page.h b/include/asm-mips/page.h index ee25a779bf49..a1eab136ff6c 100644 --- a/include/asm-mips/page.h +++ b/include/asm-mips/page.h @@ -140,8 +140,6 @@ typedef struct { unsigned long pgprot; } pgprot_t; #define pfn_to_kaddr(pfn) __va((pfn) << PAGE_SHIFT) #ifndef CONFIG_NEED_MULTIPLE_NODES -#define pfn_to_page(pfn) (mem_map + (pfn)) -#define page_to_pfn(page) ((unsigned long)((page) - mem_map)) #define pfn_valid(pfn) ((pfn) < max_mapnr) #endif @@ -160,6 +158,7 @@ typedef struct { unsigned long pgprot; } pgprot_t; #define WANT_PAGE_VIRTUAL #endif +#include <asm-generic/memory_model.h> #include <asm-generic/page.h> #endif /* _ASM_PAGE_H */ diff --git a/include/asm-mips/time.h b/include/asm-mips/time.h index 9cc3564cc2c9..d897c8bb554d 100644 --- a/include/asm-mips/time.h +++ b/include/asm-mips/time.h @@ -26,14 +26,14 @@ extern spinlock_t rtc_lock; /* * RTC ops. By default, they point to no-RTC functions. - * rtc_get_time - mktime(year, mon, day, hour, min, sec) in seconds. - * rtc_set_time - reverse the above translation and set time to RTC. - * rtc_set_mmss - similar to rtc_set_time, but only min and sec need + * rtc_mips_get_time - mktime(year, mon, day, hour, min, sec) in seconds. + * rtc_mips_set_time - reverse the above translation and set time to RTC. + * rtc_mips_set_mmss - similar to rtc_set_time, but only min and sec need * to be set. Used by RTC sync-up. */ -extern unsigned long (*rtc_get_time)(void); -extern int (*rtc_set_time)(unsigned long); -extern int (*rtc_set_mmss)(unsigned long); +extern unsigned long (*rtc_mips_get_time)(void); +extern int (*rtc_mips_set_time)(unsigned long); +extern int (*rtc_mips_set_mmss)(unsigned long); /* * Timer interrupt functions. 
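
[Several hunks in this patch delete per-architecture pfn_to_page()/page_to_pfn() and pull in asm-generic/memory_model.h instead; for the flat model the translation is pointer arithmetic against mem_map, offset by the first valid pfn (ARCH_PFN_OFFSET). A toy round-trip with made-up values (struct page, mem_map and the offset here are stand-ins, not the kernel's):]

#include <stdio.h>
#include <assert.h>

/* Toy stand-ins for the kernel's mem_map and ARCH_PFN_OFFSET. */
struct page { int dummy; };

#define ARCH_PFN_OFFSET 0x100UL          /* hypothetical first valid pfn */

static struct page mem_map[64];          /* covers pfns 0x100..0x13f */

static struct page *pfn_to_page_sketch(unsigned long pfn)
{
    return mem_map + (pfn - ARCH_PFN_OFFSET);
}

static unsigned long page_to_pfn_sketch(struct page *page)
{
    return (unsigned long)(page - mem_map) + ARCH_PFN_OFFSET;
}

int main(void)
{
    unsigned long pfn = 0x123;
    struct page *pg = pfn_to_page_sketch(pfn);

    assert(page_to_pfn_sketch(pg) == pfn);
    printf("pfn 0x%lx is mem_map[%ld]\n", pfn, (long)(pg - mem_map));
    return 0;
}
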
diff --git a/include/asm-mips/types.h b/include/asm-mips/types.h index 421b3aea14cc..cd2813d8e136 100644 --- a/include/asm-mips/types.h +++ b/include/asm-mips/types.h @@ -99,6 +99,11 @@ typedef u64 sector_t; #define HAVE_SECTOR_T #endif +#ifdef CONFIG_LSF +typedef u64 blkcnt_t; +#define HAVE_BLKCNT_T +#endif + #endif /* __ASSEMBLY__ */ #endif /* __KERNEL__ */ diff --git a/include/asm-parisc/bitops.h b/include/asm-parisc/bitops.h index 15d8c2b51584..900561922c4c 100644 --- a/include/asm-parisc/bitops.h +++ b/include/asm-parisc/bitops.h @@ -35,13 +35,6 @@ static __inline__ void set_bit(int nr, volatile unsigned long * addr) _atomic_spin_unlock_irqrestore(addr, flags); } -static __inline__ void __set_bit(unsigned long nr, volatile unsigned long * addr) -{ - unsigned long *m = (unsigned long *) addr + (nr >> SHIFT_PER_LONG); - - *m |= 1UL << CHOP_SHIFTCOUNT(nr); -} - static __inline__ void clear_bit(int nr, volatile unsigned long * addr) { unsigned long mask = ~(1UL << CHOP_SHIFTCOUNT(nr)); @@ -53,13 +46,6 @@ static __inline__ void clear_bit(int nr, volatile unsigned long * addr) _atomic_spin_unlock_irqrestore(addr, flags); } -static __inline__ void __clear_bit(unsigned long nr, volatile unsigned long * addr) -{ - unsigned long *m = (unsigned long *) addr + (nr >> SHIFT_PER_LONG); - - *m &= ~(1UL << CHOP_SHIFTCOUNT(nr)); -} - static __inline__ void change_bit(int nr, volatile unsigned long * addr) { unsigned long mask = 1UL << CHOP_SHIFTCOUNT(nr); @@ -71,13 +57,6 @@ static __inline__ void change_bit(int nr, volatile unsigned long * addr) _atomic_spin_unlock_irqrestore(addr, flags); } -static __inline__ void __change_bit(unsigned long nr, volatile unsigned long * addr) -{ - unsigned long *m = (unsigned long *) addr + (nr >> SHIFT_PER_LONG); - - *m ^= 1UL << CHOP_SHIFTCOUNT(nr); -} - static __inline__ int test_and_set_bit(int nr, volatile unsigned long * addr) { unsigned long mask = 1UL << CHOP_SHIFTCOUNT(nr); @@ -93,18 +72,6 @@ static __inline__ int test_and_set_bit(int nr, volatile unsigned long * addr) return (oldbit & mask) ? 1 : 0; } -static __inline__ int __test_and_set_bit(int nr, volatile unsigned long * address) -{ - unsigned long mask = 1UL << CHOP_SHIFTCOUNT(nr); - unsigned long oldbit; - unsigned long *addr = (unsigned long *)address + (nr >> SHIFT_PER_LONG); - - oldbit = *addr; - *addr = oldbit | mask; - - return (oldbit & mask) ? 1 : 0; -} - static __inline__ int test_and_clear_bit(int nr, volatile unsigned long * addr) { unsigned long mask = 1UL << CHOP_SHIFTCOUNT(nr); @@ -120,18 +87,6 @@ static __inline__ int test_and_clear_bit(int nr, volatile unsigned long * addr) return (oldbit & mask) ? 1 : 0; } -static __inline__ int __test_and_clear_bit(int nr, volatile unsigned long * address) -{ - unsigned long mask = 1UL << CHOP_SHIFTCOUNT(nr); - unsigned long *addr = (unsigned long *)address + (nr >> SHIFT_PER_LONG); - unsigned long oldbit; - - oldbit = *addr; - *addr = oldbit & ~mask; - - return (oldbit & mask) ? 1 : 0; -} - static __inline__ int test_and_change_bit(int nr, volatile unsigned long * addr) { unsigned long mask = 1UL << CHOP_SHIFTCOUNT(nr); @@ -147,25 +102,7 @@ static __inline__ int test_and_change_bit(int nr, volatile unsigned long * addr) return (oldbit & mask) ? 
1 : 0; } -static __inline__ int __test_and_change_bit(int nr, volatile unsigned long * address) -{ - unsigned long mask = 1UL << CHOP_SHIFTCOUNT(nr); - unsigned long *addr = (unsigned long *)address + (nr >> SHIFT_PER_LONG); - unsigned long oldbit; - - oldbit = *addr; - *addr = oldbit ^ mask; - - return (oldbit & mask) ? 1 : 0; -} - -static __inline__ int test_bit(int nr, const volatile unsigned long *address) -{ - unsigned long mask = 1UL << CHOP_SHIFTCOUNT(nr); - const unsigned long *addr = (const unsigned long *)address + (nr >> SHIFT_PER_LONG); - - return !!(*addr & mask); -} +#include <asm-generic/bitops/non-atomic.h> #ifdef __KERNEL__ @@ -219,8 +156,7 @@ static __inline__ unsigned long __ffs(unsigned long x) return ret; } -/* Undefined if no bit is zero. */ -#define ffz(x) __ffs(~x) +#include <asm-generic/bitops/ffz.h> /* * ffs: find first bit set. returns 1 to BITS_PER_LONG or 0 (if none set) @@ -263,155 +199,22 @@ static __inline__ int fls(int x) return ret; } -#define fls64(x) generic_fls64(x) -/* - * hweightN: returns the hamming weight (i.e. the number - * of bits set) of a N-bit word - */ -#define hweight64(x) generic_hweight64(x) -#define hweight32(x) generic_hweight32(x) -#define hweight16(x) generic_hweight16(x) -#define hweight8(x) generic_hweight8(x) - -/* - * Every architecture must define this function. It's the fastest - * way of searching a 140-bit bitmap where the first 100 bits are - * unlikely to be set. It's guaranteed that at least one of the 140 - * bits is cleared. - */ -static inline int sched_find_first_bit(const unsigned long *b) -{ -#ifdef __LP64__ - if (unlikely(b[0])) - return __ffs(b[0]); - if (unlikely(b[1])) - return __ffs(b[1]) + 64; - return __ffs(b[2]) + 128; -#else - if (unlikely(b[0])) - return __ffs(b[0]); - if (unlikely(b[1])) - return __ffs(b[1]) + 32; - if (unlikely(b[2])) - return __ffs(b[2]) + 64; - if (b[3]) - return __ffs(b[3]) + 96; - return __ffs(b[4]) + 128; -#endif -} +#include <asm-generic/bitops/fls64.h> +#include <asm-generic/bitops/hweight.h> +#include <asm-generic/bitops/sched.h> #endif /* __KERNEL__ */ -/* - * This implementation of find_{first,next}_zero_bit was stolen from - * Linus' asm-alpha/bitops.h. 
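
[The parisc ffz() becomes the generic one-liner __ffs(~x): the lowest clear bit of a word is the lowest set bit of its complement. A quick standalone check, with __builtin_ctzl standing in for __ffs:]

#include <stdio.h>
#include <assert.h>

/* ffz: index of the lowest clear bit; undefined if every bit is set. */
static unsigned int ffz_sketch(unsigned long x)
{
    return __builtin_ctzl(~x);     /* lowest set bit of the complement */
}

int main(void)
{
    assert(ffz_sketch(0x0UL) == 0);
    assert(ffz_sketch(0x1UL) == 1);
    assert(ffz_sketch(0xffUL) == 8);
    assert(ffz_sketch(~(1UL << 13)) == 13);
    puts("ffz(x) == __ffs(~x)");
    return 0;
}
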
- */ -#define find_first_zero_bit(addr, size) \ - find_next_zero_bit((addr), (size), 0) - -static __inline__ unsigned long find_next_zero_bit(const void * addr, unsigned long size, unsigned long offset) -{ - const unsigned long * p = ((unsigned long *) addr) + (offset >> SHIFT_PER_LONG); - unsigned long result = offset & ~(BITS_PER_LONG-1); - unsigned long tmp; - - if (offset >= size) - return size; - size -= result; - offset &= (BITS_PER_LONG-1); - if (offset) { - tmp = *(p++); - tmp |= ~0UL >> (BITS_PER_LONG-offset); - if (size < BITS_PER_LONG) - goto found_first; - if (~tmp) - goto found_middle; - size -= BITS_PER_LONG; - result += BITS_PER_LONG; - } - while (size & ~(BITS_PER_LONG -1)) { - if (~(tmp = *(p++))) - goto found_middle; - result += BITS_PER_LONG; - size -= BITS_PER_LONG; - } - if (!size) - return result; - tmp = *p; -found_first: - tmp |= ~0UL << size; -found_middle: - return result + ffz(tmp); -} - -static __inline__ unsigned long find_next_bit(const unsigned long *addr, unsigned long size, unsigned long offset) -{ - const unsigned long *p = addr + (offset >> SHIFT_PER_LONG); - unsigned long result = offset & ~(BITS_PER_LONG-1); - unsigned long tmp; - - if (offset >= size) - return size; - size -= result; - offset &= (BITS_PER_LONG-1); - if (offset) { - tmp = *(p++); - tmp &= (~0UL << offset); - if (size < BITS_PER_LONG) - goto found_first; - if (tmp) - goto found_middle; - size -= BITS_PER_LONG; - result += BITS_PER_LONG; - } - while (size & ~(BITS_PER_LONG-1)) { - if ((tmp = *(p++))) - goto found_middle; - result += BITS_PER_LONG; - size -= BITS_PER_LONG; - } - if (!size) - return result; - tmp = *p; - -found_first: - tmp &= (~0UL >> (BITS_PER_LONG - size)); - if (tmp == 0UL) /* Are any bits set? */ - return result + size; /* Nope. */ -found_middle: - return result + __ffs(tmp); -} - -/** - * find_first_bit - find the first set bit in a memory region - * @addr: The address to start the search at - * @size: The maximum size to search - * - * Returns the bit-number of the first set bit, not the number of the byte - * containing a bit. - */ -#define find_first_bit(addr, size) \ - find_next_bit((addr), (size), 0) - -#define _EXT2_HAVE_ASM_BITOPS_ +#include <asm-generic/bitops/find.h> #ifdef __KERNEL__ -/* - * test_and_{set,clear}_bit guarantee atomicity without - * disabling interrupts. 
- */ + +#include <asm-generic/bitops/ext2-non-atomic.h> /* '3' is bits per byte */ #define LE_BYTE_ADDR ((sizeof(unsigned long) - 1) << 3) -#define ext2_test_bit(nr, addr) \ - test_bit((nr) ^ LE_BYTE_ADDR, (unsigned long *)addr) -#define ext2_set_bit(nr, addr) \ - __test_and_set_bit((nr) ^ LE_BYTE_ADDR, (unsigned long *)addr) -#define ext2_clear_bit(nr, addr) \ - __test_and_clear_bit((nr) ^ LE_BYTE_ADDR, (unsigned long *)addr) - #define ext2_set_bit_atomic(l,nr,addr) \ test_and_set_bit((nr) ^ LE_BYTE_ADDR, (unsigned long *)addr) #define ext2_clear_bit_atomic(l,nr,addr) \ @@ -419,77 +222,6 @@ found_middle: #endif /* __KERNEL__ */ - -#define ext2_find_first_zero_bit(addr, size) \ - ext2_find_next_zero_bit((addr), (size), 0) - -/* include/linux/byteorder does not support "unsigned long" type */ -static inline unsigned long ext2_swabp(unsigned long * x) -{ -#ifdef __LP64__ - return (unsigned long) __swab64p((u64 *) x); -#else - return (unsigned long) __swab32p((u32 *) x); -#endif -} - -/* include/linux/byteorder doesn't support "unsigned long" type */ -static inline unsigned long ext2_swab(unsigned long y) -{ -#ifdef __LP64__ - return (unsigned long) __swab64((u64) y); -#else - return (unsigned long) __swab32((u32) y); -#endif -} - -static __inline__ unsigned long ext2_find_next_zero_bit(void *addr, unsigned long size, unsigned long offset) -{ - unsigned long *p = (unsigned long *) addr + (offset >> SHIFT_PER_LONG); - unsigned long result = offset & ~(BITS_PER_LONG - 1); - unsigned long tmp; - - if (offset >= size) - return size; - size -= result; - offset &= (BITS_PER_LONG - 1UL); - if (offset) { - tmp = ext2_swabp(p++); - tmp |= (~0UL >> (BITS_PER_LONG - offset)); - if (size < BITS_PER_LONG) - goto found_first; - if (~tmp) - goto found_middle; - size -= BITS_PER_LONG; - result += BITS_PER_LONG; - } - - while (size & ~(BITS_PER_LONG - 1)) { - if (~(tmp = *(p++))) - goto found_middle_swap; - result += BITS_PER_LONG; - size -= BITS_PER_LONG; - } - if (!size) - return result; - tmp = ext2_swabp(p); -found_first: - tmp |= ~0UL << size; - if (tmp == ~0UL) /* Are any bits zero? */ - return result + size; /* Nope. Skip ffz */ -found_middle: - return result + ffz(tmp); - -found_middle_swap: - return result + ffz(ext2_swab(tmp)); -} - - -/* Bitmap functions for the minix filesystem. 
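
[LE_BYTE_ADDR, defined just above as (sizeof(unsigned long) - 1) << 3, is the XOR fudge that lets big-endian parisc reuse its native bitops on ext2's little-endian on-disk bitmaps: XORing a bit number with it mirrors the byte position within the long while leaving the bit position inside the byte untouched. The index arithmetic can be checked without big-endian hardware:]

#include <stdio.h>
#include <assert.h>

#define LE_BYTE_ADDR   ((sizeof(unsigned long) - 1) << 3)
#define BYTES_PER_LONG (sizeof(unsigned long))

int main(void)
{
    unsigned long nr;

    for (nr = 0; nr < sizeof(unsigned long) * 8; nr++) {
        unsigned long swapped = nr ^ LE_BYTE_ADDR;

        /* The byte index is mirrored within the word... */
        assert(swapped >> 3 == BYTES_PER_LONG - 1 - (nr >> 3));
        /* ...while the bit position inside the byte is unchanged. */
        assert((swapped & 7) == (nr & 7));
    }
    puts("nr ^ LE_BYTE_ADDR mirrors only the byte index");
    return 0;
}
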
*/ -#define minix_test_and_set_bit(nr,addr) ext2_set_bit(nr,addr) -#define minix_set_bit(nr,addr) ((void)ext2_set_bit(nr,addr)) -#define minix_test_and_clear_bit(nr,addr) ext2_clear_bit(nr,addr) -#define minix_test_bit(nr,addr) ext2_test_bit(nr,addr) -#define minix_find_first_zero_bit(addr,size) ext2_find_first_zero_bit(addr,size) +#include <asm-generic/bitops/minix-le.h> #endif /* _PARISC_BITOPS_H */ diff --git a/include/asm-parisc/compat.h b/include/asm-parisc/compat.h index 38b918feead9..289624d8b2d4 100644 --- a/include/asm-parisc/compat.h +++ b/include/asm-parisc/compat.h @@ -138,6 +138,11 @@ static inline void __user *compat_ptr(compat_uptr_t uptr) return (void __user *)(unsigned long)uptr; } +static inline compat_uptr_t ptr_to_compat(void __user *uptr) +{ + return (u32)(unsigned long)uptr; +} + static __inline__ void __user *compat_alloc_user_space(long len) { struct pt_regs *regs = ¤t->thread.regs; diff --git a/include/asm-parisc/mmzone.h b/include/asm-parisc/mmzone.h index ae039f4fd711..ceb9b73199d1 100644 --- a/include/asm-parisc/mmzone.h +++ b/include/asm-parisc/mmzone.h @@ -25,23 +25,6 @@ extern struct node_map_data node_data[]; pg_data_t *__pgdat = NODE_DATA(nid); \ __pgdat->node_start_pfn + __pgdat->node_spanned_pages; \ }) -#define node_localnr(pfn, nid) ((pfn) - node_start_pfn(nid)) - -#define pfn_to_page(pfn) \ -({ \ - unsigned long __pfn = (pfn); \ - int __node = pfn_to_nid(__pfn); \ - &NODE_DATA(__node)->node_mem_map[node_localnr(__pfn,__node)]; \ -}) - -#define page_to_pfn(pg) \ -({ \ - struct page *__page = pg; \ - struct zone *__zone = page_zone(__page); \ - BUG_ON(__zone == NULL); \ - (unsigned long)(__page - __zone->zone_mem_map) \ - + __zone->zone_start_pfn; \ -}) /* We have these possible memory map layouts: * Astro: 0-3.75, 67.75-68, 4-64 diff --git a/include/asm-parisc/page.h b/include/asm-parisc/page.h index 4a6752b0afed..9f303c0c3cd7 100644 --- a/include/asm-parisc/page.h +++ b/include/asm-parisc/page.h @@ -130,8 +130,6 @@ extern int npmem_ranges; #define __va(x) ((void *)((unsigned long)(x)+PAGE_OFFSET)) #ifndef CONFIG_DISCONTIGMEM -#define pfn_to_page(pfn) (mem_map + (pfn)) -#define page_to_pfn(page) ((unsigned long)((page) - mem_map)) #define pfn_valid(pfn) ((pfn) < max_mapnr) #endif /* CONFIG_DISCONTIGMEM */ @@ -152,6 +150,7 @@ extern int npmem_ranges; #endif /* __KERNEL__ */ +#include <asm-generic/memory_model.h> #include <asm-generic/page.h> #endif /* _PARISC_PAGE_H */ diff --git a/include/asm-parisc/pdc.h b/include/asm-parisc/pdc.h index 8e23e4c674f6..0a3face6c480 100644 --- a/include/asm-parisc/pdc.h +++ b/include/asm-parisc/pdc.h @@ -333,7 +333,7 @@ struct pdc_model { /* for PDC_MODEL */ unsigned long curr_key; }; -/* Values for PDC_MODEL_CAPABILITES non-equivalent virtual aliasing support */ +/* Values for PDC_MODEL_CAPABILITIES non-equivalent virtual aliasing support */ #define PDC_MODEL_IOPDIR_FDC (1 << 2) /* see sba_iommu.c */ #define PDC_MODEL_NVA_MASK (3 << 4) diff --git a/include/asm-powerpc/bitops.h b/include/asm-powerpc/bitops.h index bf6941a810b8..d1c2a4405660 100644 --- a/include/asm-powerpc/bitops.h +++ b/include/asm-powerpc/bitops.h @@ -184,72 +184,7 @@ static __inline__ void set_bits(unsigned long mask, unsigned long *addr) : "cc"); } -/* Non-atomic versions */ -static __inline__ int test_bit(unsigned long nr, - __const__ volatile unsigned long *addr) -{ - return 1UL & (addr[BITOP_WORD(nr)] >> (nr & (BITS_PER_LONG-1))); -} - -static __inline__ void __set_bit(unsigned long nr, - volatile unsigned long *addr) -{ - unsigned long mask = 
BITOP_MASK(nr); - unsigned long *p = ((unsigned long *)addr) + BITOP_WORD(nr); - - *p |= mask; -} - -static __inline__ void __clear_bit(unsigned long nr, - volatile unsigned long *addr) -{ - unsigned long mask = BITOP_MASK(nr); - unsigned long *p = ((unsigned long *)addr) + BITOP_WORD(nr); - - *p &= ~mask; -} - -static __inline__ void __change_bit(unsigned long nr, - volatile unsigned long *addr) -{ - unsigned long mask = BITOP_MASK(nr); - unsigned long *p = ((unsigned long *)addr) + BITOP_WORD(nr); - - *p ^= mask; -} - -static __inline__ int __test_and_set_bit(unsigned long nr, - volatile unsigned long *addr) -{ - unsigned long mask = BITOP_MASK(nr); - unsigned long *p = ((unsigned long *)addr) + BITOP_WORD(nr); - unsigned long old = *p; - - *p = old | mask; - return (old & mask) != 0; -} - -static __inline__ int __test_and_clear_bit(unsigned long nr, - volatile unsigned long *addr) -{ - unsigned long mask = BITOP_MASK(nr); - unsigned long *p = ((unsigned long *)addr) + BITOP_WORD(nr); - unsigned long old = *p; - - *p = old & ~mask; - return (old & mask) != 0; -} - -static __inline__ int __test_and_change_bit(unsigned long nr, - volatile unsigned long *addr) -{ - unsigned long mask = BITOP_MASK(nr); - unsigned long *p = ((unsigned long *)addr) + BITOP_WORD(nr); - unsigned long old = *p; - - *p = old ^ mask; - return (old & mask) != 0; -} +#include <asm-generic/bitops/non-atomic.h> /* * Return the zero-based bit position (LE, not IBM bit numbering) of @@ -310,16 +245,9 @@ static __inline__ int fls(unsigned int x) asm ("cntlzw %0,%1" : "=r" (lz) : "r" (x)); return 32 - lz; } -#define fls64(x) generic_fls64(x) +#include <asm-generic/bitops/fls64.h> -/* - * hweightN: returns the hamming weight (i.e. the number - * of bits set) of a N-bit word - */ -#define hweight64(x) generic_hweight64(x) -#define hweight32(x) generic_hweight32(x) -#define hweight16(x) generic_hweight16(x) -#define hweight8(x) generic_hweight8(x) +#include <asm-generic/bitops/hweight.h> #define find_first_zero_bit(addr, size) find_next_zero_bit((addr), (size), 0) unsigned long find_next_zero_bit(const unsigned long *addr, @@ -397,32 +325,7 @@ unsigned long find_next_zero_le_bit(const unsigned long *addr, #define minix_find_first_zero_bit(addr,size) \ find_first_zero_le_bit((unsigned long *)addr, size) -/* - * Every architecture must define this function. It's the fastest - * way of searching a 140-bit bitmap where the first 100 bits are - * unlikely to be set. It's guaranteed that at least one of the 140 - * bits is cleared. 
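
[sched_find_first_bit(), now taken from asm-generic/bitops/sched.h, scans the O(1) scheduler's 140-bit priority bitmap word by word, relying on the guarantee that at least one bit is set. A standalone sketch of that scan (word count derived from BITS_PER_LONG, __builtin_ctzl standing in for __ffs):]

#include <stdio.h>
#include <assert.h>

#define BITS_PER_LONG (sizeof(unsigned long) * 8)

/* Scan a 140-bit priority bitmap; the caller guarantees that at
 * least one of the 140 bits is set. */
static unsigned int sched_find_first_bit_sketch(const unsigned long *b)
{
    unsigned int nwords = (140 + BITS_PER_LONG - 1) / BITS_PER_LONG;
    unsigned int w;

    for (w = 0; w < nwords; w++)
        if (b[w])
            return w * BITS_PER_LONG + __builtin_ctzl(b[w]);
    return 140;          /* unreachable if the contract holds */
}

int main(void)
{
    unsigned long prio[5] = { 0 };       /* 5 longs cover 140 bits on 32-bit */

    prio[100 / BITS_PER_LONG] |= 1UL << (100 % BITS_PER_LONG);
    assert(sched_find_first_bit_sketch(prio) == 100);
    puts("first runnable priority: 100");
    return 0;
}
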
- */ -static inline int sched_find_first_bit(const unsigned long *b) -{ -#ifdef CONFIG_PPC64 - if (unlikely(b[0])) - return __ffs(b[0]); - if (unlikely(b[1])) - return __ffs(b[1]) + 64; - return __ffs(b[2]) + 128; -#else - if (unlikely(b[0])) - return __ffs(b[0]); - if (unlikely(b[1])) - return __ffs(b[1]) + 32; - if (unlikely(b[2])) - return __ffs(b[2]) + 64; - if (b[3]) - return __ffs(b[3]) + 96; - return __ffs(b[4]) + 128; -#endif -} +#include <asm-generic/bitops/sched.h> #endif /* __KERNEL__ */ diff --git a/include/asm-powerpc/futex.h b/include/asm-powerpc/futex.h index 39e85f320a76..f1b3c00bc1ce 100644 --- a/include/asm-powerpc/futex.h +++ b/include/asm-powerpc/futex.h @@ -81,5 +81,11 @@ static inline int futex_atomic_op_inuser (int encoded_op, int __user *uaddr) return ret; } +static inline int +futex_atomic_cmpxchg_inatomic(int __user *uaddr, int oldval, int newval) +{ + return -ENOSYS; +} + #endif /* __KERNEL__ */ #endif /* _ASM_POWERPC_FUTEX_H */ diff --git a/include/asm-powerpc/kdebug.h b/include/asm-powerpc/kdebug.h index 7c16265568e0..c01786ab5fa6 100644 --- a/include/asm-powerpc/kdebug.h +++ b/include/asm-powerpc/kdebug.h @@ -16,13 +16,9 @@ struct die_args { int signr; }; -/* - Note - you should never unregister because that can race with NMIs. - If you really want to do it first unregister - then synchronize_sched - - then free. - */ -int register_die_notifier(struct notifier_block *nb); -extern struct notifier_block *powerpc_die_chain; +extern int register_die_notifier(struct notifier_block *); +extern int unregister_die_notifier(struct notifier_block *); +extern struct atomic_notifier_head powerpc_die_chain; /* Grossly misnamed. */ enum die_val { @@ -37,7 +33,7 @@ enum die_val { static inline int notify_die(enum die_val val,char *str,struct pt_regs *regs,long err,int trap, int sig) { struct die_args args = { .regs=regs, .str=str, .err=err, .trapnr=trap,.signr=sig }; - return notifier_call_chain(&powerpc_die_chain, val, &args); + return atomic_notifier_call_chain(&powerpc_die_chain, val, &args); } #endif /* __KERNEL__ */ diff --git a/include/asm-powerpc/page.h b/include/asm-powerpc/page.h index 0b82df483f7f..2fbecebe1c92 100644 --- a/include/asm-powerpc/page.h +++ b/include/asm-powerpc/page.h @@ -69,8 +69,6 @@ #endif #ifdef CONFIG_FLATMEM -#define pfn_to_page(pfn) (mem_map + (pfn)) -#define page_to_pfn(page) ((unsigned long)((page) - mem_map)) #define pfn_valid(pfn) ((pfn) < max_mapnr) #endif @@ -200,6 +198,7 @@ extern void copy_user_page(void *to, void *from, unsigned long vaddr, struct page *p); extern int page_is_ram(unsigned long pfn); +#include <asm-generic/memory_model.h> #endif /* __ASSEMBLY__ */ #endif /* __KERNEL__ */ diff --git a/include/asm-powerpc/types.h b/include/asm-powerpc/types.h index ec3c2ee8bf86..baabba96e313 100644 --- a/include/asm-powerpc/types.h +++ b/include/asm-powerpc/types.h @@ -103,6 +103,11 @@ typedef u64 sector_t; #define HAVE_SECTOR_T #endif +#ifdef CONFIG_LSF +typedef u64 blkcnt_t; +#define HAVE_BLKCNT_T +#endif + #endif /* __ASSEMBLY__ */ #endif /* __KERNEL__ */ diff --git a/include/asm-ppc/page.h b/include/asm-ppc/page.h index 538e0c8ab243..a70ba2ee552d 100644 --- a/include/asm-ppc/page.h +++ b/include/asm-ppc/page.h @@ -149,8 +149,7 @@ extern int page_is_ram(unsigned long pfn); #define __pa(x) ___pa((unsigned long)(x)) #define __va(x) ((void *)(___va((unsigned long)(x)))) -#define pfn_to_page(pfn) (mem_map + ((pfn) - PPC_PGSTART)) -#define page_to_pfn(page) ((unsigned long)((page) - mem_map) + PPC_PGSTART) +#define ARCH_PFN_OFFSET 
(PPC_PGSTART) #define virt_to_page(kaddr) pfn_to_page(__pa(kaddr) >> PAGE_SHIFT) #define page_to_virt(page) __va(page_to_pfn(page) << PAGE_SHIFT) @@ -175,5 +174,6 @@ extern __inline__ int get_order(unsigned long size) /* We do define AT_SYSINFO_EHDR but don't use the gate mecanism */ #define __HAVE_ARCH_GATE_AREA 1 +#include <asm-generic/memory_model.h> #endif /* __KERNEL__ */ #endif /* _PPC_PAGE_H */ diff --git a/include/asm-s390/bitops.h b/include/asm-s390/bitops.h index 3628899f48bb..ca092ffb7a95 100644 --- a/include/asm-s390/bitops.h +++ b/include/asm-s390/bitops.h @@ -828,35 +828,12 @@ static inline int sched_find_first_bit(unsigned long *b) return find_first_bit(b, 140); } -/* - * ffs: find first bit set. This is defined the same way as - * the libc and compiler builtin ffs routines, therefore - * differs in spirit from the above ffz (man ffs). - */ -#define ffs(x) generic_ffs(x) +#include <asm-generic/bitops/ffs.h> -/* - * fls: find last bit set. - */ -#define fls(x) generic_fls(x) -#define fls64(x) generic_fls64(x) - -/* - * hweightN: returns the hamming weight (i.e. the number - * of bits set) of a N-bit word - */ -#define hweight64(x) \ -({ \ - unsigned long __x = (x); \ - unsigned int __w; \ - __w = generic_hweight32((unsigned int) __x); \ - __w += generic_hweight32((unsigned int) (__x>>32)); \ - __w; \ -}) -#define hweight32(x) generic_hweight32(x) -#define hweight16(x) generic_hweight16(x) -#define hweight8(x) generic_hweight8(x) +#include <asm-generic/bitops/fls.h> +#include <asm-generic/bitops/fls64.h> +#include <asm-generic/bitops/hweight.h> #ifdef __KERNEL__ @@ -871,11 +848,11 @@ static inline int sched_find_first_bit(unsigned long *b) */ #define ext2_set_bit(nr, addr) \ - test_and_set_bit((nr)^(__BITOPS_WORDSIZE - 8), (unsigned long *)addr) + __test_and_set_bit((nr)^(__BITOPS_WORDSIZE - 8), (unsigned long *)addr) #define ext2_set_bit_atomic(lock, nr, addr) \ test_and_set_bit((nr)^(__BITOPS_WORDSIZE - 8), (unsigned long *)addr) #define ext2_clear_bit(nr, addr) \ - test_and_clear_bit((nr)^(__BITOPS_WORDSIZE - 8), (unsigned long *)addr) + __test_and_clear_bit((nr)^(__BITOPS_WORDSIZE - 8), (unsigned long *)addr) #define ext2_clear_bit_atomic(lock, nr, addr) \ test_and_clear_bit((nr)^(__BITOPS_WORDSIZE - 8), (unsigned long *)addr) #define ext2_test_bit(nr, addr) \ @@ -1011,18 +988,7 @@ ext2_find_next_zero_bit(void *vaddr, unsigned long size, unsigned long offset) return offset + ext2_find_first_zero_bit(p, size); } -/* Bitmap functions for the minix filesystem. */ -/* FIXME !!! 
*/ -#define minix_test_and_set_bit(nr,addr) \ - test_and_set_bit(nr,(unsigned long *)addr) -#define minix_set_bit(nr,addr) \ - set_bit(nr,(unsigned long *)addr) -#define minix_test_and_clear_bit(nr,addr) \ - test_and_clear_bit(nr,(unsigned long *)addr) -#define minix_test_bit(nr,addr) \ - test_bit(nr,(unsigned long *)addr) -#define minix_find_first_zero_bit(addr,size) \ - find_first_zero_bit(addr,size) +#include <asm-generic/bitops/minix.h> #endif /* __KERNEL__ */ diff --git a/include/asm-s390/compat.h b/include/asm-s390/compat.h index a007715f4aea..356a0b183539 100644 --- a/include/asm-s390/compat.h +++ b/include/asm-s390/compat.h @@ -128,6 +128,11 @@ static inline void __user *compat_ptr(compat_uptr_t uptr) return (void __user *)(unsigned long)(uptr & 0x7fffffffUL); } +static inline compat_uptr_t ptr_to_compat(void __user *uptr) +{ + return (u32)(unsigned long)uptr; +} + static inline void __user *compat_alloc_user_space(long len) { unsigned long stack; diff --git a/include/asm-s390/page.h b/include/asm-s390/page.h index 2430c561e021..3b1138ac7e79 100644 --- a/include/asm-s390/page.h +++ b/include/asm-s390/page.h @@ -181,8 +181,6 @@ page_get_storage_key(unsigned long addr) #define PAGE_OFFSET 0x0UL #define __pa(x) (unsigned long)(x) #define __va(x) (void *)(unsigned long)(x) -#define pfn_to_page(pfn) (mem_map + (pfn)) -#define page_to_pfn(page) ((unsigned long)((page) - mem_map)) #define virt_to_page(kaddr) pfn_to_page(__pa(kaddr) >> PAGE_SHIFT) #define pfn_valid(pfn) ((pfn) < max_mapnr) @@ -193,6 +191,7 @@ page_get_storage_key(unsigned long addr) #endif /* __KERNEL__ */ +#include <asm-generic/memory_model.h> #include <asm-generic/page.h> #endif /* _S390_PAGE_H */ diff --git a/include/asm-s390/types.h b/include/asm-s390/types.h index d0be3e477013..5738ad63537c 100644 --- a/include/asm-s390/types.h +++ b/include/asm-s390/types.h @@ -93,6 +93,11 @@ typedef u64 sector_t; #define HAVE_SECTOR_T #endif +#ifdef CONFIG_LSF +typedef u64 blkcnt_t; +#define HAVE_BLKCNT_T +#endif + #endif /* ! __s390x__ */ #endif /* __ASSEMBLY__ */ #endif /* __KERNEL__ */ diff --git a/include/asm-sh/addrspace.h b/include/asm-sh/addrspace.h index dbb05d1a26d1..720afc11c2ca 100644 --- a/include/asm-sh/addrspace.h +++ b/include/asm-sh/addrspace.h @@ -13,7 +13,7 @@ #include <asm/cpu/addrspace.h> -/* Memory segments (32bit Priviledged mode addresses) */ +/* Memory segments (32bit Privileged mode addresses) */ #define P0SEG 0x00000000 #define P1SEG 0x80000000 #define P2SEG 0xa0000000 diff --git a/include/asm-sh/bitops.h b/include/asm-sh/bitops.h index 1c5260860045..e34f82508568 100644 --- a/include/asm-sh/bitops.h +++ b/include/asm-sh/bitops.h @@ -19,16 +19,6 @@ static __inline__ void set_bit(int nr, volatile void * addr) local_irq_restore(flags); } -static __inline__ void __set_bit(int nr, volatile void * addr) -{ - int mask; - volatile unsigned int *a = addr; - - a += nr >> 5; - mask = 1 << (nr & 0x1f); - *a |= mask; -} - /* * clear_bit() doesn't provide any barrier for the compiler. 
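
[ptr_to_compat(), added for several architectures in this patch, is the inverse of compat_ptr(): on a 64-bit kernel a user pointer handed back to 32-bit user space is narrowed to a 32-bit compat_uptr_t by a double cast. A userspace sketch of the round trip (plain pointers here; the kernel's __user annotation is checker-only markup):]

#include <stdio.h>
#include <stdint.h>

typedef uint32_t compat_uptr_t;

/* Narrow a native user pointer to the 32-bit form handed to 32-bit
 * user space; only meaningful when the pointer fits in 32 bits. */
static compat_uptr_t ptr_to_compat_sketch(void *uptr)
{
    return (compat_uptr_t)(unsigned long)uptr;
}

static void *compat_ptr_sketch(compat_uptr_t uptr)
{
    return (void *)(unsigned long)uptr;
}

int main(void)
{
    void *p = (void *)(unsigned long)0x12345678u;
    compat_uptr_t c = ptr_to_compat_sketch(p);

    printf("compat value 0x%x round-trips: %d\n",
           (unsigned)c, compat_ptr_sketch(c) == p);
    return 0;
}
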
*/ @@ -47,16 +37,6 @@ static __inline__ void clear_bit(int nr, volatile void * addr) local_irq_restore(flags); } -static __inline__ void __clear_bit(int nr, volatile void * addr) -{ - int mask; - volatile unsigned int *a = addr; - - a += nr >> 5; - mask = 1 << (nr & 0x1f); - *a &= ~mask; -} - static __inline__ void change_bit(int nr, volatile void * addr) { int mask; @@ -70,16 +50,6 @@ static __inline__ void change_bit(int nr, volatile void * addr) local_irq_restore(flags); } -static __inline__ void __change_bit(int nr, volatile void * addr) -{ - int mask; - volatile unsigned int *a = addr; - - a += nr >> 5; - mask = 1 << (nr & 0x1f); - *a ^= mask; -} - static __inline__ int test_and_set_bit(int nr, volatile void * addr) { int mask, retval; @@ -96,19 +66,6 @@ static __inline__ int test_and_set_bit(int nr, volatile void * addr) return retval; } -static __inline__ int __test_and_set_bit(int nr, volatile void * addr) -{ - int mask, retval; - volatile unsigned int *a = addr; - - a += nr >> 5; - mask = 1 << (nr & 0x1f); - retval = (mask & *a) != 0; - *a |= mask; - - return retval; -} - static __inline__ int test_and_clear_bit(int nr, volatile void * addr) { int mask, retval; @@ -125,19 +82,6 @@ static __inline__ int test_and_clear_bit(int nr, volatile void * addr) return retval; } -static __inline__ int __test_and_clear_bit(int nr, volatile void * addr) -{ - int mask, retval; - volatile unsigned int *a = addr; - - a += nr >> 5; - mask = 1 << (nr & 0x1f); - retval = (mask & *a) != 0; - *a &= ~mask; - - return retval; -} - static __inline__ int test_and_change_bit(int nr, volatile void * addr) { int mask, retval; @@ -154,23 +98,7 @@ static __inline__ int test_and_change_bit(int nr, volatile void * addr) return retval; } -static __inline__ int __test_and_change_bit(int nr, volatile void * addr) -{ - int mask, retval; - volatile unsigned int *a = addr; - - a += nr >> 5; - mask = 1 << (nr & 0x1f); - retval = (mask & *a) != 0; - *a ^= mask; - - return retval; -} - -static __inline__ int test_bit(int nr, const volatile void *addr) -{ - return 1UL & (((const volatile unsigned int *) addr)[nr >> 5] >> (nr & 31)); -} +#include <asm-generic/bitops/non-atomic.h> static __inline__ unsigned long ffz(unsigned long word) { @@ -206,271 +134,15 @@ static __inline__ unsigned long __ffs(unsigned long word) return result; } -/** - * find_next_bit - find the next set bit in a memory region - * @addr: The address to base the search on - * @offset: The bitnumber to start searching at - * @size: The maximum size to search - */ -static __inline__ unsigned long find_next_bit(const unsigned long *addr, - unsigned long size, unsigned long offset) -{ - unsigned int *p = ((unsigned int *) addr) + (offset >> 5); - unsigned int result = offset & ~31UL; - unsigned int tmp; - - if (offset >= size) - return size; - size -= result; - offset &= 31UL; - if (offset) { - tmp = *p++; - tmp &= ~0UL << offset; - if (size < 32) - goto found_first; - if (tmp) - goto found_middle; - size -= 32; - result += 32; - } - while (size >= 32) { - if ((tmp = *p++) != 0) - goto found_middle; - result += 32; - size -= 32; - } - if (!size) - return result; - tmp = *p; - -found_first: - tmp &= ~0UL >> (32 - size); - if (tmp == 0UL) /* Are any bits set? */ - return result + size; /* Nope. 
*/ -found_middle: - return result + __ffs(tmp); -} - -/** - * find_first_bit - find the first set bit in a memory region - * @addr: The address to start the search at - * @size: The maximum size to search - * - * Returns the bit-number of the first set bit, not the number of the byte - * containing a bit. - */ -#define find_first_bit(addr, size) \ - find_next_bit((addr), (size), 0) - -static __inline__ int find_next_zero_bit(const unsigned long *addr, int size, int offset) -{ - const unsigned long *p = ((unsigned long *) addr) + (offset >> 5); - unsigned long result = offset & ~31UL; - unsigned long tmp; - - if (offset >= size) - return size; - size -= result; - offset &= 31UL; - if (offset) { - tmp = *(p++); - tmp |= ~0UL >> (32-offset); - if (size < 32) - goto found_first; - if (~tmp) - goto found_middle; - size -= 32; - result += 32; - } - while (size & ~31UL) { - if (~(tmp = *(p++))) - goto found_middle; - result += 32; - size -= 32; - } - if (!size) - return result; - tmp = *p; - -found_first: - tmp |= ~0UL << size; -found_middle: - return result + ffz(tmp); -} - -#define find_first_zero_bit(addr, size) \ - find_next_zero_bit((addr), (size), 0) - -/* - * ffs: find first bit set. This is defined the same way as - * the libc and compiler builtin ffs routines, therefore - * differs in spirit from the above ffz (man ffs). - */ - -#define ffs(x) generic_ffs(x) - -/* - * hweightN: returns the hamming weight (i.e. the number - * of bits set) of a N-bit word - */ - -#define hweight32(x) generic_hweight32(x) -#define hweight16(x) generic_hweight16(x) -#define hweight8(x) generic_hweight8(x) - -/* - * Every architecture must define this function. It's the fastest - * way of searching a 140-bit bitmap where the first 100 bits are - * unlikely to be set. It's guaranteed that at least one of the 140 - * bits is cleared. 
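
[All of the open-coded find_next_zero_bit() loops being removed share one algorithm: pretend the bits below the starting offset are already set, skip whole words that are all ones, then apply ffz() to the first word with a hole. A compact standalone version, simplified to sizes that are a multiple of the word size and using __builtin_ctzl(~w) in place of ffz():]

#include <stdio.h>
#include <assert.h>

#define BITS_PER_LONG (sizeof(unsigned long) * 8)

/* First clear bit at or after 'offset'; returns 'size' if there is none.
 * Simplified: 'size' must be a multiple of BITS_PER_LONG. */
static unsigned long find_next_zero_bit_sketch(const unsigned long *addr,
                                               unsigned long size,
                                               unsigned long offset)
{
    while (offset < size) {
        unsigned long word = addr[offset / BITS_PER_LONG];

        /* Treat bits below 'offset' as if they were set already. */
        word |= (1UL << (offset % BITS_PER_LONG)) - 1;
        if (~word)                               /* any hole in this word? */
            return (offset & ~(BITS_PER_LONG - 1))
                   + __builtin_ctzl(~word);      /* ffz(word) */
        offset = (offset & ~(BITS_PER_LONG - 1)) + BITS_PER_LONG;
    }
    return size;
}

int main(void)
{
    unsigned long map[2];

    map[0] = ~0UL;                       /* word 0: completely full */
    map[1] = ~0UL ^ (1UL << 3);          /* one hole at bit BITS_PER_LONG + 3 */

    assert(find_next_zero_bit_sketch(map, 2 * BITS_PER_LONG, 5)
           == BITS_PER_LONG + 3);
    assert(find_next_zero_bit_sketch(map, 2 * BITS_PER_LONG,
                                     2 * BITS_PER_LONG - 1)
           == 2 * BITS_PER_LONG);
    puts("zero-bit scan behaves as expected");
    return 0;
}
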
- */ - -static inline int sched_find_first_bit(const unsigned long *b) -{ - if (unlikely(b[0])) - return __ffs(b[0]); - if (unlikely(b[1])) - return __ffs(b[1]) + 32; - if (unlikely(b[2])) - return __ffs(b[2]) + 64; - if (b[3]) - return __ffs(b[3]) + 96; - return __ffs(b[4]) + 128; -} - -#ifdef __LITTLE_ENDIAN__ -#define ext2_set_bit(nr, addr) test_and_set_bit((nr), (addr)) -#define ext2_clear_bit(nr, addr) test_and_clear_bit((nr), (addr)) -#define ext2_test_bit(nr, addr) test_bit((nr), (addr)) -#define ext2_find_first_zero_bit(addr, size) find_first_zero_bit((addr), (size)) -#define ext2_find_next_zero_bit(addr, size, offset) \ - find_next_zero_bit((unsigned long *)(addr), (size), (offset)) -#else -static __inline__ int ext2_set_bit(int nr, volatile void * addr) -{ - int mask, retval; - unsigned long flags; - volatile unsigned char *ADDR = (unsigned char *) addr; - - ADDR += nr >> 3; - mask = 1 << (nr & 0x07); - local_irq_save(flags); - retval = (mask & *ADDR) != 0; - *ADDR |= mask; - local_irq_restore(flags); - return retval; -} - -static __inline__ int ext2_clear_bit(int nr, volatile void * addr) -{ - int mask, retval; - unsigned long flags; - volatile unsigned char *ADDR = (unsigned char *) addr; - - ADDR += nr >> 3; - mask = 1 << (nr & 0x07); - local_irq_save(flags); - retval = (mask & *ADDR) != 0; - *ADDR &= ~mask; - local_irq_restore(flags); - return retval; -} - -static __inline__ int ext2_test_bit(int nr, const volatile void * addr) -{ - int mask; - const volatile unsigned char *ADDR = (const unsigned char *) addr; - - ADDR += nr >> 3; - mask = 1 << (nr & 0x07); - return ((mask & *ADDR) != 0); -} - -#define ext2_find_first_zero_bit(addr, size) \ - ext2_find_next_zero_bit((addr), (size), 0) - -static __inline__ unsigned long ext2_find_next_zero_bit(void *addr, unsigned long size, unsigned long offset) -{ - unsigned long *p = ((unsigned long *) addr) + (offset >> 5); - unsigned long result = offset & ~31UL; - unsigned long tmp; - - if (offset >= size) - return size; - size -= result; - offset &= 31UL; - if(offset) { - /* We hold the little endian value in tmp, but then the - * shift is illegal. So we could keep a big endian value - * in tmp, like this: - * - * tmp = __swab32(*(p++)); - * tmp |= ~0UL >> (32-offset); - * - * but this would decrease preformance, so we change the - * shift: - */ - tmp = *(p++); - tmp |= __swab32(~0UL >> (32-offset)); - if(size < 32) - goto found_first; - if(~tmp) - goto found_middle; - size -= 32; - result += 32; - } - while(size & ~31UL) { - if(~(tmp = *(p++))) - goto found_middle; - result += 32; - size -= 32; - } - if(!size) - return result; - tmp = *p; - -found_first: - /* tmp is little endian, so we would have to swab the shift, - * see above. But then we have to swab tmp below for ffz, so - * we might as well do this here. - */ - return result + ffz(__swab32(tmp) | (~0UL << size)); -found_middle: - return result + ffz(__swab32(tmp)); -} -#endif - -#define ext2_set_bit_atomic(lock, nr, addr) \ - ({ \ - int ret; \ - spin_lock(lock); \ - ret = ext2_set_bit((nr), (addr)); \ - spin_unlock(lock); \ - ret; \ - }) - -#define ext2_clear_bit_atomic(lock, nr, addr) \ - ({ \ - int ret; \ - spin_lock(lock); \ - ret = ext2_clear_bit((nr), (addr)); \ - spin_unlock(lock); \ - ret; \ - }) - -/* Bitmap functions for the minix filesystem. 
*/ -#define minix_test_and_set_bit(nr,addr) test_and_set_bit(nr,addr) -#define minix_set_bit(nr,addr) set_bit(nr,addr) -#define minix_test_and_clear_bit(nr,addr) test_and_clear_bit(nr,addr) -#define minix_test_bit(nr,addr) test_bit(nr,addr) -#define minix_find_first_zero_bit(addr,size) find_first_zero_bit(addr,size) - -/* - * fls: find last bit set. - */ - -#define fls(x) generic_fls(x) -#define fls64(x) generic_fls64(x) +#include <asm-generic/bitops/find.h> +#include <asm-generic/bitops/ffs.h> +#include <asm-generic/bitops/hweight.h> +#include <asm-generic/bitops/sched.h> +#include <asm-generic/bitops/ext2-non-atomic.h> +#include <asm-generic/bitops/ext2-atomic.h> +#include <asm-generic/bitops/minix.h> +#include <asm-generic/bitops/fls.h> +#include <asm-generic/bitops/fls64.h> #endif /* __KERNEL__ */ diff --git a/include/asm-sh/page.h b/include/asm-sh/page.h index 972c3f655b2a..9c89287c3e56 100644 --- a/include/asm-sh/page.h +++ b/include/asm-sh/page.h @@ -105,9 +105,7 @@ typedef struct { unsigned long pgprot; } pgprot_t; /* PFN start number, because of __MEMORY_START */ #define PFN_START (__MEMORY_START >> PAGE_SHIFT) - -#define pfn_to_page(pfn) (mem_map + (pfn) - PFN_START) -#define page_to_pfn(page) ((unsigned long)((page) - mem_map) + PFN_START) +#define ARCH_PFN_OFFSET (FPN_START) #define virt_to_page(kaddr) pfn_to_page(__pa(kaddr) >> PAGE_SHIFT) #define pfn_valid(pfn) (((pfn) - PFN_START) < max_mapnr) #define virt_addr_valid(kaddr) pfn_valid(__pa(kaddr) >> PAGE_SHIFT) @@ -117,6 +115,7 @@ typedef struct { unsigned long pgprot; } pgprot_t; #endif /* __KERNEL__ */ +#include <asm-generic/memory_model.h> #include <asm-generic/page.h> #endif /* __ASM_SH_PAGE_H */ diff --git a/include/asm-sh/stat.h b/include/asm-sh/stat.h index 914e3fcbbd37..6c41a60657f1 100644 --- a/include/asm-sh/stat.h +++ b/include/asm-sh/stat.h @@ -60,13 +60,7 @@ struct stat64 { long long st_size; unsigned long st_blksize; -#if defined(__BIG_ENDIAN__) - unsigned long __pad4; /* Future possible st_blocks hi bits */ - unsigned long st_blocks; /* Number 512-byte blocks allocated. */ -#else /* Must be little */ - unsigned long st_blocks; /* Number 512-byte blocks allocated. */ - unsigned long __pad4; /* Future possible st_blocks hi bits */ -#endif + unsigned long long st_blocks; /* Number 512-byte blocks allocated. 
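
[The switch of st_blocks above (and the new blkcnt_t under CONFIG_LSF elsewhere in the patch) to a 64-bit type is about range: a 32-bit count of 512-byte blocks overflows at 2 TiB. A quick arithmetic check:]

#include <stdio.h>
#include <stdint.h>

int main(void)
{
    /* A hypothetical 3 TiB file, counted in 512-byte blocks. */
    uint64_t bytes    = 3ULL << 40;
    uint64_t blocks   = bytes / 512;        /* needs more than 32 bits */
    uint32_t blocks32 = (uint32_t)blocks;   /* a 32-bit st_blocks keeps only this */

    printf("64-bit block count: %llu\n", (unsigned long long)blocks);
    printf("32-bit block count: %u (truncated)\n", (unsigned)blocks32);
    return 0;
}
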
*/ unsigned long st_atime; unsigned long st_atime_nsec; diff --git a/include/asm-sh/thread_info.h b/include/asm-sh/thread_info.h index 85f0c11b4319..7345350d98c0 100644 --- a/include/asm-sh/thread_info.h +++ b/include/asm-sh/thread_info.h @@ -18,7 +18,7 @@ struct thread_info { struct task_struct *task; /* main task structure */ struct exec_domain *exec_domain; /* execution domain */ - __u32 flags; /* low level flags */ + unsigned long flags; /* low level flags */ __u32 cpu; int preempt_count; /* 0 => preemptable, <0 => BUG */ struct restart_block restart_block; diff --git a/include/asm-sh/types.h b/include/asm-sh/types.h index cb7e183a0a6b..488552f43b2a 100644 --- a/include/asm-sh/types.h +++ b/include/asm-sh/types.h @@ -58,6 +58,11 @@ typedef u64 sector_t; #define HAVE_SECTOR_T #endif +#ifdef CONFIG_LSF +typedef u64 blkcnt_t; +#define HAVE_BLKCNT_T +#endif + #endif /* __ASSEMBLY__ */ #endif /* __KERNEL__ */ diff --git a/include/asm-sh64/bitops.h b/include/asm-sh64/bitops.h index ce9c3ad45fe0..f3bdcdb5d046 100644 --- a/include/asm-sh64/bitops.h +++ b/include/asm-sh64/bitops.h @@ -31,16 +31,6 @@ static __inline__ void set_bit(int nr, volatile void * addr) local_irq_restore(flags); } -static inline void __set_bit(int nr, void *addr) -{ - int mask; - unsigned int *a = addr; - - a += nr >> 5; - mask = 1 << (nr & 0x1f); - *a |= mask; -} - /* * clear_bit() doesn't provide any barrier for the compiler. */ @@ -58,15 +48,6 @@ static inline void clear_bit(int nr, volatile unsigned long *a) local_irq_restore(flags); } -static inline void __clear_bit(int nr, volatile unsigned long *a) -{ - int mask; - - a += nr >> 5; - mask = 1 << (nr & 0x1f); - *a &= ~mask; -} - static __inline__ void change_bit(int nr, volatile void * addr) { int mask; @@ -80,16 +61,6 @@ static __inline__ void change_bit(int nr, volatile void * addr) local_irq_restore(flags); } -static __inline__ void __change_bit(int nr, volatile void * addr) -{ - int mask; - volatile unsigned int *a = addr; - - a += nr >> 5; - mask = 1 << (nr & 0x1f); - *a ^= mask; -} - static __inline__ int test_and_set_bit(int nr, volatile void * addr) { int mask, retval; @@ -106,19 +77,6 @@ static __inline__ int test_and_set_bit(int nr, volatile void * addr) return retval; } -static __inline__ int __test_and_set_bit(int nr, volatile void * addr) -{ - int mask, retval; - volatile unsigned int *a = addr; - - a += nr >> 5; - mask = 1 << (nr & 0x1f); - retval = (mask & *a) != 0; - *a |= mask; - - return retval; -} - static __inline__ int test_and_clear_bit(int nr, volatile void * addr) { int mask, retval; @@ -135,19 +93,6 @@ static __inline__ int test_and_clear_bit(int nr, volatile void * addr) return retval; } -static __inline__ int __test_and_clear_bit(int nr, volatile void * addr) -{ - int mask, retval; - volatile unsigned int *a = addr; - - a += nr >> 5; - mask = 1 << (nr & 0x1f); - retval = (mask & *a) != 0; - *a &= ~mask; - - return retval; -} - static __inline__ int test_and_change_bit(int nr, volatile void * addr) { int mask, retval; @@ -164,23 +109,7 @@ static __inline__ int test_and_change_bit(int nr, volatile void * addr) return retval; } -static __inline__ int __test_and_change_bit(int nr, volatile void * addr) -{ - int mask, retval; - volatile unsigned int *a = addr; - - a += nr >> 5; - mask = 1 << (nr & 0x1f); - retval = (mask & *a) != 0; - *a ^= mask; - - return retval; -} - -static __inline__ int test_bit(int nr, const volatile void *addr) -{ - return 1UL & (((const volatile unsigned int *) addr)[nr >> 5] >> (nr & 31)); -} +#include 
<asm-generic/bitops/non-atomic.h> static __inline__ unsigned long ffz(unsigned long word) { @@ -204,313 +133,16 @@ static __inline__ unsigned long ffz(unsigned long word) return result; } -/** - * __ffs - find first bit in word - * @word: The word to search - * - * Undefined if no bit exists, so code should check against 0 first. - */ -static inline unsigned long __ffs(unsigned long word) -{ - int r = 0; - - if (!word) - return 0; - if (!(word & 0xffff)) { - word >>= 16; - r += 16; - } - if (!(word & 0xff)) { - word >>= 8; - r += 8; - } - if (!(word & 0xf)) { - word >>= 4; - r += 4; - } - if (!(word & 3)) { - word >>= 2; - r += 2; - } - if (!(word & 1)) { - word >>= 1; - r += 1; - } - return r; -} - -/** - * find_next_bit - find the next set bit in a memory region - * @addr: The address to base the search on - * @offset: The bitnumber to start searching at - * @size: The maximum size to search - */ -static inline unsigned long find_next_bit(const unsigned long *addr, - unsigned long size, unsigned long offset) -{ - unsigned int *p = ((unsigned int *) addr) + (offset >> 5); - unsigned int result = offset & ~31UL; - unsigned int tmp; - - if (offset >= size) - return size; - size -= result; - offset &= 31UL; - if (offset) { - tmp = *p++; - tmp &= ~0UL << offset; - if (size < 32) - goto found_first; - if (tmp) - goto found_middle; - size -= 32; - result += 32; - } - while (size >= 32) { - if ((tmp = *p++) != 0) - goto found_middle; - result += 32; - size -= 32; - } - if (!size) - return result; - tmp = *p; - -found_first: - tmp &= ~0UL >> (32 - size); - if (tmp == 0UL) /* Are any bits set? */ - return result + size; /* Nope. */ -found_middle: - return result + __ffs(tmp); -} - -/** - * find_first_bit - find the first set bit in a memory region - * @addr: The address to start the search at - * @size: The maximum size to search - * - * Returns the bit-number of the first set bit, not the number of the byte - * containing a bit. - */ -#define find_first_bit(addr, size) \ - find_next_bit((addr), (size), 0) - - -static inline int find_next_zero_bit(void *addr, int size, int offset) -{ - unsigned long *p = ((unsigned long *) addr) + (offset >> 5); - unsigned long result = offset & ~31UL; - unsigned long tmp; - - if (offset >= size) - return size; - size -= result; - offset &= 31UL; - if (offset) { - tmp = *(p++); - tmp |= ~0UL >> (32-offset); - if (size < 32) - goto found_first; - if (~tmp) - goto found_middle; - size -= 32; - result += 32; - } - while (size & ~31UL) { - if (~(tmp = *(p++))) - goto found_middle; - result += 32; - size -= 32; - } - if (!size) - return result; - tmp = *p; - -found_first: - tmp |= ~0UL << size; -found_middle: - return result + ffz(tmp); -} - -#define find_first_zero_bit(addr, size) \ - find_next_zero_bit((addr), (size), 0) - -/* - * hweightN: returns the hamming weight (i.e. the number - * of bits set) of a N-bit word - */ - -#define hweight32(x) generic_hweight32(x) -#define hweight16(x) generic_hweight16(x) -#define hweight8(x) generic_hweight8(x) - -/* - * Every architecture must define this function. It's the fastest - * way of searching a 140-bit bitmap where the first 100 bits are - * unlikely to be set. It's guaranteed that at least one of the 140 - * bits is cleared. 
- */ - -static inline int sched_find_first_bit(unsigned long *b) -{ - if (unlikely(b[0])) - return __ffs(b[0]); - if (unlikely(b[1])) - return __ffs(b[1]) + 32; - if (unlikely(b[2])) - return __ffs(b[2]) + 64; - if (b[3]) - return __ffs(b[3]) + 96; - return __ffs(b[4]) + 128; -} - -/* - * ffs: find first bit set. This is defined the same way as - * the libc and compiler builtin ffs routines, therefore - * differs in spirit from the above ffz (man ffs). - */ - -#define ffs(x) generic_ffs(x) - -/* - * hweightN: returns the hamming weight (i.e. the number - * of bits set) of a N-bit word - */ - -#define hweight32(x) generic_hweight32(x) -#define hweight16(x) generic_hweight16(x) -#define hweight8(x) generic_hweight8(x) - -#ifdef __LITTLE_ENDIAN__ -#define ext2_set_bit(nr, addr) test_and_set_bit((nr), (addr)) -#define ext2_clear_bit(nr, addr) test_and_clear_bit((nr), (addr)) -#define ext2_test_bit(nr, addr) test_bit((nr), (addr)) -#define ext2_find_first_zero_bit(addr, size) find_first_zero_bit((addr), (size)) -#define ext2_find_next_zero_bit(addr, size, offset) \ - find_next_zero_bit((addr), (size), (offset)) -#else -static __inline__ int ext2_set_bit(int nr, volatile void * addr) -{ - int mask, retval; - unsigned long flags; - volatile unsigned char *ADDR = (unsigned char *) addr; - - ADDR += nr >> 3; - mask = 1 << (nr & 0x07); - local_irq_save(flags); - retval = (mask & *ADDR) != 0; - *ADDR |= mask; - local_irq_restore(flags); - return retval; -} - -static __inline__ int ext2_clear_bit(int nr, volatile void * addr) -{ - int mask, retval; - unsigned long flags; - volatile unsigned char *ADDR = (unsigned char *) addr; - - ADDR += nr >> 3; - mask = 1 << (nr & 0x07); - local_irq_save(flags); - retval = (mask & *ADDR) != 0; - *ADDR &= ~mask; - local_irq_restore(flags); - return retval; -} - -static __inline__ int ext2_test_bit(int nr, const volatile void * addr) -{ - int mask; - const volatile unsigned char *ADDR = (const unsigned char *) addr; - - ADDR += nr >> 3; - mask = 1 << (nr & 0x07); - return ((mask & *ADDR) != 0); -} - -#define ext2_find_first_zero_bit(addr, size) \ - ext2_find_next_zero_bit((addr), (size), 0) - -static __inline__ unsigned long ext2_find_next_zero_bit(void *addr, unsigned long size, unsigned long offset) -{ - unsigned long *p = ((unsigned long *) addr) + (offset >> 5); - unsigned long result = offset & ~31UL; - unsigned long tmp; - - if (offset >= size) - return size; - size -= result; - offset &= 31UL; - if(offset) { - /* We hold the little endian value in tmp, but then the - * shift is illegal. So we could keep a big endian value - * in tmp, like this: - * - * tmp = __swab32(*(p++)); - * tmp |= ~0UL >> (32-offset); - * - * but this would decrease preformance, so we change the - * shift: - */ - tmp = *(p++); - tmp |= __swab32(~0UL >> (32-offset)); - if(size < 32) - goto found_first; - if(~tmp) - goto found_middle; - size -= 32; - result += 32; - } - while(size & ~31UL) { - if(~(tmp = *(p++))) - goto found_middle; - result += 32; - size -= 32; - } - if(!size) - return result; - tmp = *p; - -found_first: - /* tmp is little endian, so we would have to swab the shift, - * see above. But then we have to swab tmp below for ffz, so - * we might as well do this here. 
- */ - return result + ffz(__swab32(tmp) | (~0UL << size)); -found_middle: - return result + ffz(__swab32(tmp)); -} -#endif - -#define ext2_set_bit_atomic(lock, nr, addr) \ - ({ \ - int ret; \ - spin_lock(lock); \ - ret = ext2_set_bit((nr), (addr)); \ - spin_unlock(lock); \ - ret; \ - }) - -#define ext2_clear_bit_atomic(lock, nr, addr) \ - ({ \ - int ret; \ - spin_lock(lock); \ - ret = ext2_clear_bit((nr), (addr)); \ - spin_unlock(lock); \ - ret; \ - }) - -/* Bitmap functions for the minix filesystem. */ -#define minix_test_and_set_bit(nr,addr) test_and_set_bit(nr,addr) -#define minix_set_bit(nr,addr) set_bit(nr,addr) -#define minix_test_and_clear_bit(nr,addr) test_and_clear_bit(nr,addr) -#define minix_test_bit(nr,addr) test_bit(nr,addr) -#define minix_find_first_zero_bit(addr,size) find_first_zero_bit(addr,size) - -#define ffs(x) generic_ffs(x) -#define fls(x) generic_fls(x) -#define fls64(x) generic_fls64(x) +#include <asm-generic/bitops/__ffs.h> +#include <asm-generic/bitops/find.h> +#include <asm-generic/bitops/hweight.h> +#include <asm-generic/bitops/sched.h> +#include <asm-generic/bitops/ffs.h> +#include <asm-generic/bitops/ext2-non-atomic.h> +#include <asm-generic/bitops/ext2-atomic.h> +#include <asm-generic/bitops/minix.h> +#include <asm-generic/bitops/fls.h> +#include <asm-generic/bitops/fls64.h> #endif /* __KERNEL__ */ diff --git a/include/asm-sh64/page.h b/include/asm-sh64/page.h index c86df90f7cbd..e4937cdabebd 100644 --- a/include/asm-sh64/page.h +++ b/include/asm-sh64/page.h @@ -105,9 +105,7 @@ typedef struct { unsigned long pgprot; } pgprot_t; /* PFN start number, because of __MEMORY_START */ #define PFN_START (__MEMORY_START >> PAGE_SHIFT) - -#define pfn_to_page(pfn) (mem_map + (pfn) - PFN_START) -#define page_to_pfn(page) ((unsigned long)((page) - mem_map) + PFN_START) +#define ARCH_PFN_OFFSET (PFN_START) #define virt_to_page(kaddr) pfn_to_page(__pa(kaddr) >> PAGE_SHIFT) #define pfn_valid(pfn) (((pfn) - PFN_START) < max_mapnr) #define virt_addr_valid(kaddr) pfn_valid(__pa(kaddr) >> PAGE_SHIFT) @@ -117,6 +115,7 @@ typedef struct { unsigned long pgprot; } pgprot_t; #endif /* __KERNEL__ */ +#include <asm-generic/memory_model.h> #include <asm-generic/page.h> #endif /* __ASM_SH64_PAGE_H */ diff --git a/include/asm-sh64/platform.h b/include/asm-sh64/platform.h index 7046a9014027..bd0d9c405a80 100644 --- a/include/asm-sh64/platform.h +++ b/include/asm-sh64/platform.h @@ -61,9 +61,4 @@ extern int platform_int_priority[NR_INTC_IRQS]; #define code_resource (platform_parms.kram_res_p[STANDARD_KRAM_RESOURCES - 2]) #define data_resource (platform_parms.kram_res_p[STANDARD_KRAM_RESOURCES - 1]) -/* Be prepared to 64-bit sign extensions */ -#define PFN_UP(x) ((((x) + PAGE_SIZE-1) >> PAGE_SHIFT) & 0x000fffff) -#define PFN_DOWN(x) (((x) >> PAGE_SHIFT) & 0x000fffff) -#define PFN_PHYS(x) ((x) << PAGE_SHIFT) - #endif /* __ASM_SH64_PLATFORM_H */ diff --git a/include/asm-sparc/bitops.h b/include/asm-sparc/bitops.h index 41722b5e45ef..04aa3318f76a 100644 --- a/include/asm-sparc/bitops.h +++ b/include/asm-sparc/bitops.h @@ -152,386 +152,22 @@ static inline void change_bit(unsigned long nr, volatile unsigned long *addr) : "memory", "cc"); } -/* - * non-atomic versions - */ -static inline void __set_bit(int nr, volatile unsigned long *addr) -{ - unsigned long mask = 1UL << (nr & 0x1f); - unsigned long *p = ((unsigned long *)addr) + (nr >> 5); - - *p |= mask; -} - -static inline void __clear_bit(int nr, volatile unsigned long *addr) -{ - unsigned long mask = 1UL << (nr & 0x1f); - unsigned long *p = 
((unsigned long *)addr) + (nr >> 5); - - *p &= ~mask; -} - -static inline void __change_bit(int nr, volatile unsigned long *addr) -{ - unsigned long mask = 1UL << (nr & 0x1f); - unsigned long *p = ((unsigned long *)addr) + (nr >> 5); - - *p ^= mask; -} - -static inline int __test_and_set_bit(int nr, volatile unsigned long *addr) -{ - unsigned long mask = 1UL << (nr & 0x1f); - unsigned long *p = ((unsigned long *)addr) + (nr >> 5); - unsigned long old = *p; - - *p = old | mask; - return (old & mask) != 0; -} - -static inline int __test_and_clear_bit(int nr, volatile unsigned long *addr) -{ - unsigned long mask = 1UL << (nr & 0x1f); - unsigned long *p = ((unsigned long *)addr) + (nr >> 5); - unsigned long old = *p; - - *p = old & ~mask; - return (old & mask) != 0; -} - -static inline int __test_and_change_bit(int nr, volatile unsigned long *addr) -{ - unsigned long mask = 1UL << (nr & 0x1f); - unsigned long *p = ((unsigned long *)addr) + (nr >> 5); - unsigned long old = *p; - - *p = old ^ mask; - return (old & mask) != 0; -} +#include <asm-generic/bitops/non-atomic.h> #define smp_mb__before_clear_bit() do { } while(0) #define smp_mb__after_clear_bit() do { } while(0) -/* The following routine need not be atomic. */ -static inline int test_bit(int nr, __const__ volatile unsigned long *addr) -{ - return (1UL & (((unsigned long *)addr)[nr >> 5] >> (nr & 31))) != 0UL; -} - -/* The easy/cheese version for now. */ -static inline unsigned long ffz(unsigned long word) -{ - unsigned long result = 0; - - while(word & 1) { - result++; - word >>= 1; - } - return result; -} - -/** - * __ffs - find first bit in word. - * @word: The word to search - * - * Undefined if no bit exists, so code should check against 0 first. - */ -static inline int __ffs(unsigned long word) -{ - int num = 0; - - if ((word & 0xffff) == 0) { - num += 16; - word >>= 16; - } - if ((word & 0xff) == 0) { - num += 8; - word >>= 8; - } - if ((word & 0xf) == 0) { - num += 4; - word >>= 4; - } - if ((word & 0x3) == 0) { - num += 2; - word >>= 2; - } - if ((word & 0x1) == 0) - num += 1; - return num; -} - -/* - * Every architecture must define this function. It's the fastest - * way of searching a 140-bit bitmap where the first 100 bits are - * unlikely to be set. It's guaranteed that at least one of the 140 - * bits is cleared. - */ -static inline int sched_find_first_bit(unsigned long *b) -{ - - if (unlikely(b[0])) - return __ffs(b[0]); - if (unlikely(b[1])) - return __ffs(b[1]) + 32; - if (unlikely(b[2])) - return __ffs(b[2]) + 64; - if (b[3]) - return __ffs(b[3]) + 96; - return __ffs(b[4]) + 128; -} - -/* - * ffs: find first bit set. This is defined the same way as - * the libc and compiler builtin ffs routines, therefore - * differs in spirit from the above ffz (man ffs). - */ -static inline int ffs(int x) -{ - if (!x) - return 0; - return __ffs((unsigned long)x) + 1; -} - -/* - * fls: find last (most-significant) bit set. - * Note fls(0) = 0, fls(1) = 1, fls(0x80000000) = 32. - */ -#define fls(x) generic_fls(x) -#define fls64(x) generic_fls64(x) - -/* - * hweightN: returns the hamming weight (i.e. the number - * of bits set) of a N-bit word - */ -#define hweight32(x) generic_hweight32(x) -#define hweight16(x) generic_hweight16(x) -#define hweight8(x) generic_hweight8(x) - -/* - * find_next_zero_bit() finds the first zero bit in a bit string of length - * 'size' bits, starting the search at bit 'offset'. This is largely based - * on Linus's ALPHA routines, which are pretty portable BTW. 
- */ -static inline unsigned long find_next_zero_bit(const unsigned long *addr, - unsigned long size, unsigned long offset) -{ - const unsigned long *p = addr + (offset >> 5); - unsigned long result = offset & ~31UL; - unsigned long tmp; - - if (offset >= size) - return size; - size -= result; - offset &= 31UL; - if (offset) { - tmp = *(p++); - tmp |= ~0UL >> (32-offset); - if (size < 32) - goto found_first; - if (~tmp) - goto found_middle; - size -= 32; - result += 32; - } - while (size & ~31UL) { - if (~(tmp = *(p++))) - goto found_middle; - result += 32; - size -= 32; - } - if (!size) - return result; - tmp = *p; - -found_first: - tmp |= ~0UL << size; - if (tmp == ~0UL) /* Are any bits zero? */ - return result + size; /* Nope. */ -found_middle: - return result + ffz(tmp); -} - -/* - * Linus sez that gcc can optimize the following correctly, we'll see if this - * holds on the Sparc as it does for the ALPHA. - */ -#define find_first_zero_bit(addr, size) \ - find_next_zero_bit((addr), (size), 0) - -/** - * find_next_bit - find the first set bit in a memory region - * @addr: The address to base the search on - * @offset: The bitnumber to start searching at - * @size: The maximum size to search - * - * Scheduler induced bitop, do not use. - */ -static inline int find_next_bit(const unsigned long *addr, int size, int offset) -{ - const unsigned long *p = addr + (offset >> 5); - int num = offset & ~0x1f; - unsigned long word; - - word = *p++; - word &= ~((1 << (offset & 0x1f)) - 1); - while (num < size) { - if (word != 0) { - return __ffs(word) + num; - } - word = *p++; - num += 0x20; - } - return num; -} - -/** - * find_first_bit - find the first set bit in a memory region - * @addr: The address to start the search at - * @size: The maximum size to search - * - * Returns the bit-number of the first set bit, not the number of the byte - * containing a bit. 
- */ -#define find_first_bit(addr, size) \ - find_next_bit((addr), (size), 0) - -/* - */ -static inline int test_le_bit(int nr, __const__ unsigned long * addr) -{ - __const__ unsigned char *ADDR = (__const__ unsigned char *) addr; - return (ADDR[nr >> 3] >> (nr & 7)) & 1; -} - -/* - * non-atomic versions - */ -static inline void __set_le_bit(int nr, unsigned long *addr) -{ - unsigned char *ADDR = (unsigned char *)addr; - - ADDR += nr >> 3; - *ADDR |= 1 << (nr & 0x07); -} - -static inline void __clear_le_bit(int nr, unsigned long *addr) -{ - unsigned char *ADDR = (unsigned char *)addr; - - ADDR += nr >> 3; - *ADDR &= ~(1 << (nr & 0x07)); -} - -static inline int __test_and_set_le_bit(int nr, unsigned long *addr) -{ - int mask, retval; - unsigned char *ADDR = (unsigned char *)addr; - - ADDR += nr >> 3; - mask = 1 << (nr & 0x07); - retval = (mask & *ADDR) != 0; - *ADDR |= mask; - return retval; -} - -static inline int __test_and_clear_le_bit(int nr, unsigned long *addr) -{ - int mask, retval; - unsigned char *ADDR = (unsigned char *)addr; - - ADDR += nr >> 3; - mask = 1 << (nr & 0x07); - retval = (mask & *ADDR) != 0; - *ADDR &= ~mask; - return retval; -} - -static inline unsigned long find_next_zero_le_bit(const unsigned long *addr, - unsigned long size, unsigned long offset) -{ - const unsigned long *p = addr + (offset >> 5); - unsigned long result = offset & ~31UL; - unsigned long tmp; - - if (offset >= size) - return size; - size -= result; - offset &= 31UL; - if(offset) { - tmp = *(p++); - tmp |= __swab32(~0UL >> (32-offset)); - if(size < 32) - goto found_first; - if(~tmp) - goto found_middle; - size -= 32; - result += 32; - } - while(size & ~31UL) { - if(~(tmp = *(p++))) - goto found_middle; - result += 32; - size -= 32; - } - if(!size) - return result; - tmp = *p; - -found_first: - tmp = __swab32(tmp) | (~0UL << size); - if (tmp == ~0UL) /* Are any bits zero? */ - return result + size; /* Nope. */ - return result + ffz(tmp); - -found_middle: - return result + ffz(__swab32(tmp)); -} - -#define find_first_zero_le_bit(addr, size) \ - find_next_zero_le_bit((addr), (size), 0) - -#define ext2_set_bit(nr,addr) \ - __test_and_set_le_bit((nr),(unsigned long *)(addr)) -#define ext2_clear_bit(nr,addr) \ - __test_and_clear_le_bit((nr),(unsigned long *)(addr)) - -#define ext2_set_bit_atomic(lock, nr, addr) \ - ({ \ - int ret; \ - spin_lock(lock); \ - ret = ext2_set_bit((nr), (unsigned long *)(addr)); \ - spin_unlock(lock); \ - ret; \ - }) - -#define ext2_clear_bit_atomic(lock, nr, addr) \ - ({ \ - int ret; \ - spin_lock(lock); \ - ret = ext2_clear_bit((nr), (unsigned long *)(addr)); \ - spin_unlock(lock); \ - ret; \ - }) - -#define ext2_test_bit(nr,addr) \ - test_le_bit((nr),(unsigned long *)(addr)) -#define ext2_find_first_zero_bit(addr, size) \ - find_first_zero_le_bit((unsigned long *)(addr), (size)) -#define ext2_find_next_zero_bit(addr, size, off) \ - find_next_zero_le_bit((unsigned long *)(addr), (size), (off)) - -/* Bitmap functions for the minix filesystem. 
*/ -#define minix_test_and_set_bit(nr,addr) \ - test_and_set_bit((nr),(unsigned long *)(addr)) -#define minix_set_bit(nr,addr) \ - set_bit((nr),(unsigned long *)(addr)) -#define minix_test_and_clear_bit(nr,addr) \ - test_and_clear_bit((nr),(unsigned long *)(addr)) -#define minix_test_bit(nr,addr) \ - test_bit((nr),(unsigned long *)(addr)) -#define minix_find_first_zero_bit(addr,size) \ - find_first_zero_bit((unsigned long *)(addr),(size)) +#include <asm-generic/bitops/ffz.h> +#include <asm-generic/bitops/__ffs.h> +#include <asm-generic/bitops/sched.h> +#include <asm-generic/bitops/ffs.h> +#include <asm-generic/bitops/fls.h> +#include <asm-generic/bitops/fls64.h> +#include <asm-generic/bitops/hweight.h> +#include <asm-generic/bitops/find.h> +#include <asm-generic/bitops/ext2-non-atomic.h> +#include <asm-generic/bitops/ext2-atomic.h> +#include <asm-generic/bitops/minix.h> #endif /* __KERNEL__ */ diff --git a/include/asm-sparc/page.h b/include/asm-sparc/page.h index 9122684f6c1e..ec3274b7ddf4 100644 --- a/include/asm-sparc/page.h +++ b/include/asm-sparc/page.h @@ -152,8 +152,7 @@ extern unsigned long pfn_base; #define virt_to_phys __pa #define phys_to_virt __va -#define pfn_to_page(pfn) (mem_map + ((pfn)-(pfn_base))) -#define page_to_pfn(page) ((unsigned long)(((page) - mem_map) + pfn_base)) +#define ARCH_PFN_OFFSET (pfn_base) #define virt_to_page(kaddr) (mem_map + ((((unsigned long)(kaddr)-PAGE_OFFSET)>>PAGE_SHIFT))) #define pfn_valid(pfn) (((pfn) >= (pfn_base)) && (((pfn)-(pfn_base)) < max_mapnr)) @@ -164,6 +163,7 @@ extern unsigned long pfn_base; #endif /* __KERNEL__ */ +#include <asm-generic/memory_model.h> #include <asm-generic/page.h> #endif /* _SPARC_PAGE_H */ diff --git a/include/asm-sparc64/bitops.h b/include/asm-sparc64/bitops.h index 6efc0162fb09..71944b0f09de 100644 --- a/include/asm-sparc64/bitops.h +++ b/include/asm-sparc64/bitops.h @@ -18,58 +18,7 @@ extern void set_bit(unsigned long nr, volatile unsigned long *addr); extern void clear_bit(unsigned long nr, volatile unsigned long *addr); extern void change_bit(unsigned long nr, volatile unsigned long *addr); -/* "non-atomic" versions... 
*/ - -static inline void __set_bit(int nr, volatile unsigned long *addr) -{ - unsigned long *m = ((unsigned long *)addr) + (nr >> 6); - - *m |= (1UL << (nr & 63)); -} - -static inline void __clear_bit(int nr, volatile unsigned long *addr) -{ - unsigned long *m = ((unsigned long *)addr) + (nr >> 6); - - *m &= ~(1UL << (nr & 63)); -} - -static inline void __change_bit(int nr, volatile unsigned long *addr) -{ - unsigned long *m = ((unsigned long *)addr) + (nr >> 6); - - *m ^= (1UL << (nr & 63)); -} - -static inline int __test_and_set_bit(int nr, volatile unsigned long *addr) -{ - unsigned long *m = ((unsigned long *)addr) + (nr >> 6); - unsigned long old = *m; - unsigned long mask = (1UL << (nr & 63)); - - *m = (old | mask); - return ((old & mask) != 0); -} - -static inline int __test_and_clear_bit(int nr, volatile unsigned long *addr) -{ - unsigned long *m = ((unsigned long *)addr) + (nr >> 6); - unsigned long old = *m; - unsigned long mask = (1UL << (nr & 63)); - - *m = (old & ~mask); - return ((old & mask) != 0); -} - -static inline int __test_and_change_bit(int nr, volatile unsigned long *addr) -{ - unsigned long *m = ((unsigned long *)addr) + (nr >> 6); - unsigned long old = *m; - unsigned long mask = (1UL << (nr & 63)); - - *m = (old ^ mask); - return ((old & mask) != 0); -} +#include <asm-generic/bitops/non-atomic.h> #ifdef CONFIG_SMP #define smp_mb__before_clear_bit() membar_storeload_loadload() @@ -79,78 +28,15 @@ static inline int __test_and_change_bit(int nr, volatile unsigned long *addr) #define smp_mb__after_clear_bit() barrier() #endif -static inline int test_bit(int nr, __const__ volatile unsigned long *addr) -{ - return (1UL & (addr[nr >> 6] >> (nr & 63))) != 0UL; -} - -/* The easy/cheese version for now. */ -static inline unsigned long ffz(unsigned long word) -{ - unsigned long result; - - result = 0; - while(word & 1) { - result++; - word >>= 1; - } - return result; -} - -/** - * __ffs - find first bit in word. - * @word: The word to search - * - * Undefined if no bit exists, so code should check against 0 first. - */ -static inline unsigned long __ffs(unsigned long word) -{ - unsigned long result = 0; - - while (!(word & 1UL)) { - result++; - word >>= 1; - } - return result; -} - -/* - * fls: find last bit set. - */ - -#define fls(x) generic_fls(x) -#define fls64(x) generic_fls64(x) +#include <asm-generic/bitops/ffz.h> +#include <asm-generic/bitops/__ffs.h> +#include <asm-generic/bitops/fls.h> +#include <asm-generic/bitops/fls64.h> #ifdef __KERNEL__ -/* - * Every architecture must define this function. It's the fastest - * way of searching a 140-bit bitmap where the first 100 bits are - * unlikely to be set. It's guaranteed that at least one of the 140 - * bits is cleared. - */ -static inline int sched_find_first_bit(unsigned long *b) -{ - if (unlikely(b[0])) - return __ffs(b[0]); - if (unlikely(((unsigned int)b[1]))) - return __ffs(b[1]) + 64; - if (b[1] >> 32) - return __ffs(b[1] >> 32) + 96; - return __ffs(b[2]) + 128; -} - -/* - * ffs: find first bit set. This is defined the same way as - * the libc and compiler builtin ffs routines, therefore - * differs in spirit from the above ffz (man ffs). - */ -static inline int ffs(int x) -{ - if (!x) - return 0; - return __ffs((unsigned long)x) + 1; -} +#include <asm-generic/bitops/sched.h> +#include <asm-generic/bitops/ffs.h> /* * hweightN: returns the hamming weight (i.e. 
the number @@ -193,102 +79,23 @@ static inline unsigned int hweight8(unsigned int w) #else -#define hweight64(x) generic_hweight64(x) -#define hweight32(x) generic_hweight32(x) -#define hweight16(x) generic_hweight16(x) -#define hweight8(x) generic_hweight8(x) +#include <asm-generic/bitops/hweight.h> #endif #endif /* __KERNEL__ */ -/** - * find_next_bit - find the next set bit in a memory region - * @addr: The address to base the search on - * @offset: The bitnumber to start searching at - * @size: The maximum size to search - */ -extern unsigned long find_next_bit(const unsigned long *, unsigned long, - unsigned long); - -/** - * find_first_bit - find the first set bit in a memory region - * @addr: The address to start the search at - * @size: The maximum size to search - * - * Returns the bit-number of the first set bit, not the number of the byte - * containing a bit. - */ -#define find_first_bit(addr, size) \ - find_next_bit((addr), (size), 0) - -/* find_next_zero_bit() finds the first zero bit in a bit string of length - * 'size' bits, starting the search at bit 'offset'. This is largely based - * on Linus's ALPHA routines, which are pretty portable BTW. - */ - -extern unsigned long find_next_zero_bit(const unsigned long *, - unsigned long, unsigned long); - -#define find_first_zero_bit(addr, size) \ - find_next_zero_bit((addr), (size), 0) - -#define test_and_set_le_bit(nr,addr) \ - test_and_set_bit((nr) ^ 0x38, (addr)) -#define test_and_clear_le_bit(nr,addr) \ - test_and_clear_bit((nr) ^ 0x38, (addr)) - -static inline int test_le_bit(int nr, __const__ unsigned long * addr) -{ - int mask; - __const__ unsigned char *ADDR = (__const__ unsigned char *) addr; - - ADDR += nr >> 3; - mask = 1 << (nr & 0x07); - return ((mask & *ADDR) != 0); -} - -#define find_first_zero_le_bit(addr, size) \ - find_next_zero_le_bit((addr), (size), 0) - -extern unsigned long find_next_zero_le_bit(unsigned long *, unsigned long, unsigned long); +#include <asm-generic/bitops/find.h> #ifdef __KERNEL__ -#define __set_le_bit(nr, addr) \ - __set_bit((nr) ^ 0x38, (addr)) -#define __clear_le_bit(nr, addr) \ - __clear_bit((nr) ^ 0x38, (addr)) -#define __test_and_clear_le_bit(nr, addr) \ - __test_and_clear_bit((nr) ^ 0x38, (addr)) -#define __test_and_set_le_bit(nr, addr) \ - __test_and_set_bit((nr) ^ 0x38, (addr)) +#include <asm-generic/bitops/ext2-non-atomic.h> -#define ext2_set_bit(nr,addr) \ - __test_and_set_le_bit((nr),(unsigned long *)(addr)) #define ext2_set_bit_atomic(lock,nr,addr) \ - test_and_set_le_bit((nr),(unsigned long *)(addr)) -#define ext2_clear_bit(nr,addr) \ - __test_and_clear_le_bit((nr),(unsigned long *)(addr)) + test_and_set_bit((nr) ^ 0x38,(unsigned long *)(addr)) #define ext2_clear_bit_atomic(lock,nr,addr) \ - test_and_clear_le_bit((nr),(unsigned long *)(addr)) -#define ext2_test_bit(nr,addr) \ - test_le_bit((nr),(unsigned long *)(addr)) -#define ext2_find_first_zero_bit(addr, size) \ - find_first_zero_le_bit((unsigned long *)(addr), (size)) -#define ext2_find_next_zero_bit(addr, size, off) \ - find_next_zero_le_bit((unsigned long *)(addr), (size), (off)) + test_and_clear_bit((nr) ^ 0x38,(unsigned long *)(addr)) -/* Bitmap functions for the minix filesystem. 
*/ -#define minix_test_and_set_bit(nr,addr) \ - test_and_set_bit((nr),(unsigned long *)(addr)) -#define minix_set_bit(nr,addr) \ - set_bit((nr),(unsigned long *)(addr)) -#define minix_test_and_clear_bit(nr,addr) \ - test_and_clear_bit((nr),(unsigned long *)(addr)) -#define minix_test_bit(nr,addr) \ - test_bit((nr),(unsigned long *)(addr)) -#define minix_find_first_zero_bit(addr,size) \ - find_first_zero_bit((unsigned long *)(addr),(size)) +#include <asm-generic/bitops/minix.h> #endif /* __KERNEL__ */ diff --git a/include/asm-sparc64/floppy.h b/include/asm-sparc64/floppy.h index 49d49a285943..6a95d5d0c576 100644 --- a/include/asm-sparc64/floppy.h +++ b/include/asm-sparc64/floppy.h @@ -738,7 +738,7 @@ static unsigned long __init sun_floppy_init(void) if (!sun_floppy_types[0] && sun_floppy_types[1]) { /* * Set the drive exchange bit in FCR on NS87303, - * make shure other bits are sane before doing so. + * make sure other bits are sane before doing so. */ ns87303_modify(config, FER, FER_EDM, 0); ns87303_modify(config, ASC, ASC_DRV2_SEL, 0); diff --git a/include/asm-sparc64/futex.h b/include/asm-sparc64/futex.h index 34c4b43d3f98..dee40206b221 100644 --- a/include/asm-sparc64/futex.h +++ b/include/asm-sparc64/futex.h @@ -83,4 +83,28 @@ static inline int futex_atomic_op_inuser(int encoded_op, int __user *uaddr) return ret; } +static inline int +futex_atomic_cmpxchg_inatomic(int __user *uaddr, int oldval, int newval) +{ + __asm__ __volatile__( + "\n1: lduwa [%2] %%asi, %0\n" + "2: casa [%2] %%asi, %0, %1\n" + "3:\n" + " .section .fixup,#alloc,#execinstr\n" + " .align 4\n" + "4: ba 3b\n" + " mov %3, %0\n" + " .previous\n" + " .section __ex_table,\"a\"\n" + " .align 4\n" + " .word 1b, 4b\n" + " .word 2b, 4b\n" + " .previous\n" + : "=&r" (oldval) + : "r" (newval), "r" (uaddr), "i" (-EFAULT) + : "memory"); + + return oldval; +} + #endif /* !(_SPARC64_FUTEX_H) */ diff --git a/include/asm-sparc64/kdebug.h b/include/asm-sparc64/kdebug.h index 6321f5a0198d..4040d127ac3e 100644 --- a/include/asm-sparc64/kdebug.h +++ b/include/asm-sparc64/kdebug.h @@ -15,12 +15,9 @@ struct die_args { int signr; }; -/* Note - you should never unregister because that can race with NMIs. - * If you really want to do it first unregister - then synchronize_sched - * - then free. 
- */ -int register_die_notifier(struct notifier_block *nb); -extern struct notifier_block *sparc64die_chain; +extern int register_die_notifier(struct notifier_block *); +extern int unregister_die_notifier(struct notifier_block *); +extern struct atomic_notifier_head sparc64die_chain; extern void bad_trap(struct pt_regs *, long); @@ -46,7 +43,7 @@ static inline int notify_die(enum die_val val,char *str, struct pt_regs *regs, .trapnr = trap, .signr = sig }; - return notifier_call_chain(&sparc64die_chain, val, &args); + return atomic_notifier_call_chain(&sparc64die_chain, val, &args); } #endif diff --git a/include/asm-sparc64/page.h b/include/asm-sparc64/page.h index 66fe4ac59fd6..aabb21906724 100644 --- a/include/asm-sparc64/page.h +++ b/include/asm-sparc64/page.h @@ -111,6 +111,8 @@ typedef unsigned long pgprot_t; (_AC(0x0000000070000000,UL)) : \ (_AC(0xfffff80000000000,UL) + (1UL << 32UL))) +#include <asm-generic/memory_model.h> + #endif /* !(__ASSEMBLY__) */ /* to align the pointer to the (next) page boundary */ diff --git a/include/asm-um/page.h b/include/asm-um/page.h index 0229814af31e..41364330aff1 100644 --- a/include/asm-um/page.h +++ b/include/asm-um/page.h @@ -106,9 +106,6 @@ extern unsigned long uml_physmem; #define __pa(virt) to_phys((void *) (unsigned long) (virt)) #define __va(phys) to_virt((unsigned long) (phys)) -#define page_to_pfn(page) ((page) - mem_map) -#define pfn_to_page(pfn) (mem_map + (pfn)) - #define phys_to_pfn(p) ((p) >> PAGE_SHIFT) #define pfn_to_phys(pfn) ((pfn) << PAGE_SHIFT) @@ -121,6 +118,7 @@ extern struct page *arch_validate(struct page *page, gfp_t mask, int order); extern void arch_free_page(struct page *page, int order); #define HAVE_ARCH_FREE_PAGE +#include <asm-generic/memory_model.h> #include <asm-generic/page.h> #endif diff --git a/include/asm-um/uaccess.h b/include/asm-um/uaccess.h index 2ee028b8de9d..4e460d6f5ac8 100644 --- a/include/asm-um/uaccess.h +++ b/include/asm-um/uaccess.h @@ -41,16 +41,16 @@ #define __get_user(x, ptr) \ ({ \ - const __typeof__(ptr) __private_ptr = ptr; \ - __typeof__(*(__private_ptr)) __private_val; \ - int __private_ret = -EFAULT; \ - (x) = (__typeof__(*(__private_ptr)))0; \ - if (__copy_from_user(&__private_val, (__private_ptr), \ - sizeof(*(__private_ptr))) == 0) {\ - (x) = (__typeof__(*(__private_ptr))) __private_val; \ - __private_ret = 0; \ - } \ - __private_ret; \ + const __typeof__(ptr) __private_ptr = ptr; \ + __typeof__(x) __private_val; \ + int __private_ret = -EFAULT; \ + (x) = (__typeof__(*(__private_ptr)))0; \ + if (__copy_from_user((void *) &__private_val, (__private_ptr), \ + sizeof(*(__private_ptr))) == 0) { \ + (x) = (__typeof__(*(__private_ptr))) __private_val; \ + __private_ret = 0; \ + } \ + __private_ret; \ }) #define get_user(x, ptr) \ @@ -89,14 +89,3 @@ struct exception_table_entry }; #endif - -/* - * Overrides for Emacs so that we follow Linus's tabbing style. - * Emacs will notice this stuff at the end of the file and automatically - * adjust the settings for this buffer only. This must remain at the end - * of the file. - * --------------------------------------------------------------------------- - * Local variables: - * c-file-style: "linux" - * End: - */ diff --git a/include/asm-v850/bitops.h b/include/asm-v850/bitops.h index 609b9e87222a..1f6fd5ab4177 100644 --- a/include/asm-v850/bitops.h +++ b/include/asm-v850/bitops.h @@ -22,25 +22,11 @@ #ifdef __KERNEL__ -/* - * The __ functions are not atomic - */ +#include <asm-generic/bitops/ffz.h> /* - * ffz = Find First Zero in word. 
Undefined if no zero exists, - * so code should check against ~0UL first.. + * The __ functions are not atomic */ -static inline unsigned long ffz (unsigned long word) -{ - unsigned long result = 0; - - while (word & 1) { - result++; - word >>= 1; - } - return result; -} - /* In the following constant-bit-op macros, a "g" constraint is used when we really need an integer ("i" constraint). This is to avoid @@ -153,203 +139,19 @@ static inline int __test_bit (int nr, const void *addr) #define smp_mb__before_clear_bit() barrier () #define smp_mb__after_clear_bit() barrier () +#include <asm-generic/bitops/ffs.h> +#include <asm-generic/bitops/fls.h> +#include <asm-generic/bitops/fls64.h> +#include <asm-generic/bitops/__ffs.h> +#include <asm-generic/bitops/find.h> +#include <asm-generic/bitops/sched.h> +#include <asm-generic/bitops/hweight.h> -#define find_first_zero_bit(addr, size) \ - find_next_zero_bit ((addr), (size), 0) - -static inline int find_next_zero_bit(const void *addr, int size, int offset) -{ - unsigned long *p = ((unsigned long *) addr) + (offset >> 5); - unsigned long result = offset & ~31UL; - unsigned long tmp; - - if (offset >= size) - return size; - size -= result; - offset &= 31UL; - if (offset) { - tmp = * (p++); - tmp |= ~0UL >> (32-offset); - if (size < 32) - goto found_first; - if (~tmp) - goto found_middle; - size -= 32; - result += 32; - } - while (size & ~31UL) { - if (~ (tmp = * (p++))) - goto found_middle; - result += 32; - size -= 32; - } - if (!size) - return result; - tmp = *p; - - found_first: - tmp |= ~0UL << size; - found_middle: - return result + ffz (tmp); -} - - -/* This is the same as generic_ffs, but we can't use that because it's - inline and the #include order mucks things up. */ -static inline int generic_ffs_for_find_next_bit(int x) -{ - int r = 1; - - if (!x) - return 0; - if (!(x & 0xffff)) { - x >>= 16; - r += 16; - } - if (!(x & 0xff)) { - x >>= 8; - r += 8; - } - if (!(x & 0xf)) { - x >>= 4; - r += 4; - } - if (!(x & 3)) { - x >>= 2; - r += 2; - } - if (!(x & 1)) { - x >>= 1; - r += 1; - } - return r; -} - -/* - * Find next one bit in a bitmap reasonably efficiently. - */ -static __inline__ unsigned long find_next_bit(const unsigned long *addr, - unsigned long size, unsigned long offset) -{ - unsigned int *p = ((unsigned int *) addr) + (offset >> 5); - unsigned int result = offset & ~31UL; - unsigned int tmp; - - if (offset >= size) - return size; - size -= result; - offset &= 31UL; - if (offset) { - tmp = *p++; - tmp &= ~0UL << offset; - if (size < 32) - goto found_first; - if (tmp) - goto found_middle; - size -= 32; - result += 32; - } - while (size >= 32) { - if ((tmp = *p++) != 0) - goto found_middle; - result += 32; - size -= 32; - } - if (!size) - return result; - tmp = *p; - -found_first: - tmp &= ~0UL >> (32 - size); - if (tmp == 0UL) /* Are any bits set? */ - return result + size; /* Nope. */ -found_middle: - return result + generic_ffs_for_find_next_bit(tmp); -} - -/* - * find_first_bit - find the first set bit in a memory region - */ -#define find_first_bit(addr, size) \ - find_next_bit((addr), (size), 0) - - -#define ffs(x) generic_ffs (x) -#define fls(x) generic_fls (x) -#define fls64(x) generic_fls64(x) -#define __ffs(x) ffs(x) - - -/* - * This is just `generic_ffs' from <linux/bitops.h>, except that it assumes - * that at least one bit is set, and returns the real index of the bit - * (rather than the bit index + 1, like ffs does). 
- */ -static inline int sched_ffs(int x) -{ - int r = 0; - - if (!(x & 0xffff)) { - x >>= 16; - r += 16; - } - if (!(x & 0xff)) { - x >>= 8; - r += 8; - } - if (!(x & 0xf)) { - x >>= 4; - r += 4; - } - if (!(x & 3)) { - x >>= 2; - r += 2; - } - if (!(x & 1)) { - x >>= 1; - r += 1; - } - return r; -} - -/* - * Every architecture must define this function. It's the fastest - * way of searching a 140-bit bitmap where the first 100 bits are - * unlikely to be set. It's guaranteed that at least one of the 140 - * bits is set. - */ -static inline int sched_find_first_bit(unsigned long *b) -{ - unsigned offs = 0; - while (! *b) { - b++; - offs += 32; - } - return sched_ffs (*b) + offs; -} - -/* - * hweightN: returns the hamming weight (i.e. the number - * of bits set) of a N-bit word - */ -#define hweight32(x) generic_hweight32 (x) -#define hweight16(x) generic_hweight16 (x) -#define hweight8(x) generic_hweight8 (x) - -#define ext2_set_bit test_and_set_bit +#include <asm-generic/bitops/ext2-non-atomic.h> #define ext2_set_bit_atomic(l,n,a) test_and_set_bit(n,a) -#define ext2_clear_bit test_and_clear_bit #define ext2_clear_bit_atomic(l,n,a) test_and_clear_bit(n,a) -#define ext2_test_bit test_bit -#define ext2_find_first_zero_bit find_first_zero_bit -#define ext2_find_next_zero_bit find_next_zero_bit -/* Bitmap functions for the minix filesystem. */ -#define minix_test_and_set_bit test_and_set_bit -#define minix_set_bit set_bit -#define minix_test_and_clear_bit test_and_clear_bit -#define minix_test_bit test_bit -#define minix_find_first_zero_bit find_first_zero_bit +#include <asm-generic/bitops/minix.h> #endif /* __KERNEL__ */ diff --git a/include/asm-v850/page.h b/include/asm-v850/page.h index b4bc85e7b91a..ad03c46a1f92 100644 --- a/include/asm-v850/page.h +++ b/include/asm-v850/page.h @@ -111,8 +111,7 @@ typedef unsigned long pgprot_t; #define page_to_virt(page) \ ((((page) - mem_map) << PAGE_SHIFT) + PAGE_OFFSET) -#define pfn_to_page(pfn) virt_to_page (pfn_to_virt (pfn)) -#define page_to_pfn(page) virt_to_pfn (page_to_virt (page)) +#define ARCH_PFN_OFFSET (PAGE_OFFSET >> PAGE_SHIFT) #define pfn_valid(pfn) ((pfn) < max_mapnr) #define virt_addr_valid(kaddr) \ @@ -125,6 +124,7 @@ typedef unsigned long pgprot_t; #endif /* KERNEL */ +#include <asm-generic/memory_model.h> #include <asm-generic/page.h> #endif /* __V850_PAGE_H__ */ diff --git a/include/asm-x86_64/bitops.h b/include/asm-x86_64/bitops.h index eb4df23e1e41..79212128d0f7 100644 --- a/include/asm-x86_64/bitops.h +++ b/include/asm-x86_64/bitops.h @@ -356,14 +356,7 @@ static __inline__ unsigned long __fls(unsigned long word) #ifdef __KERNEL__ -static inline int sched_find_first_bit(const unsigned long *b) -{ - if (b[0]) - return __ffs(b[0]); - if (b[1]) - return __ffs(b[1]) + 64; - return __ffs(b[2]) + 128; -} +#include <asm-generic/bitops/sched.h> /** * ffs - find first bit set @@ -412,43 +405,20 @@ static __inline__ int fls(int x) return r+1; } -/** - * hweightN - returns the hamming weight of a N-bit word - * @x: the word to weigh - * - * The Hamming Weight of a number is the total number of bits set in it. 
- */ - -#define hweight64(x) generic_hweight64(x) -#define hweight32(x) generic_hweight32(x) -#define hweight16(x) generic_hweight16(x) -#define hweight8(x) generic_hweight8(x) +#include <asm-generic/bitops/hweight.h> #endif /* __KERNEL__ */ #ifdef __KERNEL__ -#define ext2_set_bit(nr,addr) \ - __test_and_set_bit((nr),(unsigned long*)addr) +#include <asm-generic/bitops/ext2-non-atomic.h> + #define ext2_set_bit_atomic(lock,nr,addr) \ test_and_set_bit((nr),(unsigned long*)addr) -#define ext2_clear_bit(nr, addr) \ - __test_and_clear_bit((nr),(unsigned long*)addr) #define ext2_clear_bit_atomic(lock,nr,addr) \ test_and_clear_bit((nr),(unsigned long*)addr) -#define ext2_test_bit(nr, addr) test_bit((nr),(unsigned long*)addr) -#define ext2_find_first_zero_bit(addr, size) \ - find_first_zero_bit((unsigned long*)addr, size) -#define ext2_find_next_zero_bit(addr, size, off) \ - find_next_zero_bit((unsigned long*)addr, size, off) - -/* Bitmap functions for the minix filesystem. */ -#define minix_test_and_set_bit(nr,addr) __test_and_set_bit(nr,(void*)addr) -#define minix_set_bit(nr,addr) __set_bit(nr,(void*)addr) -#define minix_test_and_clear_bit(nr,addr) __test_and_clear_bit(nr,(void*)addr) -#define minix_test_bit(nr,addr) test_bit(nr,(void*)addr) -#define minix_find_first_zero_bit(addr,size) \ - find_first_zero_bit((void*)addr,size) + +#include <asm-generic/bitops/minix.h> #endif /* __KERNEL__ */ diff --git a/include/asm-x86_64/futex.h b/include/asm-x86_64/futex.h index 8602c09bf89e..9804bf07b092 100644 --- a/include/asm-x86_64/futex.h +++ b/include/asm-x86_64/futex.h @@ -94,5 +94,32 @@ futex_atomic_op_inuser (int encoded_op, int __user *uaddr) return ret; } +static inline int +futex_atomic_cmpxchg_inatomic(int __user *uaddr, int oldval, int newval) +{ + if (!access_ok(VERIFY_WRITE, uaddr, sizeof(int))) + return -EFAULT; + + __asm__ __volatile__( + "1: " LOCK_PREFIX "cmpxchgl %3, %1 \n" + + "2: .section .fixup, \"ax\" \n" + "3: mov %2, %0 \n" + " jmp 2b \n" + " .previous \n" + + " .section __ex_table, \"a\" \n" + " .align 8 \n" + " .quad 1b,3b \n" + " .previous \n" + + : "=a" (oldval), "=m" (*uaddr) + : "i" (-EFAULT), "r" (newval), "0" (oldval) + : "memory" + ); + + return oldval; +} + #endif #endif diff --git a/include/asm-x86_64/kdebug.h b/include/asm-x86_64/kdebug.h index b9ed4c0c8783..cf795631d9b4 100644 --- a/include/asm-x86_64/kdebug.h +++ b/include/asm-x86_64/kdebug.h @@ -5,21 +5,20 @@ struct pt_regs; -struct die_args { +struct die_args { struct pt_regs *regs; const char *str; - long err; + long err; int trapnr; int signr; -}; +}; + +extern int register_die_notifier(struct notifier_block *); +extern int unregister_die_notifier(struct notifier_block *); +extern struct atomic_notifier_head die_chain; -/* Note - you should never unregister because that can race with NMIs. - If you really want to do it first unregister - then synchronize_sched - then free. - */ -int register_die_notifier(struct notifier_block *nb); -extern struct notifier_block *die_chain; /* Grossly misnamed. 
*/ -enum die_val { +enum die_val { DIE_OOPS = 1, DIE_INT3, DIE_DEBUG, @@ -33,8 +32,8 @@ enum die_val { DIE_CALL, DIE_NMI_IPI, DIE_PAGE_FAULT, -}; - +}; + static inline int notify_die(enum die_val val, const char *str, struct pt_regs *regs, long err, int trap, int sig) { @@ -45,7 +44,7 @@ static inline int notify_die(enum die_val val, const char *str, .trapnr = trap, .signr = sig }; - return notifier_call_chain(&die_chain, val, &args); + return atomic_notifier_call_chain(&die_chain, val, &args); } extern int printk_address(unsigned long address); diff --git a/include/asm-x86_64/mmzone.h b/include/asm-x86_64/mmzone.h index 937f99b26883..6b18cd8f293d 100644 --- a/include/asm-x86_64/mmzone.h +++ b/include/asm-x86_64/mmzone.h @@ -44,12 +44,8 @@ static inline __attribute__((pure)) int phys_to_nid(unsigned long addr) #define pfn_to_nid(pfn) phys_to_nid((unsigned long)(pfn) << PAGE_SHIFT) #define kvaddr_to_nid(kaddr) phys_to_nid(__pa(kaddr)) -extern struct page *pfn_to_page(unsigned long pfn); -extern unsigned long page_to_pfn(struct page *page); extern int pfn_valid(unsigned long pfn); #endif -#define local_mapnr(kvaddr) \ - ( (__pa(kvaddr) >> PAGE_SHIFT) - node_start_pfn(kvaddr_to_nid(kvaddr)) ) #endif #endif diff --git a/include/asm-x86_64/page.h b/include/asm-x86_64/page.h index 615e3e494929..408185bac351 100644 --- a/include/asm-x86_64/page.h +++ b/include/asm-x86_64/page.h @@ -123,8 +123,6 @@ typedef struct { unsigned long pgprot; } pgprot_t; #define __boot_va(x) __va(x) #define __boot_pa(x) __pa(x) #ifdef CONFIG_FLATMEM -#define pfn_to_page(pfn) (mem_map + (pfn)) -#define page_to_pfn(page) ((unsigned long)((page) - mem_map)) #define pfn_valid(pfn) ((pfn) < end_pfn) #endif @@ -140,6 +138,7 @@ typedef struct { unsigned long pgprot; } pgprot_t; #endif /* __KERNEL__ */ +#include <asm-generic/memory_model.h> #include <asm-generic/page.h> #endif /* _X86_64_PAGE_H */ diff --git a/include/asm-x86_64/processor.h b/include/asm-x86_64/processor.h index 8c8d88c036ed..37a3ec433ee5 100644 --- a/include/asm-x86_64/processor.h +++ b/include/asm-x86_64/processor.h @@ -20,6 +20,7 @@ #include <asm/mmsegment.h> #include <asm/percpu.h> #include <linux/personality.h> +#include <linux/cpumask.h> #define TF_MASK 0x00000100 #define IF_MASK 0x00000200 @@ -65,6 +66,9 @@ struct cpuinfo_x86 { __u32 x86_power; __u32 extended_cpuid_level; /* Max extended CPUID function supported */ unsigned long loops_per_jiffy; +#ifdef CONFIG_SMP + cpumask_t llc_shared_map; /* cpus sharing the last level cache */ +#endif __u8 apicid; __u8 booted_cores; /* number of cores as seen by OS */ } ____cacheline_aligned; @@ -354,9 +358,6 @@ struct extended_sigtable { struct extended_signature sigs[0]; }; -/* '6' because it used to be for P6 only (but now covers Pentium 4 as well) */ -#define MICROCODE_IOCFREE _IO('6',0) - #define ASM_NOP1 K8_NOP1 #define ASM_NOP2 K8_NOP2 diff --git a/include/asm-x86_64/smp.h b/include/asm-x86_64/smp.h index 9ccbb2cfd5c0..a4fdaeb5c397 100644 --- a/include/asm-x86_64/smp.h +++ b/include/asm-x86_64/smp.h @@ -56,6 +56,7 @@ extern cpumask_t cpu_sibling_map[NR_CPUS]; extern cpumask_t cpu_core_map[NR_CPUS]; extern u8 phys_proc_id[NR_CPUS]; extern u8 cpu_core_id[NR_CPUS]; +extern u8 cpu_llc_id[NR_CPUS]; #define SMP_TRAMPOLINE_BASE 0x6000 diff --git a/include/asm-x86_64/topology.h b/include/asm-x86_64/topology.h index c642f5d9882d..9db54e9d17bb 100644 --- a/include/asm-x86_64/topology.h +++ b/include/asm-x86_64/topology.h @@ -68,4 +68,6 @@ extern int __node_distance(int, int); #include <asm-generic/topology.h> +extern 
cpumask_t cpu_coregroup_map(int cpu); + #endif diff --git a/include/asm-x86_64/unistd.h b/include/asm-x86_64/unistd.h index da0341c57949..fcc516353087 100644 --- a/include/asm-x86_64/unistd.h +++ b/include/asm-x86_64/unistd.h @@ -605,8 +605,12 @@ __SYSCALL(__NR_pselect6, sys_ni_syscall) /* for now */ __SYSCALL(__NR_ppoll, sys_ni_syscall) /* for now */ #define __NR_unshare 272 __SYSCALL(__NR_unshare, sys_unshare) +#define __NR_set_robust_list 273 +__SYSCALL(__NR_set_robust_list, sys_set_robust_list) +#define __NR_get_robust_list 274 +__SYSCALL(__NR_get_robust_list, sys_get_robust_list) -#define __NR_syscall_max __NR_unshare +#define __NR_syscall_max __NR_get_robust_list #ifndef __NO_STUBS diff --git a/include/asm-xtensa/bitops.h b/include/asm-xtensa/bitops.h index 0a2065f1a372..d815649617aa 100644 --- a/include/asm-xtensa/bitops.h +++ b/include/asm-xtensa/bitops.h @@ -23,156 +23,11 @@ # error SMP not supported on this architecture #endif -static __inline__ void set_bit(int nr, volatile void * addr) -{ - unsigned long mask = 1 << (nr & 0x1f); - unsigned long *a = ((unsigned long *)addr) + (nr >> 5); - unsigned long flags; - - local_irq_save(flags); - *a |= mask; - local_irq_restore(flags); -} - -static __inline__ void __set_bit(int nr, volatile unsigned long * addr) -{ - unsigned long mask = 1 << (nr & 0x1f); - unsigned long *a = ((unsigned long *)addr) + (nr >> 5); - - *a |= mask; -} - -static __inline__ void clear_bit(int nr, volatile void * addr) -{ - unsigned long mask = 1 << (nr & 0x1f); - unsigned long *a = ((unsigned long *)addr) + (nr >> 5); - unsigned long flags; - - local_irq_save(flags); - *a &= ~mask; - local_irq_restore(flags); -} - -static __inline__ void __clear_bit(int nr, volatile unsigned long *addr) -{ - unsigned long mask = 1 << (nr & 0x1f); - unsigned long *a = ((unsigned long *)addr) + (nr >> 5); - - *a &= ~mask; -} - -/* - * clear_bit() doesn't provide any barrier for the compiler. 
- */ - #define smp_mb__before_clear_bit() barrier() #define smp_mb__after_clear_bit() barrier() -static __inline__ void change_bit(int nr, volatile void * addr) -{ - unsigned long mask = 1 << (nr & 0x1f); - unsigned long *a = ((unsigned long *)addr) + (nr >> 5); - unsigned long flags; - - local_irq_save(flags); - *a ^= mask; - local_irq_restore(flags); -} - -static __inline__ void __change_bit(int nr, volatile void * addr) -{ - unsigned long mask = 1 << (nr & 0x1f); - unsigned long *a = ((unsigned long *)addr) + (nr >> 5); - - *a ^= mask; -} - -static __inline__ int test_and_set_bit(int nr, volatile void * addr) -{ - unsigned long retval; - unsigned long mask = 1 << (nr & 0x1f); - unsigned long *a = ((unsigned long *)addr) + (nr >> 5); - unsigned long flags; - - local_irq_save(flags); - retval = (mask & *a) != 0; - *a |= mask; - local_irq_restore(flags); - - return retval; -} - -static __inline__ int __test_and_set_bit(int nr, volatile void * addr) -{ - unsigned long retval; - unsigned long mask = 1 << (nr & 0x1f); - unsigned long *a = ((unsigned long *)addr) + (nr >> 5); - - retval = (mask & *a) != 0; - *a |= mask; - - return retval; -} - -static __inline__ int test_and_clear_bit(int nr, volatile void * addr) -{ - unsigned long retval; - unsigned long mask = 1 << (nr & 0x1f); - unsigned long *a = ((unsigned long *)addr) + (nr >> 5); - unsigned long flags; - - local_irq_save(flags); - retval = (mask & *a) != 0; - *a &= ~mask; - local_irq_restore(flags); - - return retval; -} - -static __inline__ int __test_and_clear_bit(int nr, volatile void * addr) -{ - unsigned long mask = 1 << (nr & 0x1f); - unsigned long *a = ((unsigned long *)addr) + (nr >> 5); - unsigned long old = *a; - - *a = old & ~mask; - return (old & mask) != 0; -} - -static __inline__ int test_and_change_bit(int nr, volatile void * addr) -{ - unsigned long retval; - unsigned long mask = 1 << (nr & 0x1f); - unsigned long *a = ((unsigned long *)addr) + (nr >> 5); - unsigned long flags; - - local_irq_save(flags); - - retval = (mask & *a) != 0; - *a ^= mask; - local_irq_restore(flags); - - return retval; -} - -/* - * non-atomic version; can be reordered - */ - -static __inline__ int __test_and_change_bit(int nr, volatile void *addr) -{ - unsigned long mask = 1 << (nr & 0x1f); - unsigned long *a = ((unsigned long *)addr) + (nr >> 5); - unsigned long old = *a; - - *a = old ^ mask; - return (old & mask) != 0; -} - -static __inline__ int test_bit(int nr, const volatile void *addr) -{ - return 1UL & (((const volatile unsigned int *)addr)[nr>>5] >> (nr&31)); -} +#include <asm-generic/bitops/atomic.h> +#include <asm-generic/bitops/non-atomic.h> #if XCHAL_HAVE_NSA @@ -245,202 +100,23 @@ static __inline__ int fls (unsigned int x) { return __cntlz(x); } -#define fls64(x) generic_fls64(x) - -static __inline__ int -find_next_bit(const unsigned long *addr, int size, int offset) -{ - const unsigned long *p = addr + (offset >> 5); - unsigned long result = offset & ~31UL; - unsigned long tmp; - - if (offset >= size) - return size; - size -= result; - offset &= 31UL; - if (offset) { - tmp = *p++; - tmp &= ~0UL << offset; - if (size < 32) - goto found_first; - if (tmp) - goto found_middle; - size -= 32; - result += 32; - } - while (size >= 32) { - if ((tmp = *p++) != 0) - goto found_middle; - result += 32; - size -= 32; - } - if (!size) - return result; - tmp = *p; - -found_first: - tmp &= ~0UL >> (32 - size); - if (tmp == 0UL) /* Are any bits set? */ - return result + size; /* Nope. 
*/ -found_middle: - return result + __ffs(tmp); -} - -/** - * find_first_bit - find the first set bit in a memory region - * @addr: The address to start the search at - * @size: The maximum size to search - * - * Returns the bit-number of the first set bit, not the number of the byte - * containing a bit. - */ - -#define find_first_bit(addr, size) \ - find_next_bit((addr), (size), 0) - -static __inline__ int -find_next_zero_bit(const unsigned long *addr, int size, int offset) -{ - const unsigned long *p = addr + (offset >> 5); - unsigned long result = offset & ~31UL; - unsigned long tmp; - - if (offset >= size) - return size; - size -= result; - offset &= 31UL; - if (offset) { - tmp = *p++; - tmp |= ~0UL >> (32-offset); - if (size < 32) - goto found_first; - if (~tmp) - goto found_middle; - size -= 32; - result += 32; - } - while (size & ~31UL) { - if (~(tmp = *p++)) - goto found_middle; - result += 32; - size -= 32; - } - if (!size) - return result; - tmp = *p; - -found_first: - tmp |= ~0UL << size; -found_middle: - return result + ffz(tmp); -} - -#define find_first_zero_bit(addr, size) \ - find_next_zero_bit((addr), (size), 0) +#include <asm-generic/bitops/fls64.h> +#include <asm-generic/bitops/find.h> +#include <asm-generic/bitops/ext2-non-atomic.h> #ifdef __XTENSA_EL__ -# define ext2_set_bit(nr,addr) __test_and_set_bit((nr), (addr)) # define ext2_set_bit_atomic(lock,nr,addr) test_and_set_bit((nr),(addr)) -# define ext2_clear_bit(nr,addr) __test_and_clear_bit((nr), (addr)) # define ext2_clear_bit_atomic(lock,nr,addr) test_and_clear_bit((nr),(addr)) -# define ext2_test_bit(nr,addr) test_bit((nr), (addr)) -# define ext2_find_first_zero_bit(addr, size) find_first_zero_bit((addr),(size)) -# define ext2_find_next_zero_bit(addr, size, offset) \ - find_next_zero_bit((addr), (size), (offset)) #elif defined(__XTENSA_EB__) -# define ext2_set_bit(nr,addr) __test_and_set_bit((nr) ^ 0x18, (addr)) # define ext2_set_bit_atomic(lock,nr,addr) test_and_set_bit((nr) ^ 0x18, (addr)) -# define ext2_clear_bit(nr,addr) __test_and_clear_bit((nr) ^ 18, (addr)) # define ext2_clear_bit_atomic(lock,nr,addr) test_and_clear_bit((nr)^0x18,(addr)) -# define ext2_test_bit(nr,addr) test_bit((nr) ^ 0x18, (addr)) -# define ext2_find_first_zero_bit(addr, size) \ - ext2_find_next_zero_bit((addr), (size), 0) - -static __inline__ unsigned long ext2_find_next_zero_bit(void *addr, unsigned long size, unsigned long offset) -{ - unsigned long *p = ((unsigned long *) addr) + (offset >> 5); - unsigned long result = offset & ~31UL; - unsigned long tmp; - - if (offset >= size) - return size; - size -= result; - offset &= 31UL; - if(offset) { - /* We hold the little endian value in tmp, but then the - * shift is illegal. So we could keep a big endian value - * in tmp, like this: - * - * tmp = __swab32(*(p++)); - * tmp |= ~0UL >> (32-offset); - * - * but this would decrease preformance, so we change the - * shift: - */ - tmp = *(p++); - tmp |= __swab32(~0UL >> (32-offset)); - if(size < 32) - goto found_first; - if(~tmp) - goto found_middle; - size -= 32; - result += 32; - } - while(size & ~31UL) { - if(~(tmp = *(p++))) - goto found_middle; - result += 32; - size -= 32; - } - if(!size) - return result; - tmp = *p; - -found_first: - /* tmp is little endian, so we would have to swab the shift, - * see above. But then we have to swab tmp below for ffz, so - * we might as well do this here. 
- */ - return result + ffz(__swab32(tmp) | (~0UL << size)); -found_middle: - return result + ffz(__swab32(tmp)); -} - #else # error processor byte order undefined! #endif - -#define hweight32(x) generic_hweight32(x) -#define hweight16(x) generic_hweight16(x) -#define hweight8(x) generic_hweight8(x) - -/* - * Find the first bit set in a 140-bit bitmap. - * The first 100 bits are unlikely to be set. - */ - -static inline int sched_find_first_bit(const unsigned long *b) -{ - if (unlikely(b[0])) - return __ffs(b[0]); - if (unlikely(b[1])) - return __ffs(b[1]) + 32; - if (unlikely(b[2])) - return __ffs(b[2]) + 64; - if (b[3]) - return __ffs(b[3]) + 96; - return __ffs(b[4]) + 128; -} - - -/* Bitmap functions for the minix filesystem. */ - -#define minix_test_and_set_bit(nr,addr) test_and_set_bit(nr,addr) -#define minix_set_bit(nr,addr) set_bit(nr,addr) -#define minix_test_and_clear_bit(nr,addr) test_and_clear_bit(nr,addr) -#define minix_test_bit(nr,addr) test_bit(nr,addr) -#define minix_find_first_zero_bit(addr,size) find_first_zero_bit(addr,size) +#include <asm-generic/bitops/hweight.h> +#include <asm-generic/bitops/sched.h> +#include <asm-generic/bitops/minix.h> #endif /* __KERNEL__ */ diff --git a/include/asm-xtensa/page.h b/include/asm-xtensa/page.h index 8ded36f255a2..992bac5c1258 100644 --- a/include/asm-xtensa/page.h +++ b/include/asm-xtensa/page.h @@ -109,10 +109,7 @@ void copy_user_page(void *to,void* from,unsigned long vaddr,struct page* page); #define __pa(x) ((unsigned long) (x) - PAGE_OFFSET) #define __va(x) ((void *)((unsigned long) (x) + PAGE_OFFSET)) #define pfn_valid(pfn) ((unsigned long)pfn < max_mapnr) -#ifndef CONFIG_DISCONTIGMEM -# define pfn_to_page(pfn) (mem_map + (pfn)) -# define page_to_pfn(page) ((unsigned long)((page) - mem_map)) -#else +#ifdef CONFIG_DISCONTIGMEM # error CONFIG_DISCONTIGMEM not supported #endif @@ -130,4 +127,5 @@ void copy_user_page(void *to,void* from,unsigned long vaddr,struct page* page); VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC) #endif /* __KERNEL__ */ +#include <asm-generic/memory_model.h> #endif /* _XTENSA_PAGE_H */ diff --git a/include/linux/adb.h b/include/linux/adb.h index e9fdc63483c7..b7305b178279 100644 --- a/include/linux/adb.h +++ b/include/linux/adb.h @@ -85,7 +85,7 @@ enum adb_message { ADB_MSG_POST_RESET /* Called after resetting the bus (re-do init & register) */ }; extern struct adb_driver *adb_controller; -extern struct notifier_block *adb_client_list; +extern struct blocking_notifier_head adb_client_list; int adb_request(struct adb_request *req, void (*done)(struct adb_request *), int flags, int nbytes, ...); diff --git a/include/linux/auto_fs4.h b/include/linux/auto_fs4.h index 9343c89d843c..0a6bc52ffe88 100644 --- a/include/linux/auto_fs4.h +++ b/include/linux/auto_fs4.h @@ -19,18 +19,37 @@ #undef AUTOFS_MIN_PROTO_VERSION #undef AUTOFS_MAX_PROTO_VERSION -#define AUTOFS_PROTO_VERSION 4 +#define AUTOFS_PROTO_VERSION 5 #define AUTOFS_MIN_PROTO_VERSION 3 -#define AUTOFS_MAX_PROTO_VERSION 4 +#define AUTOFS_MAX_PROTO_VERSION 5 -#define AUTOFS_PROTO_SUBVERSION 7 +#define AUTOFS_PROTO_SUBVERSION 0 /* Mask for expire behaviour */ #define AUTOFS_EXP_IMMEDIATE 1 #define AUTOFS_EXP_LEAVES 2 -/* New message type */ -#define autofs_ptype_expire_multi 2 /* Expire entry (umount request) */ +/* Daemon notification packet types */ +enum autofs_notify { + NFY_NONE, + NFY_MOUNT, + NFY_EXPIRE +}; + +/* Kernel protocol version 4 packet types */ + +/* Expire entry (umount request) */ +#define autofs_ptype_expire_multi 2 + +/* Kernel protocol version 5 
packet types */ + +/* Indirect mount missing and expire requests. */ +#define autofs_ptype_missing_indirect 3 +#define autofs_ptype_expire_indirect 4 + +/* Direct mount missing and expire requests */ +#define autofs_ptype_missing_direct 5 +#define autofs_ptype_expire_direct 6 /* v4 multi expire (via pipe) */ struct autofs_packet_expire_multi { @@ -40,14 +59,36 @@ struct autofs_packet_expire_multi { char name[NAME_MAX+1]; }; +/* autofs v5 common packet struct */ +struct autofs_v5_packet { + struct autofs_packet_hdr hdr; + autofs_wqt_t wait_queue_token; + __u32 dev; + __u64 ino; + __u32 uid; + __u32 gid; + __u32 pid; + __u32 tgid; + __u32 len; + char name[NAME_MAX+1]; +}; + +typedef struct autofs_v5_packet autofs_packet_missing_indirect_t; +typedef struct autofs_v5_packet autofs_packet_expire_indirect_t; +typedef struct autofs_v5_packet autofs_packet_missing_direct_t; +typedef struct autofs_v5_packet autofs_packet_expire_direct_t; + union autofs_packet_union { struct autofs_packet_hdr hdr; struct autofs_packet_missing missing; struct autofs_packet_expire expire; struct autofs_packet_expire_multi expire_multi; + struct autofs_v5_packet v5_packet; }; #define AUTOFS_IOC_EXPIRE_MULTI _IOW(0x93,0x66,int) +#define AUTOFS_IOC_EXPIRE_INDIRECT AUTOFS_IOC_EXPIRE_MULTI +#define AUTOFS_IOC_EXPIRE_DIRECT AUTOFS_IOC_EXPIRE_MULTI #define AUTOFS_IOC_PROTOSUBVER _IOR(0x93,0x67,int) #define AUTOFS_IOC_ASKREGHOST _IOR(0x93,0x68,int) #define AUTOFS_IOC_TOGGLEREGHOST _IOR(0x93,0x69,int) diff --git a/include/linux/bitops.h b/include/linux/bitops.h index f17525a963d1..5d1eabcde5d5 100644 --- a/include/linux/bitops.h +++ b/include/linux/bitops.h @@ -3,88 +3,11 @@ #include <asm/types.h> /* - * ffs: find first bit set. This is defined the same way as - * the libc and compiler builtin ffs routines, therefore - * differs in spirit from the above ffz (man ffs). - */ - -static inline int generic_ffs(int x) -{ - int r = 1; - - if (!x) - return 0; - if (!(x & 0xffff)) { - x >>= 16; - r += 16; - } - if (!(x & 0xff)) { - x >>= 8; - r += 8; - } - if (!(x & 0xf)) { - x >>= 4; - r += 4; - } - if (!(x & 3)) { - x >>= 2; - r += 2; - } - if (!(x & 1)) { - x >>= 1; - r += 1; - } - return r; -} - -/* - * fls: find last bit set. - */ - -static __inline__ int generic_fls(int x) -{ - int r = 32; - - if (!x) - return 0; - if (!(x & 0xffff0000u)) { - x <<= 16; - r -= 16; - } - if (!(x & 0xff000000u)) { - x <<= 8; - r -= 8; - } - if (!(x & 0xf0000000u)) { - x <<= 4; - r -= 4; - } - if (!(x & 0xc0000000u)) { - x <<= 2; - r -= 2; - } - if (!(x & 0x80000000u)) { - x <<= 1; - r -= 1; - } - return r; -} - -/* * Include this here because some architectures need generic_ffs/fls in * scope */ #include <asm/bitops.h> - -static inline int generic_fls64(__u64 x) -{ - __u32 h = x >> 32; - if (h) - return fls(h) + 32; - return fls(x); -} - static __inline__ int get_bitmask_order(unsigned int count) { int order; @@ -103,54 +26,9 @@ static __inline__ int get_count_order(unsigned int count) return order; } -/* - * hweightN: returns the hamming weight (i.e. 
the number - * of bits set) of a N-bit word - */ - -static inline unsigned int generic_hweight32(unsigned int w) -{ - unsigned int res = (w & 0x55555555) + ((w >> 1) & 0x55555555); - res = (res & 0x33333333) + ((res >> 2) & 0x33333333); - res = (res & 0x0F0F0F0F) + ((res >> 4) & 0x0F0F0F0F); - res = (res & 0x00FF00FF) + ((res >> 8) & 0x00FF00FF); - return (res & 0x0000FFFF) + ((res >> 16) & 0x0000FFFF); -} - -static inline unsigned int generic_hweight16(unsigned int w) -{ - unsigned int res = (w & 0x5555) + ((w >> 1) & 0x5555); - res = (res & 0x3333) + ((res >> 2) & 0x3333); - res = (res & 0x0F0F) + ((res >> 4) & 0x0F0F); - return (res & 0x00FF) + ((res >> 8) & 0x00FF); -} - -static inline unsigned int generic_hweight8(unsigned int w) -{ - unsigned int res = (w & 0x55) + ((w >> 1) & 0x55); - res = (res & 0x33) + ((res >> 2) & 0x33); - return (res & 0x0F) + ((res >> 4) & 0x0F); -} - -static inline unsigned long generic_hweight64(__u64 w) -{ -#if BITS_PER_LONG < 64 - return generic_hweight32((unsigned int)(w >> 32)) + - generic_hweight32((unsigned int)w); -#else - u64 res; - res = (w & 0x5555555555555555ul) + ((w >> 1) & 0x5555555555555555ul); - res = (res & 0x3333333333333333ul) + ((res >> 2) & 0x3333333333333333ul); - res = (res & 0x0F0F0F0F0F0F0F0Ful) + ((res >> 4) & 0x0F0F0F0F0F0F0F0Ful); - res = (res & 0x00FF00FF00FF00FFul) + ((res >> 8) & 0x00FF00FF00FF00FFul); - res = (res & 0x0000FFFF0000FFFFul) + ((res >> 16) & 0x0000FFFF0000FFFFul); - return (res & 0x00000000FFFFFFFFul) + ((res >> 32) & 0x00000000FFFFFFFFul); -#endif -} - static inline unsigned long hweight_long(unsigned long w) { - return sizeof(w) == 4 ? generic_hweight32(w) : generic_hweight64(w); + return sizeof(w) == 4 ? hweight32(w) : hweight64(w); } /* diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h index c179966f1a2f..d0cac8b58de7 100644 --- a/include/linux/blkdev.h +++ b/include/linux/blkdev.h @@ -55,25 +55,29 @@ struct as_io_context { struct cfq_queue; struct cfq_io_context { - /* - * circular list of cfq_io_contexts belonging to a process io context - */ - struct list_head list; - struct cfq_queue *cfqq[2]; + struct rb_node rb_node; void *key; + struct cfq_queue *cfqq[2]; + struct io_context *ioc; unsigned long last_end_request; - unsigned long last_queue; + sector_t last_request_pos; + unsigned long last_queue; + unsigned long ttime_total; unsigned long ttime_samples; unsigned long ttime_mean; + unsigned int seek_samples; + u64 seek_total; + sector_t seek_mean; + struct list_head queue_list; - void (*dtor)(struct cfq_io_context *); - void (*exit)(struct cfq_io_context *); + void (*dtor)(struct io_context *); /* destructor */ + void (*exit)(struct io_context *); /* called on task exit */ }; /* @@ -94,7 +98,7 @@ struct io_context { int nr_batch_requests; /* Number of requests left in the batch */ struct as_io_context *aic; - struct cfq_io_context *cic; + struct rb_root cic_root; }; void put_io_context(struct io_context *ioc); diff --git a/include/linux/bootmem.h b/include/linux/bootmem.h index 7155452fb4a8..de3eb8d8ae26 100644 --- a/include/linux/bootmem.h +++ b/include/linux/bootmem.h @@ -38,6 +38,7 @@ typedef struct bootmem_data { unsigned long last_pos; unsigned long last_success; /* Previous allocation point. 
To speed * up searching */ + struct list_head list; } bootmem_data_t; extern unsigned long __init bootmem_bootmap_pages (unsigned long); diff --git a/include/linux/buffer_head.h b/include/linux/buffer_head.h index 9f159baf153f..fb7e9b7ccbe3 100644 --- a/include/linux/buffer_head.h +++ b/include/linux/buffer_head.h @@ -46,25 +46,28 @@ struct address_space; typedef void (bh_end_io_t)(struct buffer_head *bh, int uptodate); /* - * Keep related fields in common cachelines. The most commonly accessed - * field (b_state) goes at the start so the compiler does not generate - * indexed addressing for it. + * Historically, a buffer_head was used to map a single block + * within a page, and of course as the unit of I/O through the + * filesystem and block layers. Nowadays the basic I/O unit + * is the bio, and buffer_heads are used for extracting block + * mappings (via a get_block_t call), for tracking state within + * a page (via a page_mapping) and for wrapping bio submission + * for backward compatibility reasons (e.g. submit_bh). */ struct buffer_head { - /* First cache line: */ unsigned long b_state; /* buffer state bitmap (see above) */ struct buffer_head *b_this_page;/* circular list of page's buffers */ struct page *b_page; /* the page this bh is mapped to */ - atomic_t b_count; /* users using this block */ - u32 b_size; /* block size */ - sector_t b_blocknr; /* block number */ - char *b_data; /* pointer to data block */ + sector_t b_blocknr; /* start block number */ + size_t b_size; /* size of mapping */ + char *b_data; /* pointer to data within the page */ struct block_device *b_bdev; bh_end_io_t *b_end_io; /* I/O completion */ void *b_private; /* reserved for b_end_io */ struct list_head b_assoc_buffers; /* associated with another mapping */ + atomic_t b_count; /* users using this buffer_head */ }; /* @@ -189,8 +192,8 @@ extern int buffer_heads_over_limit; * address_spaces. 
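[Illustration, not part of the patch: a minimal sketch of a get_block callback under the reworked buffer_head, where map_bh() now also initializes b_size from the superblock block size. The myfs_* names are hypothetical.]

    #include <linux/fs.h>
    #include <linux/buffer_head.h>

    static int myfs_get_block(struct inode *inode, sector_t iblock,
                              struct buffer_head *bh_result, int create)
    {
            sector_t phys = myfs_lookup_block(inode, iblock); /* hypothetical lookup */

            if (!phys)
                    return create ? -ENOSPC : 0; /* allocation path omitted in this sketch */

            /* Sets buffer_mapped, b_bdev, b_blocknr and, with this patch,
             * b_size = inode->i_sb->s_blocksize.  A multi-block mapper may
             * afterwards enlarge bh_result->b_size to describe a longer
             * contiguous extent. */
            map_bh(bh_result, inode->i_sb, phys);
            return 0;
    }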
*/ int try_to_release_page(struct page * page, gfp_t gfp_mask); -int block_invalidatepage(struct page *page, unsigned long offset); -int do_invalidatepage(struct page *page, unsigned long offset); +void block_invalidatepage(struct page *page, unsigned long offset); +void do_invalidatepage(struct page *page, unsigned long offset); int block_write_full_page(struct page *page, get_block_t *get_block, struct writeback_control *wbc); int block_read_full_page(struct page*, get_block_t*); @@ -200,7 +203,7 @@ int cont_prepare_write(struct page*, unsigned, unsigned, get_block_t*, int generic_cont_expand(struct inode *inode, loff_t size); int generic_cont_expand_simple(struct inode *inode, loff_t size); int block_commit_write(struct page *page, unsigned from, unsigned to); -int block_sync_page(struct page *); +void block_sync_page(struct page *); sector_t generic_block_bmap(struct address_space *, sector_t, get_block_t *); int generic_commit_write(struct file *, struct page *, unsigned, unsigned); int block_truncate_page(struct address_space *, loff_t, get_block_t *); @@ -277,6 +280,7 @@ map_bh(struct buffer_head *bh, struct super_block *sb, sector_t block) set_buffer_mapped(bh); bh->b_bdev = sb->s_bdev; bh->b_blocknr = block; + bh->b_size = sb->s_blocksize; } /* diff --git a/include/linux/cdev.h b/include/linux/cdev.h index 8da37e29cb87..2216638962d2 100644 --- a/include/linux/cdev.h +++ b/include/linux/cdev.h @@ -5,13 +5,13 @@ struct cdev { struct kobject kobj; struct module *owner; - struct file_operations *ops; + const struct file_operations *ops; struct list_head list; dev_t dev; unsigned int count; }; -void cdev_init(struct cdev *, struct file_operations *); +void cdev_init(struct cdev *, const struct file_operations *); struct cdev *cdev_alloc(void); diff --git a/include/linux/coda_linux.h b/include/linux/coda_linux.h index cc621ec409d8..b3ecf8f71d97 100644 --- a/include/linux/coda_linux.h +++ b/include/linux/coda_linux.h @@ -30,9 +30,9 @@ extern struct inode_operations coda_ioctl_inode_operations; extern struct address_space_operations coda_file_aops; extern struct address_space_operations coda_symlink_aops; -extern struct file_operations coda_dir_operations; -extern struct file_operations coda_file_operations; -extern struct file_operations coda_ioctl_operations; +extern const struct file_operations coda_dir_operations; +extern const struct file_operations coda_file_operations; +extern const struct file_operations coda_ioctl_operations; /* operations shared over more than one file */ int coda_open(struct inode *i, struct file *f); diff --git a/include/linux/compat.h b/include/linux/compat.h index c9ab2a26348c..6d3a654be1ae 100644 --- a/include/linux/compat.h +++ b/include/linux/compat.h @@ -45,6 +45,32 @@ struct compat_tms { compat_clock_t tms_cstime; }; +struct compat_timex { + compat_uint_t modes; + compat_long_t offset; + compat_long_t freq; + compat_long_t maxerror; + compat_long_t esterror; + compat_int_t status; + compat_long_t constant; + compat_long_t precision; + compat_long_t tolerance; + struct compat_timeval time; + compat_long_t tick; + compat_long_t ppsfreq; + compat_long_t jitter; + compat_int_t shift; + compat_long_t stabil; + compat_long_t jitcnt; + compat_long_t calcnt; + compat_long_t errcnt; + compat_long_t stbcnt; + + compat_int_t :32; compat_int_t :32; compat_int_t :32; compat_int_t :32; + compat_int_t :32; compat_int_t :32; compat_int_t :32; compat_int_t :32; + compat_int_t :32; compat_int_t :32; compat_int_t :32; compat_int_t :32; +}; + #define _COMPAT_NSIG_WORDS 
(_COMPAT_NSIG / _COMPAT_NSIG_BPW) typedef struct { @@ -121,6 +147,24 @@ typedef struct compat_sigevent { } _sigev_un; } compat_sigevent_t; +struct compat_robust_list { + compat_uptr_t next; +}; + +struct compat_robust_list_head { + struct compat_robust_list list; + compat_long_t futex_offset; + compat_uptr_t list_op_pending; +}; + +extern void compat_exit_robust_list(struct task_struct *curr); + +asmlinkage long +compat_sys_set_robust_list(struct compat_robust_list_head __user *head, + compat_size_t len); +asmlinkage long +compat_sys_get_robust_list(int pid, compat_uptr_t *head_ptr, + compat_size_t __user *len_ptr); long compat_sys_semctl(int first, int second, int third, void __user *uptr); long compat_sys_msgsnd(int first, int second, int third, void __user *uptr); @@ -181,5 +225,7 @@ static inline int compat_timespec_compare(struct compat_timespec *lhs, return lhs->tv_nsec - rhs->tv_nsec; } +asmlinkage long compat_sys_adjtimex(struct compat_timex __user *utp); + #endif /* CONFIG_COMPAT */ #endif /* _LINUX_COMPAT_H */ diff --git a/include/linux/compat_ioctl.h b/include/linux/compat_ioctl.h index efb518f16bb3..89ab677cb993 100644 --- a/include/linux/compat_ioctl.h +++ b/include/linux/compat_ioctl.h @@ -140,6 +140,7 @@ COMPATIBLE_IOCTL(DM_TABLE_DEPS_32) COMPATIBLE_IOCTL(DM_TABLE_STATUS_32) COMPATIBLE_IOCTL(DM_LIST_VERSIONS_32) COMPATIBLE_IOCTL(DM_TARGET_MSG_32) +COMPATIBLE_IOCTL(DM_DEV_SET_GEOMETRY_32) COMPATIBLE_IOCTL(DM_VERSION) COMPATIBLE_IOCTL(DM_REMOVE_ALL) COMPATIBLE_IOCTL(DM_LIST_DEVICES) @@ -155,6 +156,7 @@ COMPATIBLE_IOCTL(DM_TABLE_DEPS) COMPATIBLE_IOCTL(DM_TABLE_STATUS) COMPATIBLE_IOCTL(DM_LIST_VERSIONS) COMPATIBLE_IOCTL(DM_TARGET_MSG) +COMPATIBLE_IOCTL(DM_DEV_SET_GEOMETRY) /* Big K */ COMPATIBLE_IOCTL(PIO_FONT) COMPATIBLE_IOCTL(GIO_FONT) diff --git a/include/linux/cpumask.h b/include/linux/cpumask.h index 99e6115d8e52..9cbb781d6f80 100644 --- a/include/linux/cpumask.h +++ b/include/linux/cpumask.h @@ -67,7 +67,7 @@ * * int any_online_cpu(mask) First online cpu in mask * - * for_each_cpu(cpu) for-loop cpu over cpu_possible_map + * for_each_possible_cpu(cpu) for-loop cpu over cpu_possible_map * for_each_online_cpu(cpu) for-loop cpu over cpu_online_map * for_each_present_cpu(cpu) for-loop cpu over cpu_present_map * @@ -405,7 +405,8 @@ int __any_online_cpu(const cpumask_t *mask); #define any_online_cpu(mask) 0 #endif -#define for_each_cpu(cpu) for_each_cpu_mask((cpu), cpu_possible_map) +#define for_each_cpu(cpu) for_each_cpu_mask((cpu), cpu_possible_map) +#define for_each_possible_cpu(cpu) for_each_cpu_mask((cpu), cpu_possible_map) #define for_each_online_cpu(cpu) for_each_cpu_mask((cpu), cpu_online_map) #define for_each_present_cpu(cpu) for_each_cpu_mask((cpu), cpu_present_map) diff --git a/include/linux/crash_dump.h b/include/linux/crash_dump.h index 534d750d922d..32503657f14f 100644 --- a/include/linux/crash_dump.h +++ b/include/linux/crash_dump.h @@ -11,7 +11,7 @@ extern unsigned long long elfcorehdr_addr; extern ssize_t copy_oldmem_page(unsigned long, char *, size_t, unsigned long, int); -extern struct file_operations proc_vmcore_operations; +extern const struct file_operations proc_vmcore_operations; extern struct proc_dir_entry *proc_vmcore; #endif /* CONFIG_CRASH_DUMP */ diff --git a/include/linux/debugfs.h b/include/linux/debugfs.h index 4b0428e335be..176e2d371577 100644 --- a/include/linux/debugfs.h +++ b/include/linux/debugfs.h @@ -29,7 +29,7 @@ struct debugfs_blob_wrapper { #if defined(CONFIG_DEBUG_FS) struct dentry *debugfs_create_file(const char *name, mode_t mode, 
struct dentry *parent, void *data, - struct file_operations *fops); + const struct file_operations *fops); struct dentry *debugfs_create_dir(const char *name, struct dentry *parent); diff --git a/include/linux/device-mapper.h b/include/linux/device-mapper.h index 51e0e95a421a..aee10b2ea4c6 100644 --- a/include/linux/device-mapper.h +++ b/include/linux/device-mapper.h @@ -97,6 +97,7 @@ struct io_restrictions { unsigned short hardsect_size; unsigned int max_segment_size; unsigned long seg_boundary_mask; + unsigned char no_cluster; /* inverted so that 0 is default */ }; struct dm_target { diff --git a/include/linux/dm-ioctl.h b/include/linux/dm-ioctl.h index fa75ba0d635e..c67c6786612a 100644 --- a/include/linux/dm-ioctl.h +++ b/include/linux/dm-ioctl.h @@ -80,6 +80,16 @@ * * DM_TARGET_MSG: * Pass a message string to the target at a specific offset of a device. + * + * DM_DEV_SET_GEOMETRY: + * Set the geometry of a device by passing in a string in this format: + * + * "cylinders heads sectors_per_track start_sector" + * + * Beware that CHS geometry is nearly obsolete and only provided + * for compatibility with dm devices that can be booted by a PC + * BIOS. See struct hd_geometry for range limits. Also note that + * the geometry is erased if the device size changes. */ /* @@ -218,6 +228,7 @@ enum { /* Added later */ DM_LIST_VERSIONS_CMD, DM_TARGET_MSG_CMD, + DM_DEV_SET_GEOMETRY_CMD }; /* @@ -247,6 +258,7 @@ typedef char ioctl_struct[308]; #define DM_TABLE_STATUS_32 _IOWR(DM_IOCTL, DM_TABLE_STATUS_CMD, ioctl_struct) #define DM_LIST_VERSIONS_32 _IOWR(DM_IOCTL, DM_LIST_VERSIONS_CMD, ioctl_struct) #define DM_TARGET_MSG_32 _IOWR(DM_IOCTL, DM_TARGET_MSG_CMD, ioctl_struct) +#define DM_DEV_SET_GEOMETRY_32 _IOWR(DM_IOCTL, DM_DEV_SET_GEOMETRY_CMD, ioctl_struct) #endif #define DM_IOCTL 0xfd @@ -270,11 +282,12 @@ typedef char ioctl_struct[308]; #define DM_LIST_VERSIONS _IOWR(DM_IOCTL, DM_LIST_VERSIONS_CMD, struct dm_ioctl) #define DM_TARGET_MSG _IOWR(DM_IOCTL, DM_TARGET_MSG_CMD, struct dm_ioctl) +#define DM_DEV_SET_GEOMETRY _IOWR(DM_IOCTL, DM_DEV_SET_GEOMETRY_CMD, struct dm_ioctl) #define DM_VERSION_MAJOR 4 -#define DM_VERSION_MINOR 5 +#define DM_VERSION_MINOR 6 #define DM_VERSION_PATCHLEVEL 0 -#define DM_VERSION_EXTRA "-ioctl (2005-10-04)" +#define DM_VERSION_EXTRA "-ioctl (2006-02-17)" /* Status bits */ #define DM_READONLY_FLAG (1 << 0) /* In/Out */ diff --git a/include/linux/dma-mapping.h b/include/linux/dma-mapping.h index a8731062a74c..9b4751aecc23 100644 --- a/include/linux/dma-mapping.h +++ b/include/linux/dma-mapping.h @@ -21,6 +21,7 @@ enum dma_data_direction { #define DMA_30BIT_MASK 0x000000003fffffffULL #define DMA_29BIT_MASK 0x000000001fffffffULL #define DMA_28BIT_MASK 0x000000000fffffffULL +#define DMA_24BIT_MASK 0x0000000000ffffffULL #include <asm/dma-mapping.h> diff --git a/include/linux/efi.h b/include/linux/efi.h index c7c5dd316182..e203613d3aec 100644 --- a/include/linux/efi.h +++ b/include/linux/efi.h @@ -240,19 +240,21 @@ struct efi_memory_map { unsigned long desc_size; }; +#define EFI_INVALID_TABLE_ADDR (~0UL) + /* * All runtime access to EFI goes through this structure: */ extern struct efi { efi_system_table_t *systab; /* EFI system table */ - void *mps; /* MPS table */ - void *acpi; /* ACPI table (IA64 ext 0.71) */ - void *acpi20; /* ACPI table (ACPI 2.0) */ - void *smbios; /* SM BIOS table */ - void *sal_systab; /* SAL system table */ - void *boot_info; /* boot info table */ - void *hcdp; /* HCDP table */ - void *uga; /* UGA table */ + unsigned long mps; /* MPS table */ + 
unsigned long acpi; /* ACPI table (IA64 ext 0.71) */ + unsigned long acpi20; /* ACPI table (ACPI 2.0) */ + unsigned long smbios; /* SM BIOS table */ + unsigned long sal_systab; /* SAL system table */ + unsigned long boot_info; /* boot info table */ + unsigned long hcdp; /* HCDP table */ + unsigned long uga; /* UGA table */ efi_get_time_t *get_time; efi_set_time_t *set_time; efi_get_wakeup_time_t *get_wakeup_time; @@ -292,6 +294,8 @@ extern void efi_enter_virtual_mode (void); /* switch EFI to virtual mode, if pos extern u64 efi_get_iobase (void); extern u32 efi_mem_type (unsigned long phys_addr); extern u64 efi_mem_attributes (unsigned long phys_addr); +extern int efi_mem_attribute_range (unsigned long phys_addr, unsigned long size, + u64 attr); extern int __init efi_uart_console_only (void); extern void efi_initialize_iomem_resources(struct resource *code_resource, struct resource *data_resource); diff --git a/include/linux/efs_fs.h b/include/linux/efs_fs.h index 28f368c526fb..fbfa6b52e2fb 100644 --- a/include/linux/efs_fs.h +++ b/include/linux/efs_fs.h @@ -37,7 +37,7 @@ static inline struct efs_sb_info *SUPER_INFO(struct super_block *sb) struct statfs; extern struct inode_operations efs_dir_inode_operations; -extern struct file_operations efs_dir_operations; +extern const struct file_operations efs_dir_operations; extern struct address_space_operations efs_symlink_aops; extern void efs_read_inode(struct inode *); diff --git a/include/linux/ext3_fs.h b/include/linux/ext3_fs.h index e7239f2f97a1..3ade6a4e3bdd 100644 --- a/include/linux/ext3_fs.h +++ b/include/linux/ext3_fs.h @@ -36,7 +36,8 @@ struct statfs; * Define EXT3_RESERVATION to reserve data blocks for expanding files */ #define EXT3_DEFAULT_RESERVE_BLOCKS 8 -#define EXT3_MAX_RESERVE_BLOCKS 1024 +/*max window size: 1024(direct blocks) + 3([t,d]indirect blocks) */ +#define EXT3_MAX_RESERVE_BLOCKS 1027 #define EXT3_RESERVE_WINDOW_NOT_ALLOCATED 0 /* * Always enable hashed directories @@ -732,6 +733,8 @@ struct dir_private_info { extern int ext3_bg_has_super(struct super_block *sb, int group); extern unsigned long ext3_bg_num_gdb(struct super_block *sb, int group); extern int ext3_new_block (handle_t *, struct inode *, unsigned long, int *); +extern int ext3_new_blocks (handle_t *, struct inode *, unsigned long, + unsigned long *, int *); extern void ext3_free_blocks (handle_t *, struct inode *, unsigned long, unsigned long); extern void ext3_free_blocks_sb (handle_t *, struct super_block *, @@ -775,9 +778,9 @@ extern unsigned long ext3_count_free (struct buffer_head *, unsigned); int ext3_forget(handle_t *, int, struct inode *, struct buffer_head *, int); struct buffer_head * ext3_getblk (handle_t *, struct inode *, long, int, int *); struct buffer_head * ext3_bread (handle_t *, struct inode *, int, int, int *); -int ext3_get_block_handle(handle_t *handle, struct inode *inode, - sector_t iblock, struct buffer_head *bh_result, int create, - int extend_disksize); +int ext3_get_blocks_handle(handle_t *handle, struct inode *inode, + sector_t iblock, unsigned long maxblocks, struct buffer_head *bh_result, + int create, int extend_disksize); extern void ext3_read_inode (struct inode *); extern int ext3_write_inode (struct inode *, int); @@ -830,11 +833,11 @@ do { \ */ /* dir.c */ -extern struct file_operations ext3_dir_operations; +extern const struct file_operations ext3_dir_operations; /* file.c */ extern struct inode_operations ext3_file_inode_operations; -extern struct file_operations ext3_file_operations; +extern const struct 
file_operations ext3_file_operations; /* namei.c */ extern struct inode_operations ext3_dir_inode_operations; diff --git a/include/linux/fb.h b/include/linux/fb.h index 2cb19e6503aa..d03fadfcafe3 100644 --- a/include/linux/fb.h +++ b/include/linux/fb.h @@ -734,7 +734,7 @@ struct fb_tile_ops { /* A driver may set this flag to indicate that it does want a set_par to be * called every time when fbcon_switch is executed. The advantage is that with - * this flag set you can really be shure that set_par is always called before + * this flag set you can really be sure that set_par is always called before * any of the functions dependant on the correct hardware state or altering * that state, even if you are using some broken X releases. The disadvantage * is that it introduces unwanted delays to every console switch if set_par diff --git a/include/linux/fs.h b/include/linux/fs.h index 5adf32b90f36..408fe89498f4 100644 --- a/include/linux/fs.h +++ b/include/linux/fs.h @@ -252,9 +252,6 @@ extern void __init files_init(unsigned long); struct buffer_head; typedef int (get_block_t)(struct inode *inode, sector_t iblock, struct buffer_head *bh_result, int create); -typedef int (get_blocks_t)(struct inode *inode, sector_t iblock, - unsigned long max_blocks, - struct buffer_head *bh_result, int create); typedef void (dio_iodone_t)(struct kiocb *iocb, loff_t offset, ssize_t bytes, void *private); @@ -350,7 +347,7 @@ struct writeback_control; struct address_space_operations { int (*writepage)(struct page *page, struct writeback_control *wbc); int (*readpage)(struct file *, struct page *); - int (*sync_page)(struct page *); + void (*sync_page)(struct page *); /* Write back some dirty pages from this mapping. */ int (*writepages)(struct address_space *, struct writeback_control *); @@ -369,7 +366,7 @@ struct address_space_operations { int (*commit_write)(struct file *, struct page *, unsigned, unsigned); /* Unfortunately this kludge is needed for FIBMAP. 
Don't use it */ sector_t (*bmap)(struct address_space *, sector_t); - int (*invalidatepage) (struct page *, unsigned long); + void (*invalidatepage) (struct page *, unsigned long); int (*releasepage) (struct page *, gfp_t); ssize_t (*direct_IO)(int, struct kiocb *, const struct iovec *iov, loff_t offset, unsigned long nr_segs); @@ -413,6 +410,9 @@ struct block_device { struct list_head bd_inodes; void * bd_holder; int bd_holders; +#ifdef CONFIG_SYSFS + struct list_head bd_holder_list; +#endif struct block_device * bd_contains; unsigned bd_block_size; struct hd_struct * bd_part; @@ -490,13 +490,13 @@ struct inode { unsigned int i_blkbits; unsigned long i_blksize; unsigned long i_version; - unsigned long i_blocks; + blkcnt_t i_blocks; unsigned short i_bytes; spinlock_t i_lock; /* i_blocks, i_bytes, maybe i_size */ struct mutex i_mutex; struct rw_semaphore i_alloc_sem; struct inode_operations *i_op; - struct file_operations *i_fop; /* former ->i_op->default_file_ops */ + const struct file_operations *i_fop; /* former ->i_op->default_file_ops */ struct super_block *i_sb; struct file_lock *i_flock; struct address_space *i_mapping; @@ -636,7 +636,7 @@ struct file { } f_u; struct dentry *f_dentry; struct vfsmount *f_vfsmnt; - struct file_operations *f_op; + const struct file_operations *f_op; atomic_t f_count; unsigned int f_flags; mode_t f_mode; @@ -763,6 +763,7 @@ extern void locks_copy_lock(struct file_lock *, struct file_lock *); extern void locks_remove_posix(struct file *, fl_owner_t); extern void locks_remove_flock(struct file *); extern int posix_test_lock(struct file *, struct file_lock *, struct file_lock *); +extern int posix_lock_file_conf(struct file *, struct file_lock *, struct file_lock *); extern int posix_lock_file(struct file *, struct file_lock *); extern int posix_lock_file_wait(struct file *, struct file_lock *); extern int posix_unblock_lock(struct file *, struct file_lock *); @@ -1389,11 +1390,11 @@ extern void bd_set_size(struct block_device *, loff_t size); extern void bd_forget(struct inode *inode); extern void bdput(struct block_device *); extern struct block_device *open_by_devnum(dev_t, unsigned); -extern struct file_operations def_blk_fops; +extern const struct file_operations def_blk_fops; extern struct address_space_operations def_blk_aops; -extern struct file_operations def_chr_fops; -extern struct file_operations bad_sock_fops; -extern struct file_operations def_fifo_fops; +extern const struct file_operations def_chr_fops; +extern const struct file_operations bad_sock_fops; +extern const struct file_operations def_fifo_fops; extern int ioctl_by_bdev(struct block_device *, unsigned, unsigned long); extern int blkdev_ioctl(struct inode *, struct file *, unsigned, unsigned long); extern long compat_blkdev_ioctl(struct file *, unsigned, unsigned long); @@ -1401,12 +1402,19 @@ extern int blkdev_get(struct block_device *, mode_t, unsigned); extern int blkdev_put(struct block_device *); extern int bd_claim(struct block_device *, void *); extern void bd_release(struct block_device *); +#ifdef CONFIG_SYSFS +extern int bd_claim_by_disk(struct block_device *, void *, struct gendisk *); +extern void bd_release_from_disk(struct block_device *, struct gendisk *); +#else +#define bd_claim_by_disk(bdev, holder, disk) bd_claim(bdev, holder) +#define bd_release_from_disk(bdev, disk) bd_release(bdev) +#endif /* fs/char_dev.c */ extern int alloc_chrdev_region(dev_t *, unsigned, unsigned, const char *); extern int register_chrdev_region(dev_t, unsigned, const char *); extern int 
register_chrdev(unsigned int, const char *, - struct file_operations *); + const struct file_operations *); extern int unregister_chrdev(unsigned int, const char *); extern void unregister_chrdev_region(dev_t, unsigned); extern int chrdev_open(struct inode *, struct file *); @@ -1436,9 +1444,9 @@ extern void init_special_inode(struct inode *, umode_t, dev_t); extern void make_bad_inode(struct inode *); extern int is_bad_inode(struct inode *); -extern struct file_operations read_fifo_fops; -extern struct file_operations write_fifo_fops; -extern struct file_operations rdwr_fifo_fops; +extern const struct file_operations read_fifo_fops; +extern const struct file_operations write_fifo_fops; +extern const struct file_operations rdwr_fifo_fops; extern int fs_may_remount_ro(struct super_block *); @@ -1644,7 +1652,7 @@ static inline void do_generic_file_read(struct file * filp, loff_t *ppos, ssize_t __blockdev_direct_IO(int rw, struct kiocb *iocb, struct inode *inode, struct block_device *bdev, const struct iovec *iov, loff_t offset, - unsigned long nr_segs, get_blocks_t get_blocks, dio_iodone_t end_io, + unsigned long nr_segs, get_block_t get_block, dio_iodone_t end_io, int lock_type); enum { @@ -1655,32 +1663,32 @@ enum { static inline ssize_t blockdev_direct_IO(int rw, struct kiocb *iocb, struct inode *inode, struct block_device *bdev, const struct iovec *iov, - loff_t offset, unsigned long nr_segs, get_blocks_t get_blocks, + loff_t offset, unsigned long nr_segs, get_block_t get_block, dio_iodone_t end_io) { return __blockdev_direct_IO(rw, iocb, inode, bdev, iov, offset, - nr_segs, get_blocks, end_io, DIO_LOCKING); + nr_segs, get_block, end_io, DIO_LOCKING); } static inline ssize_t blockdev_direct_IO_no_locking(int rw, struct kiocb *iocb, struct inode *inode, struct block_device *bdev, const struct iovec *iov, - loff_t offset, unsigned long nr_segs, get_blocks_t get_blocks, + loff_t offset, unsigned long nr_segs, get_block_t get_block, dio_iodone_t end_io) { return __blockdev_direct_IO(rw, iocb, inode, bdev, iov, offset, - nr_segs, get_blocks, end_io, DIO_NO_LOCKING); + nr_segs, get_block, end_io, DIO_NO_LOCKING); } static inline ssize_t blockdev_direct_IO_own_locking(int rw, struct kiocb *iocb, struct inode *inode, struct block_device *bdev, const struct iovec *iov, - loff_t offset, unsigned long nr_segs, get_blocks_t get_blocks, + loff_t offset, unsigned long nr_segs, get_block_t get_block, dio_iodone_t end_io) { return __blockdev_direct_IO(rw, iocb, inode, bdev, iov, offset, - nr_segs, get_blocks, end_io, DIO_OWN_LOCKING); + nr_segs, get_block, end_io, DIO_OWN_LOCKING); } -extern struct file_operations generic_ro_fops; +extern const struct file_operations generic_ro_fops; #define special_file(m) (S_ISCHR(m)||S_ISBLK(m)||S_ISFIFO(m)||S_ISSOCK(m)) @@ -1736,9 +1744,9 @@ extern int simple_commit_write(struct file *file, struct page *page, extern struct dentry *simple_lookup(struct inode *, struct dentry *, struct nameidata *); extern ssize_t generic_read_dir(struct file *, char __user *, size_t, loff_t *); -extern struct file_operations simple_dir_operations; +extern const struct file_operations simple_dir_operations; extern struct inode_operations simple_dir_inode_operations; -struct tree_descr { char *name; struct file_operations *ops; int mode; }; +struct tree_descr { char *name; const struct file_operations *ops; int mode; }; struct dentry *d_alloc_name(struct dentry *, const char *); extern int simple_fill_super(struct super_block *, int, struct tree_descr *); extern int simple_pin_fs(char 
*name, struct vfsmount **mount, int *count); diff --git a/include/linux/futex.h b/include/linux/futex.h index 10f96c31971e..966a5b3da439 100644 --- a/include/linux/futex.h +++ b/include/linux/futex.h @@ -1,6 +1,8 @@ #ifndef _LINUX_FUTEX_H #define _LINUX_FUTEX_H +#include <linux/sched.h> + /* Second argument to futex syscall */ @@ -11,10 +13,97 @@ #define FUTEX_CMP_REQUEUE 4 #define FUTEX_WAKE_OP 5 +/* + * Support for robust futexes: the kernel cleans up held futexes at + * thread exit time. + */ + +/* + * Per-lock list entry - embedded in user-space locks, somewhere close + * to the futex field. (Note: user-space uses a double-linked list to + * achieve O(1) list add and remove, but the kernel only needs to know + * about the forward link) + * + * NOTE: this structure is part of the syscall ABI, and must not be + * changed. + */ +struct robust_list { + struct robust_list __user *next; +}; + +/* + * Per-thread list head: + * + * NOTE: this structure is part of the syscall ABI, and must only be + * changed if the change is first communicated with the glibc folks. + * (When an incompatible change is done, we'll increase the structure + * size, which glibc will detect) + */ +struct robust_list_head { + /* + * The head of the list. Points back to itself if empty: + */ + struct robust_list list; + + /* + * This relative offset is set by user-space, it gives the kernel + * the relative position of the futex field to examine. This way + * we keep userspace flexible, to freely shape its data-structure, + * without hardcoding any particular offset into the kernel: + */ + long futex_offset; + + /* + * The death of the thread may race with userspace setting + * up a lock's links. So to handle this race, userspace first + * sets this field to the address of the to-be-taken lock, + * then does the lock acquire, and then adds itself to the + * list, and then clears this field. Hence the kernel will + * always have full knowledge of all locks that the thread + * _might_ have taken. We check the owner TID in any case, + * so only truly owned locks will be handled. + */ + struct robust_list __user *list_op_pending; +}; + +/* + * Are there any waiters for this robust futex: + */ +#define FUTEX_WAITERS 0x80000000 + +/* + * The kernel signals via this bit that a thread holding a futex + * has exited without unlocking the futex. The kernel also does + * a FUTEX_WAKE on such futexes, after setting the bit, to wake + * up any possible waiters: + */ +#define FUTEX_OWNER_DIED 0x40000000 + +/* + * The rest of the robust-futex field is for the TID: + */ +#define FUTEX_TID_MASK 0x3fffffff + +/* + * This limit protects against a deliberately circular list. 
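[Illustration, not part of the patch: a userspace sketch of how a thread library might publish its robust list to the kernel. The my_mutex layout and register_robust_list() are hypothetical, the structures are local mirrors of the ABI structs above, and the sketch assumes the architecture defines __NR_set_robust_list.]

    #include <stddef.h>
    #include <unistd.h>
    #include <sys/syscall.h>

    struct robust_list { struct robust_list *next; };   /* mirrors the ABI struct */
    struct robust_list_head {                            /* mirrors the ABI struct */
            struct robust_list list;
            long futex_offset;
            struct robust_list *list_op_pending;
    };

    struct my_mutex {                       /* hypothetical user-space lock layout */
            struct robust_list list;        /* per-lock list entry */
            int futex;                      /* the futex word itself */
    };

    static __thread struct robust_list_head robust_head;

    /* Each thread registers its own head, typically at thread start. */
    static void register_robust_list(void)
    {
            robust_head.list.next = &robust_head.list;   /* empty list points to itself */
            robust_head.futex_offset = offsetof(struct my_mutex, futex) -
                                       offsetof(struct my_mutex, list);
            robust_head.list_op_pending = NULL;
            syscall(__NR_set_robust_list, &robust_head, sizeof(robust_head));
    }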
+ * (Not worth introducing an rlimit for it) + */ +#define ROBUST_LIST_LIMIT 2048 + long do_futex(unsigned long uaddr, int op, int val, unsigned long timeout, unsigned long uaddr2, int val2, int val3); +extern int handle_futex_death(u32 __user *uaddr, struct task_struct *curr); + +#ifdef CONFIG_FUTEX +extern void exit_robust_list(struct task_struct *curr); +#else +static inline void exit_robust_list(struct task_struct *curr) +{ +} +#endif + #define FUTEX_OP_SET 0 /* *(int *)UADDR2 = OPARG; */ #define FUTEX_OP_ADD 1 /* *(int *)UADDR2 += OPARG; */ #define FUTEX_OP_OR 2 /* *(int *)UADDR2 |= OPARG; */ diff --git a/include/linux/gameport.h b/include/linux/gameport.h index 2401dea2b867..9c8e6da2393b 100644 --- a/include/linux/gameport.h +++ b/include/linux/gameport.h @@ -119,7 +119,7 @@ static inline void gameport_set_name(struct gameport *gameport, const char *name } /* - * Use the following fucntions to manipulate gameport's per-port + * Use the following functions to manipulate gameport's per-port * driver-specific data. */ static inline void *gameport_get_drvdata(struct gameport *gameport) @@ -133,7 +133,7 @@ static inline void gameport_set_drvdata(struct gameport *gameport, void *data) } /* - * Use the following fucntions to pin gameport's driver in process context + * Use the following functions to pin gameport's driver in process context */ static inline int gameport_pin_driver(struct gameport *gameport) { diff --git a/include/linux/genhd.h b/include/linux/genhd.h index fd647fde5ec1..10a27f29d692 100644 --- a/include/linux/genhd.h +++ b/include/linux/genhd.h @@ -78,6 +78,7 @@ struct hd_struct { sector_t start_sect; sector_t nr_sects; struct kobject kobj; + struct kobject *holder_dir; unsigned ios[2], sectors[2]; /* READs and WRITEs */ int policy, partno; }; @@ -89,12 +90,12 @@ struct hd_struct { #define GENHD_FL_SUPPRESS_PARTITION_INFO 32 struct disk_stats { - unsigned sectors[2]; /* READs and WRITEs */ - unsigned ios[2]; - unsigned merges[2]; - unsigned ticks[2]; - unsigned io_ticks; - unsigned time_in_queue; + unsigned long sectors[2]; /* READs and WRITEs */ + unsigned long ios[2]; + unsigned long merges[2]; + unsigned long ticks[2]; + unsigned long io_ticks; + unsigned long time_in_queue; }; struct gendisk { @@ -114,6 +115,8 @@ struct gendisk { int number; /* more of the same */ struct device *driverfs_dev; struct kobject kobj; + struct kobject *holder_dir; + struct kobject *slave_dir; struct timer_rand_state *random; int policy; @@ -149,14 +152,14 @@ struct disk_attribute { ({ \ typeof(gendiskp->dkstats->field) res = 0; \ int i; \ - for_each_cpu(i) \ + for_each_possible_cpu(i) \ res += per_cpu_ptr(gendiskp->dkstats, i)->field; \ res; \ }) static inline void disk_stat_set_all(struct gendisk *gendiskp, int value) { int i; - for_each_cpu(i) + for_each_possible_cpu(i) memset(per_cpu_ptr(gendiskp->dkstats, i), value, sizeof (struct disk_stats)); } diff --git a/include/linux/gigaset_dev.h b/include/linux/gigaset_dev.h new file mode 100644 index 000000000000..70ad09c8ad1e --- /dev/null +++ b/include/linux/gigaset_dev.h @@ -0,0 +1,32 @@ +/* + * interface to user space for the gigaset driver + * + * Copyright (c) 2004 by Hansjoerg Lipp <hjlipp@web.de> + * + * ===================================================================== + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of + * the License, or (at your option) any later version. 
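[Illustration, not part of the patch: the same for_each_cpu() -> for_each_possible_cpu() conversion shown above for the genhd statistics, applied to a driver-private per-CPU counter; the mydev_* names are hypothetical.]

    #include <linux/percpu.h>
    #include <linux/cpumask.h>

    struct mydev_stats {
            unsigned long packets;
    };

    static DEFINE_PER_CPU(struct mydev_stats, mydev_stats);

    static unsigned long mydev_total_packets(void)
    {
            unsigned long sum = 0;
            int cpu;

            /* Walk every possible CPU, not only the online ones, so counts
             * accumulated on a CPU that has since been hot-unplugged are
             * not lost. */
            for_each_possible_cpu(cpu)
                    sum += per_cpu(mydev_stats, cpu).packets;

            return sum;
    }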
+ * ===================================================================== + * Version: $Id: gigaset_dev.h,v 1.4.4.4 2005/11/21 22:28:09 hjlipp Exp $ + * ===================================================================== + */ + +#ifndef GIGASET_INTERFACE_H +#define GIGASET_INTERFACE_H + +#include <linux/ioctl.h> + +#define GIGASET_IOCTL 0x47 + +#define GIGVER_DRIVER 0 +#define GIGVER_COMPAT 1 +#define GIGVER_FWBASE 2 + +#define GIGASET_REDIR _IOWR (GIGASET_IOCTL, 0, int) +#define GIGASET_CONFIG _IOWR (GIGASET_IOCTL, 1, int) +#define GIGASET_BRKCHARS _IOW (GIGASET_IOCTL, 2, unsigned char[6]) //FIXME [6] okay? +#define GIGASET_VERSION _IOWR (GIGASET_IOCTL, 3, unsigned[4]) + +#endif diff --git a/include/linux/highmem.h b/include/linux/highmem.h index 6bece9280eb7..892c4ea1b425 100644 --- a/include/linux/highmem.h +++ b/include/linux/highmem.h @@ -7,6 +7,18 @@ #include <asm/cacheflush.h> +#ifndef ARCH_HAS_FLUSH_ANON_PAGE +static inline void flush_anon_page(struct page *page, unsigned long vmaddr) +{ +} +#endif + +#ifndef ARCH_HAS_FLUSH_KERNEL_DCACHE_PAGE +static inline void flush_kernel_dcache_page(struct page *page) +{ +} +#endif + #ifdef CONFIG_HIGHMEM #include <asm/highmem.h> diff --git a/include/linux/hpet.h b/include/linux/hpet.h index 27238194b212..707f7cb9e795 100644 --- a/include/linux/hpet.h +++ b/include/linux/hpet.h @@ -3,6 +3,8 @@ #include <linux/compiler.h> +#ifdef __KERNEL__ + /* * Offsets into HPET Registers */ @@ -85,22 +87,6 @@ struct hpet { #define Tn_FSB_INT_ADDR_SHIFT (32UL) #define Tn_FSB_INT_VAL_MASK (0x00000000ffffffffULL) -struct hpet_info { - unsigned long hi_ireqfreq; /* Hz */ - unsigned long hi_flags; /* information */ - unsigned short hi_hpet; - unsigned short hi_timer; -}; - -#define HPET_INFO_PERIODIC 0x0001 /* timer is periodic */ - -#define HPET_IE_ON _IO('h', 0x01) /* interrupt on */ -#define HPET_IE_OFF _IO('h', 0x02) /* interrupt off */ -#define HPET_INFO _IOR('h', 0x03, struct hpet_info) -#define HPET_EPI _IO('h', 0x04) /* enable periodic */ -#define HPET_DPI _IO('h', 0x05) /* disable periodic */ -#define HPET_IRQFREQ _IOW('h', 0x6, unsigned long) /* IRQFREQ usec */ - /* * exported interfaces */ @@ -133,4 +119,22 @@ int hpet_register(struct hpet_task *, int); int hpet_unregister(struct hpet_task *); int hpet_control(struct hpet_task *, unsigned int, unsigned long); +#endif /* __KERNEL__ */ + +struct hpet_info { + unsigned long hi_ireqfreq; /* Hz */ + unsigned long hi_flags; /* information */ + unsigned short hi_hpet; + unsigned short hi_timer; +}; + +#define HPET_INFO_PERIODIC 0x0001 /* timer is periodic */ + +#define HPET_IE_ON _IO('h', 0x01) /* interrupt on */ +#define HPET_IE_OFF _IO('h', 0x02) /* interrupt off */ +#define HPET_INFO _IOR('h', 0x03, struct hpet_info) +#define HPET_EPI _IO('h', 0x04) /* enable periodic */ +#define HPET_DPI _IO('h', 0x05) /* disable periodic */ +#define HPET_IRQFREQ _IOW('h', 0x6, unsigned long) /* IRQFREQ usec */ + #endif /* !__HPET__ */ diff --git a/include/linux/hrtimer.h b/include/linux/hrtimer.h index 6401c31d6add..93830158348e 100644 --- a/include/linux/hrtimer.h +++ b/include/linux/hrtimer.h @@ -34,15 +34,7 @@ enum hrtimer_restart { HRTIMER_RESTART, }; -/* - * Timer states: - */ -enum hrtimer_state { - HRTIMER_INACTIVE, /* Timer is inactive */ - HRTIMER_EXPIRED, /* Timer is expired */ - HRTIMER_RUNNING, /* Timer is running the callback function */ - HRTIMER_PENDING, /* Timer is pending */ -}; +#define HRTIMER_INACTIVE ((void *)1UL) struct hrtimer_base; @@ -53,9 +45,7 @@ struct hrtimer_base; * @expires: the absolute 
expiry time in the hrtimers internal * representation. The time is related to the clock on * which the timer is based. - * @state: state of the timer * @function: timer expiry callback function - * @data: argument for the callback function * @base: pointer to the timer base (per cpu and per clock) * * The hrtimer structure must be initialized by init_hrtimer_#CLOCKTYPE() @@ -63,23 +53,23 @@ struct hrtimer_base; struct hrtimer { struct rb_node node; ktime_t expires; - enum hrtimer_state state; - int (*function)(void *); - void *data; + int (*function)(struct hrtimer *); struct hrtimer_base *base; }; /** * struct hrtimer_base - the timer base for a specific clock * - * @index: clock type index for per_cpu support when moving a timer - * to a base on another cpu. - * @lock: lock protecting the base and associated timers - * @active: red black tree root node for the active timers - * @first: pointer to the timer node which expires first - * @resolution: the resolution of the clock, in nanoseconds - * @get_time: function to retrieve the current time of the clock - * @curr_timer: the timer which is executing a callback right now + * @index: clock type index for per_cpu support when moving a timer + * to a base on another cpu. + * @lock: lock protecting the base and associated timers + * @active: red black tree root node for the active timers + * @first: pointer to the timer node which expires first + * @resolution: the resolution of the clock, in nanoseconds + * @get_time: function to retrieve the current time of the clock + * @get_softirq_time: function to retrieve the current time from the softirq + * @curr_timer: the timer which is executing a callback right now + * @softirq_time: the time when running the hrtimer queue in the softirq */ struct hrtimer_base { clockid_t index; @@ -88,7 +78,9 @@ struct hrtimer_base { struct rb_node *first; ktime_t resolution; ktime_t (*get_time)(void); + ktime_t (*get_softirq_time)(void); struct hrtimer *curr_timer; + ktime_t softirq_time; }; /* @@ -122,11 +114,12 @@ extern ktime_t hrtimer_get_next_event(void); static inline int hrtimer_active(const struct hrtimer *timer) { - return timer->state == HRTIMER_PENDING; + return timer->node.rb_parent != HRTIMER_INACTIVE; } /* Forward a hrtimer so it expires after now: */ -extern unsigned long hrtimer_forward(struct hrtimer *timer, ktime_t interval); +extern unsigned long +hrtimer_forward(struct hrtimer *timer, ktime_t now, ktime_t interval); /* Precise sleep: */ extern long hrtimer_nanosleep(struct timespec *rqtp, diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h index d6f1019625af..4c5e610fe442 100644 --- a/include/linux/hugetlb.h +++ b/include/linux/hugetlb.h @@ -154,7 +154,7 @@ static inline struct hugetlbfs_sb_info *HUGETLBFS_SB(struct super_block *sb) return sb->s_fs_info; } -extern struct file_operations hugetlbfs_file_operations; +extern const struct file_operations hugetlbfs_file_operations; extern struct vm_operations_struct hugetlb_vm_ops; struct file *hugetlb_zero_setup(size_t); int hugetlb_extend_reservation(struct hugetlbfs_inode_info *info, diff --git a/include/linux/i2c-id.h b/include/linux/i2c-id.h index 679b46a6a565..c8b81f419fd8 100644 --- a/include/linux/i2c-id.h +++ b/include/linux/i2c-id.h @@ -108,6 +108,10 @@ #define I2C_DRIVERID_UPD64083 78 /* upd64083 video processor */ #define I2C_DRIVERID_UPD64031A 79 /* upd64031a video processor */ #define I2C_DRIVERID_SAA717X 80 /* saa717x video encoder */ +#define I2C_DRIVERID_DS1672 81 /* Dallas/Maxim DS1672 RTC */ +#define 
I2C_DRIVERID_X1205 82 /* Xicor/Intersil X1205 RTC */ +#define I2C_DRIVERID_PCF8563 83 /* Philips PCF8563 RTC */ +#define I2C_DRIVERID_RS5C372 84 /* Ricoh RS5C372 RTC */ #define I2C_DRIVERID_I2CDEV 900 #define I2C_DRIVERID_ARP 902 /* SMBus ARP Client */ diff --git a/include/linux/i2o.h b/include/linux/i2o.h index 5a9d8c599171..dd7d627bf66f 100644 --- a/include/linux/i2o.h +++ b/include/linux/i2o.h @@ -950,9 +950,7 @@ static inline int i2o_pool_alloc(struct i2o_pool *pool, const char *name, if (!pool->slab) goto free_name; - pool->mempool = - mempool_create(min_nr, mempool_alloc_slab, mempool_free_slab, - pool->slab); + pool->mempool = mempool_create_slab_pool(min_nr, pool->slab); if (!pool->mempool) goto free_slab; diff --git a/include/linux/input.h b/include/linux/input.h index 6d4cc3c110d6..1d4e341b72e6 100644 --- a/include/linux/input.h +++ b/include/linux/input.h @@ -957,7 +957,7 @@ struct input_handler { struct input_handle* (*connect)(struct input_handler *handler, struct input_dev *dev, struct input_device_id *id); void (*disconnect)(struct input_handle *handle); - struct file_operations *fops; + const struct file_operations *fops; int minor; char *name; diff --git a/include/linux/ipmi.h b/include/linux/ipmi.h index d6276e60b3bf..0a84b56935c2 100644 --- a/include/linux/ipmi.h +++ b/include/linux/ipmi.h @@ -36,6 +36,7 @@ #include <linux/ipmi_msgdefs.h> #include <linux/compiler.h> +#include <linux/device.h> /* * This file describes an interface to an IPMI driver. You have to @@ -397,7 +398,7 @@ struct ipmi_smi_watcher the watcher list. So you can add and remove users from the IPMI interface, send messages, etc., but you cannot add or remove SMI watchers or SMI interfaces. */ - void (*new_smi)(int if_num); + void (*new_smi)(int if_num, struct device *dev); void (*smi_gone)(int if_num); }; diff --git a/include/linux/ipmi_msgdefs.h b/include/linux/ipmi_msgdefs.h index 03bc64dc2ec1..22f5e2afda4f 100644 --- a/include/linux/ipmi_msgdefs.h +++ b/include/linux/ipmi_msgdefs.h @@ -47,6 +47,7 @@ #define IPMI_NETFN_APP_RESPONSE 0x07 #define IPMI_GET_DEVICE_ID_CMD 0x01 #define IPMI_CLEAR_MSG_FLAGS_CMD 0x30 +#define IPMI_GET_DEVICE_GUID_CMD 0x08 #define IPMI_GET_MSG_FLAGS_CMD 0x31 #define IPMI_SEND_MSG_CMD 0x34 #define IPMI_GET_MSG_CMD 0x33 diff --git a/include/linux/ipmi_smi.h b/include/linux/ipmi_smi.h index e36ee157ad67..53571288a9fc 100644 --- a/include/linux/ipmi_smi.h +++ b/include/linux/ipmi_smi.h @@ -37,6 +37,9 @@ #include <linux/ipmi_msgdefs.h> #include <linux/proc_fs.h> #include <linux/module.h> +#include <linux/device.h> +#include <linux/platform_device.h> +#include <linux/ipmi_smi.h> /* This files describes the interface for IPMI system management interface drivers to bind into the IPMI message handler. */ @@ -113,12 +116,52 @@ struct ipmi_smi_handlers void (*dec_usecount)(void *send_info); }; +struct ipmi_device_id { + unsigned char device_id; + unsigned char device_revision; + unsigned char firmware_revision_1; + unsigned char firmware_revision_2; + unsigned char ipmi_version; + unsigned char additional_device_support; + unsigned int manufacturer_id; + unsigned int product_id; + unsigned char aux_firmware_revision[4]; + unsigned int aux_firmware_revision_set : 1; +}; + +#define ipmi_version_major(v) ((v)->ipmi_version & 0xf) +#define ipmi_version_minor(v) ((v)->ipmi_version >> 4) + +/* Take a pointer to a raw data buffer and a length and extract device + id information from it. The first byte of data must point to the + byte from the get device id response after the completion code. 
+ The caller is responsible for making sure the length is at least + 11 and the command completed without error. */ +static inline void ipmi_demangle_device_id(unsigned char *data, + unsigned int data_len, + struct ipmi_device_id *id) +{ + id->device_id = data[0]; + id->device_revision = data[1]; + id->firmware_revision_1 = data[2]; + id->firmware_revision_2 = data[3]; + id->ipmi_version = data[4]; + id->additional_device_support = data[5]; + id->manufacturer_id = data[6] | (data[7] << 8) | (data[8] << 16); + id->product_id = data[9] | (data[10] << 8); + if (data_len >= 15) { + memcpy(id->aux_firmware_revision, data+11, 4); + id->aux_firmware_revision_set = 1; + } else + id->aux_firmware_revision_set = 0; +} + /* Add a low-level interface to the IPMI driver. Note that if the interface doesn't know its slave address, it should pass in zero. */ int ipmi_register_smi(struct ipmi_smi_handlers *handlers, void *send_info, - unsigned char version_major, - unsigned char version_minor, + struct ipmi_device_id *device_id, + struct device *dev, unsigned char slave_addr, ipmi_smi_t *intf); diff --git a/include/linux/jbd.h b/include/linux/jbd.h index 4fc7dffd66ef..6a425e370cb3 100644 --- a/include/linux/jbd.h +++ b/include/linux/jbd.h @@ -895,7 +895,7 @@ extern int journal_dirty_metadata (handle_t *, struct buffer_head *); extern void journal_release_buffer (handle_t *, struct buffer_head *); extern int journal_forget (handle_t *, struct buffer_head *); extern void journal_sync_buffer (struct buffer_head *); -extern int journal_invalidatepage(journal_t *, +extern void journal_invalidatepage(journal_t *, struct page *, unsigned long); extern int journal_try_to_free_buffers(journal_t *, struct page *, gfp_t); extern int journal_stop(handle_t *); diff --git a/include/linux/kernel.h b/include/linux/kernel.h index 03d6cfaa5b8a..a3720f973ea5 100644 --- a/include/linux/kernel.h +++ b/include/linux/kernel.h @@ -87,7 +87,7 @@ extern int cond_resched(void); (__x < 0) ? -__x : __x; \ }) -extern struct notifier_block *panic_notifier_list; +extern struct atomic_notifier_head panic_notifier_list; extern long (*panic_blink)(long time); NORET_TYPE void panic(const char * fmt, ...) 
__attribute__ ((NORET_AND format (printf, 1, 2))); diff --git a/include/linux/kernel_stat.h b/include/linux/kernel_stat.h index a484572c302e..b46249082cca 100644 --- a/include/linux/kernel_stat.h +++ b/include/linux/kernel_stat.h @@ -46,7 +46,7 @@ static inline int kstat_irqs(int irq) { int cpu, sum = 0; - for_each_cpu(cpu) + for_each_possible_cpu(cpu) sum += kstat_cpu(cpu).irqs[irq]; return sum; diff --git a/include/linux/ktime.h b/include/linux/ktime.h index f3dec45ef874..62bc57580707 100644 --- a/include/linux/ktime.h +++ b/include/linux/ktime.h @@ -64,9 +64,6 @@ typedef union { #if (BITS_PER_LONG == 64) || defined(CONFIG_KTIME_SCALAR) -/* Define a ktime_t variable and initialize it to zero: */ -#define DEFINE_KTIME(kt) ktime_t kt = { .tv64 = 0 } - /** * ktime_set - Set a ktime_t variable from a seconds/nanoseconds value * @@ -113,9 +110,6 @@ static inline ktime_t timeval_to_ktime(struct timeval tv) /* Map the ktime_t to timeval conversion to ns_to_timeval function */ #define ktime_to_timeval(kt) ns_to_timeval((kt).tv64) -/* Map the ktime_t to clock_t conversion to the inline in jiffies.h: */ -#define ktime_to_clock_t(kt) nsec_to_clock_t((kt).tv64) - /* Convert ktime_t to nanoseconds - NOP in the scalar storage format: */ #define ktime_to_ns(kt) ((kt).tv64) @@ -136,9 +130,6 @@ static inline ktime_t timeval_to_ktime(struct timeval tv) * tv.sec < 0 and 0 >= tv.nsec < NSEC_PER_SEC */ -/* Define a ktime_t variable and initialize it to zero: */ -#define DEFINE_KTIME(kt) ktime_t kt = { .tv64 = 0 } - /* Set a ktime_t variable to a value in sec/nsec representation: */ static inline ktime_t ktime_set(const long secs, const unsigned long nsecs) { @@ -255,17 +246,6 @@ static inline struct timeval ktime_to_timeval(const ktime_t kt) } /** - * ktime_to_clock_t - convert a ktime_t variable to clock_t format - * @kt: the ktime_t variable to convert - * - * Returns a clock_t variable with the converted value - */ -static inline clock_t ktime_to_clock_t(const ktime_t kt) -{ - return nsec_to_clock_t( (u64) kt.tv.sec * NSEC_PER_SEC + kt.tv.nsec); -} - -/** * ktime_to_ns - convert a ktime_t variable to scalar nanoseconds * @kt: the ktime_t variable to convert * diff --git a/include/linux/m48t86.h b/include/linux/m48t86.h new file mode 100644 index 000000000000..9065199319d0 --- /dev/null +++ b/include/linux/m48t86.h @@ -0,0 +1,16 @@ +/* + * ST M48T86 / Dallas DS12887 RTC driver + * Copyright (c) 2006 Tower Technologies + * + * Author: Alessandro Zummo <a.zummo@towertech.it> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
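[Illustration, not part of the patch: registering a callout on panic_notifier_list now that it is an atomic_notifier_head (see the kernel.h hunk above); the my_* names are hypothetical.]

    #include <linux/kernel.h>
    #include <linux/notifier.h>
    #include <linux/init.h>

    static int my_panic_event(struct notifier_block *nb, unsigned long event,
                              void *ptr)
    {
            /* Runs in atomic context; must not block. */
            return NOTIFY_DONE;
    }

    static struct notifier_block my_panic_block = {
            .notifier_call = my_panic_event,
    };

    static int __init my_driver_init(void)
    {
            atomic_notifier_chain_register(&panic_notifier_list, &my_panic_block);
            return 0;
    }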
+*/ + +struct m48t86_ops +{ + void (*writeb)(unsigned char value, unsigned long addr); + unsigned char (*readb)(unsigned long addr); +}; diff --git a/include/linux/memory.h b/include/linux/memory.h index e251dc43d0f5..8f04143ca363 100644 --- a/include/linux/memory.h +++ b/include/linux/memory.h @@ -77,7 +77,6 @@ extern int remove_memory_block(unsigned long, struct mem_section *, int); #define CONFIG_MEM_BLOCK_SIZE (PAGES_PER_SECTION<<PAGE_SHIFT) -struct notifier_block; #endif /* CONFIG_MEMORY_HOTPLUG */ diff --git a/include/linux/mempool.h b/include/linux/mempool.h index f2427d7394b0..9be484d11283 100644 --- a/include/linux/mempool.h +++ b/include/linux/mempool.h @@ -6,6 +6,8 @@ #include <linux/wait.h> +struct kmem_cache; + typedef void * (mempool_alloc_t)(gfp_t gfp_mask, void *pool_data); typedef void (mempool_free_t)(void *element, void *pool_data); @@ -37,5 +39,41 @@ extern void mempool_free(void *element, mempool_t *pool); */ void *mempool_alloc_slab(gfp_t gfp_mask, void *pool_data); void mempool_free_slab(void *element, void *pool_data); +static inline mempool_t * +mempool_create_slab_pool(int min_nr, struct kmem_cache *kc) +{ + return mempool_create(min_nr, mempool_alloc_slab, mempool_free_slab, + (void *) kc); +} + +/* + * 2 mempool_alloc_t's and a mempool_free_t to kmalloc/kzalloc and kfree + * the amount of memory specified by pool_data + */ +void *mempool_kmalloc(gfp_t gfp_mask, void *pool_data); +void *mempool_kzalloc(gfp_t gfp_mask, void *pool_data); +void mempool_kfree(void *element, void *pool_data); +static inline mempool_t *mempool_create_kmalloc_pool(int min_nr, size_t size) +{ + return mempool_create(min_nr, mempool_kmalloc, mempool_kfree, + (void *) size); +} +static inline mempool_t *mempool_create_kzalloc_pool(int min_nr, size_t size) +{ + return mempool_create(min_nr, mempool_kzalloc, mempool_kfree, + (void *) size); +} + +/* + * A mempool_alloc_t and mempool_free_t for a simple page allocator that + * allocates pages of the order specified by pool_data + */ +void *mempool_alloc_pages(gfp_t gfp_mask, void *pool_data); +void mempool_free_pages(void *element, void *pool_data); +static inline mempool_t *mempool_create_page_pool(int min_nr, int order) +{ + return mempool_create(min_nr, mempool_alloc_pages, mempool_free_pages, + (void *)(long)order); +} #endif /* _LINUX_MEMPOOL_H */ diff --git a/include/linux/miscdevice.h b/include/linux/miscdevice.h index 14ceebfc1efa..5b584dafb5a6 100644 --- a/include/linux/miscdevice.h +++ b/include/linux/miscdevice.h @@ -36,7 +36,7 @@ struct class_device; struct miscdevice { int minor; const char *name; - struct file_operations *fops; + const struct file_operations *fops; struct list_head list; struct device *dev; struct class_device *class; diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h index ebfc238cc243..b5c21122c299 100644 --- a/include/linux/mmzone.h +++ b/include/linux/mmzone.h @@ -13,6 +13,7 @@ #include <linux/numa.h> #include <linux/init.h> #include <linux/seqlock.h> +#include <linux/nodemask.h> #include <asm/atomic.h> /* Free memory management - zoned buddy allocator. */ @@ -225,7 +226,6 @@ struct zone { * Discontig memory support fields. 
*/ struct pglist_data *zone_pgdat; - struct page *zone_mem_map; /* zone_start_pfn == zone_start_paddr >> PAGE_SHIFT */ unsigned long zone_start_pfn; @@ -307,7 +307,6 @@ typedef struct pglist_data { unsigned long node_spanned_pages; /* total size of physical page range, including holes */ int node_id; - struct pglist_data *pgdat_next; wait_queue_head_t kswapd_wait; struct task_struct *kswapd; int kswapd_max_order; @@ -324,8 +323,6 @@ typedef struct pglist_data { #include <linux/memory_hotplug.h> -extern struct pglist_data *pgdat_list; - void __get_zone_counts(unsigned long *active, unsigned long *inactive, unsigned long *free, struct pglist_data *pgdat); void get_zone_counts(unsigned long *active, unsigned long *inactive, @@ -350,57 +347,6 @@ unsigned long __init node_memmap_size_bytes(int, unsigned long, unsigned long); */ #define zone_idx(zone) ((zone) - (zone)->zone_pgdat->node_zones) -/** - * for_each_pgdat - helper macro to iterate over all nodes - * @pgdat - pointer to a pg_data_t variable - * - * Meant to help with common loops of the form - * pgdat = pgdat_list; - * while(pgdat) { - * ... - * pgdat = pgdat->pgdat_next; - * } - */ -#define for_each_pgdat(pgdat) \ - for (pgdat = pgdat_list; pgdat; pgdat = pgdat->pgdat_next) - -/* - * next_zone - helper magic for for_each_zone() - * Thanks to William Lee Irwin III for this piece of ingenuity. - */ -static inline struct zone *next_zone(struct zone *zone) -{ - pg_data_t *pgdat = zone->zone_pgdat; - - if (zone < pgdat->node_zones + MAX_NR_ZONES - 1) - zone++; - else if (pgdat->pgdat_next) { - pgdat = pgdat->pgdat_next; - zone = pgdat->node_zones; - } else - zone = NULL; - - return zone; -} - -/** - * for_each_zone - helper macro to iterate over all memory zones - * @zone - pointer to struct zone variable - * - * The user only needs to declare the zone variable, for_each_zone - * fills it in. This basically means for_each_zone() is an - * easier to read version of this piece of code: - * - * for (pgdat = pgdat_list; pgdat; pgdat = pgdat->node_next) - * for (i = 0; i < MAX_NR_ZONES; ++i) { - * struct zone * z = pgdat->node_zones + i; - * ... - * } - * } - */ -#define for_each_zone(zone) \ - for (zone = pgdat_list->node_zones; zone; zone = next_zone(zone)) - static inline int populated_zone(struct zone *zone) { return (!!zone->present_pages); @@ -472,6 +418,30 @@ extern struct pglist_data contig_page_data; #endif /* !CONFIG_NEED_MULTIPLE_NODES */ +extern struct pglist_data *first_online_pgdat(void); +extern struct pglist_data *next_online_pgdat(struct pglist_data *pgdat); +extern struct zone *next_zone(struct zone *zone); + +/** + * for_each_pgdat - helper macro to iterate over all nodes + * @pgdat - pointer to a pg_data_t variable + */ +#define for_each_online_pgdat(pgdat) \ + for (pgdat = first_online_pgdat(); \ + pgdat; \ + pgdat = next_online_pgdat(pgdat)) +/** + * for_each_zone - helper macro to iterate over all memory zones + * @zone - pointer to struct zone variable + * + * The user only needs to declare the zone variable, for_each_zone + * fills it in. 
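[Illustration, not part of the patch: walking nodes and zones with the new iterators that replace pgdat_list and the old for_each_pgdat(); dump_zone_sizes() is hypothetical.]

    #include <linux/mmzone.h>
    #include <linux/kernel.h>

    static void dump_zone_sizes(void)
    {
            struct pglist_data *pgdat;
            struct zone *zone;

            for_each_online_pgdat(pgdat)
                    printk(KERN_DEBUG "node %d: %lu pages spanned\n",
                           pgdat->node_id, pgdat->node_spanned_pages);

            for_each_zone(zone)
                    if (populated_zone(zone))
                            printk(KERN_DEBUG "zone %s: %lu pages present\n",
                                   zone->name, zone->present_pages);
    }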
+ */ +#define for_each_zone(zone) \ + for (zone = (first_online_pgdat())->node_zones; \ + zone; \ + zone = next_zone(zone)) + #ifdef CONFIG_SPARSEMEM #include <asm/sparsemem.h> #endif @@ -602,17 +572,6 @@ static inline struct mem_section *__pfn_to_section(unsigned long pfn) return __nr_to_section(pfn_to_section_nr(pfn)); } -#define pfn_to_page(pfn) \ -({ \ - unsigned long __pfn = (pfn); \ - __section_mem_map_addr(__pfn_to_section(__pfn)) + __pfn; \ -}) -#define page_to_pfn(page) \ -({ \ - page - __section_mem_map_addr(__nr_to_section( \ - page_to_section(page))); \ -}) - static inline int pfn_valid(unsigned long pfn) { if (pfn_to_section_nr(pfn) >= NR_MEM_SECTIONS) diff --git a/include/linux/msdos_fs.h b/include/linux/msdos_fs.h index 779e6a5744c7..d9035c73e5d1 100644 --- a/include/linux/msdos_fs.h +++ b/include/linux/msdos_fs.h @@ -334,7 +334,7 @@ extern int fat_bmap(struct inode *inode, sector_t sector, sector_t *phys, unsigned long *mapped_blocks); /* fat/dir.c */ -extern struct file_operations fat_dir_operations; +extern const struct file_operations fat_dir_operations; extern int fat_search_long(struct inode *inode, const unsigned char *name, int name_len, struct fat_slot_info *sinfo); extern int fat_dir_empty(struct inode *dir); @@ -397,7 +397,7 @@ extern int fat_count_free_clusters(struct super_block *sb); /* fat/file.c */ extern int fat_generic_ioctl(struct inode *inode, struct file *filp, unsigned int cmd, unsigned long arg); -extern struct file_operations fat_file_operations; +extern const struct file_operations fat_file_operations; extern struct inode_operations fat_file_inode_operations; extern int fat_notify_change(struct dentry * dentry, struct iattr * attr); extern void fat_truncate(struct inode *inode); @@ -420,6 +420,9 @@ extern int date_dos2unix(unsigned short time, unsigned short date); extern void fat_date_unix2dos(int unix_date, __le16 *time, __le16 *date); extern int fat_sync_bhs(struct buffer_head **bhs, int nr_bhs); +int fat_cache_init(void); +void fat_cache_destroy(void); + #endif /* __KERNEL__ */ #endif diff --git a/include/linux/ncp_fs.h b/include/linux/ncp_fs.h index e01342568530..96dc237b8f03 100644 --- a/include/linux/ncp_fs.h +++ b/include/linux/ncp_fs.h @@ -209,7 +209,7 @@ void ncp_update_inode2(struct inode *, struct ncp_entry_info *); /* linux/fs/ncpfs/dir.c */ extern struct inode_operations ncp_dir_inode_operations; -extern struct file_operations ncp_dir_operations; +extern const struct file_operations ncp_dir_operations; int ncp_conn_logged_in(struct super_block *); int ncp_date_dos2unix(__le16 time, __le16 date); void ncp_date_unix2dos(int unix_date, __le16 * time, __le16 * date); @@ -230,7 +230,7 @@ void ncp_unlock_server(struct ncp_server *server); /* linux/fs/ncpfs/file.c */ extern struct inode_operations ncp_file_inode_operations; -extern struct file_operations ncp_file_operations; +extern const struct file_operations ncp_file_operations; int ncp_make_open(struct inode *, int); /* linux/fs/ncpfs/mmap.c */ diff --git a/include/linux/netfilter_ipv4/ip_conntrack.h b/include/linux/netfilter_ipv4/ip_conntrack.h index f32d75c4f4cf..d54d7b278e96 100644 --- a/include/linux/netfilter_ipv4/ip_conntrack.h +++ b/include/linux/netfilter_ipv4/ip_conntrack.h @@ -308,29 +308,30 @@ DECLARE_PER_CPU(struct ip_conntrack_ecache, ip_conntrack_ecache); #define CONNTRACK_ECACHE(x) (__get_cpu_var(ip_conntrack_ecache).x) -extern struct notifier_block *ip_conntrack_chain; -extern struct notifier_block *ip_conntrack_expect_chain; +extern struct atomic_notifier_head 
ip_conntrack_chain; +extern struct atomic_notifier_head ip_conntrack_expect_chain; static inline int ip_conntrack_register_notifier(struct notifier_block *nb) { - return notifier_chain_register(&ip_conntrack_chain, nb); + return atomic_notifier_chain_register(&ip_conntrack_chain, nb); } static inline int ip_conntrack_unregister_notifier(struct notifier_block *nb) { - return notifier_chain_unregister(&ip_conntrack_chain, nb); + return atomic_notifier_chain_unregister(&ip_conntrack_chain, nb); } static inline int ip_conntrack_expect_register_notifier(struct notifier_block *nb) { - return notifier_chain_register(&ip_conntrack_expect_chain, nb); + return atomic_notifier_chain_register(&ip_conntrack_expect_chain, nb); } static inline int ip_conntrack_expect_unregister_notifier(struct notifier_block *nb) { - return notifier_chain_unregister(&ip_conntrack_expect_chain, nb); + return atomic_notifier_chain_unregister(&ip_conntrack_expect_chain, + nb); } extern void ip_ct_deliver_cached_events(const struct ip_conntrack *ct); @@ -355,14 +356,14 @@ static inline void ip_conntrack_event(enum ip_conntrack_events event, struct ip_conntrack *ct) { if (is_confirmed(ct) && !is_dying(ct)) - notifier_call_chain(&ip_conntrack_chain, event, ct); + atomic_notifier_call_chain(&ip_conntrack_chain, event, ct); } static inline void ip_conntrack_expect_event(enum ip_conntrack_expect_events event, struct ip_conntrack_expect *exp) { - notifier_call_chain(&ip_conntrack_expect_chain, event, exp); + atomic_notifier_call_chain(&ip_conntrack_expect_chain, event, exp); } #else /* CONFIG_IP_NF_CONNTRACK_EVENTS */ static inline void ip_conntrack_event_cache(enum ip_conntrack_events event, diff --git a/include/linux/nfs_fs.h b/include/linux/nfs_fs.h index cbebd7d1b9e8..c71227dd4389 100644 --- a/include/linux/nfs_fs.h +++ b/include/linux/nfs_fs.h @@ -324,7 +324,7 @@ extern struct inode_operations nfs_file_inode_operations; #ifdef CONFIG_NFS_V3 extern struct inode_operations nfs3_file_inode_operations; #endif /* CONFIG_NFS_V3 */ -extern struct file_operations nfs_file_operations; +extern const struct file_operations nfs_file_operations; extern struct address_space_operations nfs_file_aops; static inline struct rpc_cred *nfs_file_cred(struct file *file) @@ -371,7 +371,7 @@ extern struct inode_operations nfs_dir_inode_operations; #ifdef CONFIG_NFS_V3 extern struct inode_operations nfs3_dir_inode_operations; #endif /* CONFIG_NFS_V3 */ -extern struct file_operations nfs_dir_operations; +extern const struct file_operations nfs_dir_operations; extern struct dentry_operations nfs_dentry_operations; extern int nfs_instantiate(struct dentry *dentry, struct nfs_fh *fh, struct nfs_fattr *fattr); diff --git a/include/linux/nfsd/export.h b/include/linux/nfsd/export.h index 6bad4766d3d9..d2a8abb5011a 100644 --- a/include/linux/nfsd/export.h +++ b/include/linux/nfsd/export.h @@ -67,7 +67,8 @@ struct svc_expkey { int ek_fsidtype; u32 ek_fsid[3]; - struct svc_export * ek_export; + struct vfsmount * ek_mnt; + struct dentry * ek_dentry; }; #define EX_SECURE(exp) (!((exp)->ex_flags & NFSEXP_INSECURE_PORT)) @@ -85,9 +86,6 @@ void nfsd_export_shutdown(void); void nfsd_export_flush(void); void exp_readlock(void); void exp_readunlock(void); -struct svc_expkey * exp_find_key(struct auth_domain *clp, - int fsid_type, u32 *fsidv, - struct cache_req *reqp); struct svc_export * exp_get_by_name(struct auth_domain *clp, struct vfsmount *mnt, struct dentry *dentry, @@ -101,35 +99,20 @@ int exp_rootfh(struct auth_domain *, int exp_pseudoroot(struct auth_domain 
*, struct svc_fh *fhp, struct cache_req *creq); int nfserrno(int errno); -extern void expkey_put(struct cache_head *item, struct cache_detail *cd); -extern void svc_export_put(struct cache_head *item, struct cache_detail *cd); -extern struct cache_detail svc_export_cache, svc_expkey_cache; +extern struct cache_detail svc_export_cache; static inline void exp_put(struct svc_export *exp) { - svc_export_put(&exp->h, &svc_export_cache); + cache_put(&exp->h, &svc_export_cache); } static inline void exp_get(struct svc_export *exp) { cache_get(&exp->h); } -static inline struct svc_export * +extern struct svc_export * exp_find(struct auth_domain *clp, int fsid_type, u32 *fsidv, - struct cache_req *reqp) -{ - struct svc_expkey *ek = exp_find_key(clp, fsid_type, fsidv, reqp); - if (ek && !IS_ERR(ek)) { - struct svc_export *exp = ek->ek_export; - int err; - exp_get(exp); - expkey_put(&ek->h, &svc_expkey_cache); - if ((err = cache_check(&svc_export_cache, &exp->h, reqp))) - exp = ERR_PTR(err); - return exp; - } else - return ERR_PTR(PTR_ERR(ek)); -} + struct cache_req *reqp); #endif /* __KERNEL__ */ diff --git a/include/linux/nodemask.h b/include/linux/nodemask.h index b959a4525cbd..1a9ef3e627d1 100644 --- a/include/linux/nodemask.h +++ b/include/linux/nodemask.h @@ -350,11 +350,15 @@ extern nodemask_t node_possible_map; #define num_possible_nodes() nodes_weight(node_possible_map) #define node_online(node) node_isset((node), node_online_map) #define node_possible(node) node_isset((node), node_possible_map) +#define first_online_node first_node(node_online_map) +#define next_online_node(nid) next_node((nid), node_online_map) #else #define num_online_nodes() 1 #define num_possible_nodes() 1 #define node_online(node) ((node) == 0) #define node_possible(node) ((node) == 0) +#define first_online_node 0 +#define next_online_node(nid) (MAX_NUMNODES) #endif #define any_online_node(mask) \ diff --git a/include/linux/notifier.h b/include/linux/notifier.h index 5937dd6053c3..51dbab9710c7 100644 --- a/include/linux/notifier.h +++ b/include/linux/notifier.h @@ -10,25 +10,107 @@ #ifndef _LINUX_NOTIFIER_H #define _LINUX_NOTIFIER_H #include <linux/errno.h> +#include <linux/mutex.h> +#include <linux/rwsem.h> -struct notifier_block -{ - int (*notifier_call)(struct notifier_block *self, unsigned long, void *); +/* + * Notifier chains are of three types: + * + * Atomic notifier chains: Chain callbacks run in interrupt/atomic + * context. Callouts are not allowed to block. + * Blocking notifier chains: Chain callbacks run in process context. + * Callouts are allowed to block. + * Raw notifier chains: There are no restrictions on callbacks, + * registration, or unregistration. All locking and protection + * must be provided by the caller. + * + * atomic_notifier_chain_register() may be called from an atomic context, + * but blocking_notifier_chain_register() must be called from a process + * context. Ditto for the corresponding _unregister() routines. + * + * atomic_notifier_chain_unregister() and blocking_notifier_chain_unregister() + * _must not_ be called from within the call chain. 
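Illustrative aside, not part of the patch: the first_online_node and next_online_node helpers added to include/linux/nodemask.h above make it straightforward to walk the online nodes by hand. A minimal sketch, with invented names (visit_online_nodes, fn):

#include <linux/nodemask.h>

static void visit_online_nodes(void (*fn)(int nid))
{
	int nid;

	/* next_online_node() yields MAX_NUMNODES once the map is exhausted */
	for (nid = first_online_node;
	     nid < MAX_NUMNODES;
	     nid = next_online_node(nid))
		fn(nid);
}

In the single-node case shown in the #else branch above the macros collapse to 0 and MAX_NUMNODES, so the loop body runs exactly once, for node 0.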
+ */ + +struct notifier_block { + int (*notifier_call)(struct notifier_block *, unsigned long, void *); struct notifier_block *next; int priority; }; +struct atomic_notifier_head { + spinlock_t lock; + struct notifier_block *head; +}; + +struct blocking_notifier_head { + struct rw_semaphore rwsem; + struct notifier_block *head; +}; + +struct raw_notifier_head { + struct notifier_block *head; +}; + +#define ATOMIC_INIT_NOTIFIER_HEAD(name) do { \ + spin_lock_init(&(name)->lock); \ + (name)->head = NULL; \ + } while (0) +#define BLOCKING_INIT_NOTIFIER_HEAD(name) do { \ + init_rwsem(&(name)->rwsem); \ + (name)->head = NULL; \ + } while (0) +#define RAW_INIT_NOTIFIER_HEAD(name) do { \ + (name)->head = NULL; \ + } while (0) + +#define ATOMIC_NOTIFIER_INIT(name) { \ + .lock = SPIN_LOCK_UNLOCKED, \ + .head = NULL } +#define BLOCKING_NOTIFIER_INIT(name) { \ + .rwsem = __RWSEM_INITIALIZER((name).rwsem), \ + .head = NULL } +#define RAW_NOTIFIER_INIT(name) { \ + .head = NULL } + +#define ATOMIC_NOTIFIER_HEAD(name) \ + struct atomic_notifier_head name = \ + ATOMIC_NOTIFIER_INIT(name) +#define BLOCKING_NOTIFIER_HEAD(name) \ + struct blocking_notifier_head name = \ + BLOCKING_NOTIFIER_INIT(name) +#define RAW_NOTIFIER_HEAD(name) \ + struct raw_notifier_head name = \ + RAW_NOTIFIER_INIT(name) #ifdef __KERNEL__ -extern int notifier_chain_register(struct notifier_block **list, struct notifier_block *n); -extern int notifier_chain_unregister(struct notifier_block **nl, struct notifier_block *n); -extern int notifier_call_chain(struct notifier_block **n, unsigned long val, void *v); +extern int atomic_notifier_chain_register(struct atomic_notifier_head *, + struct notifier_block *); +extern int blocking_notifier_chain_register(struct blocking_notifier_head *, + struct notifier_block *); +extern int raw_notifier_chain_register(struct raw_notifier_head *, + struct notifier_block *); + +extern int atomic_notifier_chain_unregister(struct atomic_notifier_head *, + struct notifier_block *); +extern int blocking_notifier_chain_unregister(struct blocking_notifier_head *, + struct notifier_block *); +extern int raw_notifier_chain_unregister(struct raw_notifier_head *, + struct notifier_block *); + +extern int atomic_notifier_call_chain(struct atomic_notifier_head *, + unsigned long val, void *v); +extern int blocking_notifier_call_chain(struct blocking_notifier_head *, + unsigned long val, void *v); +extern int raw_notifier_call_chain(struct raw_notifier_head *, + unsigned long val, void *v); #define NOTIFY_DONE 0x0000 /* Don't care */ #define NOTIFY_OK 0x0001 /* Suits me */ #define NOTIFY_STOP_MASK 0x8000 /* Don't call further */ -#define NOTIFY_BAD (NOTIFY_STOP_MASK|0x0002) /* Bad/Veto action */ +#define NOTIFY_BAD (NOTIFY_STOP_MASK|0x0002) + /* Bad/Veto action */ /* * Clean way to return from the notifier and stop further calls. */ diff --git a/include/linux/oprofile.h b/include/linux/oprofile.h index 559c4c38a9c7..0d514b252454 100644 --- a/include/linux/oprofile.h +++ b/include/linux/oprofile.h @@ -61,6 +61,16 @@ void oprofile_arch_exit(void); */ void oprofile_add_sample(struct pt_regs * const regs, unsigned long event); +/** + * Add an extended sample. Use this when the PC is not from the regs, and + * we cannot determine if we're in kernel mode from the regs. + * + * This function does perform a backtrace. + * + */ +void oprofile_add_ext_sample(unsigned long pc, struct pt_regs * const regs, + unsigned long event, int is_kernel); + /* Use this instead when the PC value is not from the regs. Doesn't * backtrace. 
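Illustrative aside, not part of the patch: the reworked include/linux/notifier.h above replaces the single open-coded chain with typed heads. A minimal sketch of the atomic variant, with all names invented:

#include <linux/notifier.h>

static ATOMIC_NOTIFIER_HEAD(example_chain);

static int example_event(struct notifier_block *nb,
			 unsigned long val, void *data)
{
	/* atomic chains may be called from interrupt context: never block here */
	return NOTIFY_OK;
}

static struct notifier_block example_nb = {
	.notifier_call	= example_event,
};

static void example_use(void)
{
	atomic_notifier_chain_register(&example_chain, &example_nb);
	atomic_notifier_call_chain(&example_chain, 1, NULL);	/* 1 is an arbitrary event code */
	atomic_notifier_chain_unregister(&example_chain, &example_nb);
}

Blocking chains follow the same pattern with the blocking_* functions, while raw chains leave all locking to the caller, as the header comment above spells out. The ip_conntrack conversion earlier in this series is exactly this register/call/unregister pattern applied to an existing chain.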
*/ void oprofile_add_pc(unsigned long pc, int is_kernel, unsigned long event); @@ -74,10 +84,10 @@ void oprofile_add_trace(unsigned long eip); * the specified file operations. */ int oprofilefs_create_file(struct super_block * sb, struct dentry * root, - char const * name, struct file_operations * fops); + char const * name, const struct file_operations * fops); int oprofilefs_create_file_perm(struct super_block * sb, struct dentry * root, - char const * name, struct file_operations * fops, int perm); + char const * name, const struct file_operations * fops, int perm); /** Create a file for read/write access to an unsigned long. */ int oprofilefs_create_ulong(struct super_block * sb, struct dentry * root, diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h index 6f080ae59286..e2ab2ac18d6b 100644 --- a/include/linux/pci_ids.h +++ b/include/linux/pci_ids.h @@ -642,6 +642,7 @@ #define PCI_DEVICE_ID_SI_965 0x0965 #define PCI_DEVICE_ID_SI_5511 0x5511 #define PCI_DEVICE_ID_SI_5513 0x5513 +#define PCI_DEVICE_ID_SI_5517 0x5517 #define PCI_DEVICE_ID_SI_5518 0x5518 #define PCI_DEVICE_ID_SI_5571 0x5571 #define PCI_DEVICE_ID_SI_5581 0x5581 @@ -1052,6 +1053,7 @@ #define PCIE_DEVICE_ID_NVIDIA_GEFORCE_6600_ALT2 0x00f2 #define PCIE_DEVICE_ID_NVIDIA_GEFORCE_6200_ALT1 0x00f3 #define PCIE_DEVICE_ID_NVIDIA_GEFORCE_6800_GT 0x00f9 +#define PCIE_DEVICE_ID_NVIDIA_QUADRO_NVS280 0x00fd #define PCI_DEVICE_ID_NVIDIA_GEFORCE_SDR 0x0100 #define PCI_DEVICE_ID_NVIDIA_GEFORCE_DDR 0x0101 #define PCI_DEVICE_ID_NVIDIA_QUADRO 0x0103 diff --git a/include/linux/pfn.h b/include/linux/pfn.h new file mode 100644 index 000000000000..bb01f8b92b56 --- /dev/null +++ b/include/linux/pfn.h @@ -0,0 +1,9 @@ +#ifndef _LINUX_PFN_H_ +#define _LINUX_PFN_H_ + +#define PFN_ALIGN(x) (((unsigned long)(x) + (PAGE_SIZE - 1)) & PAGE_MASK) +#define PFN_UP(x) (((x) + PAGE_SIZE-1) >> PAGE_SHIFT) +#define PFN_DOWN(x) ((x) >> PAGE_SHIFT) +#define PFN_PHYS(x) ((x) << PAGE_SHIFT) + +#endif diff --git a/include/linux/poll.h b/include/linux/poll.h index 8e8f6098508a..51e1b56741fb 100644 --- a/include/linux/poll.h +++ b/include/linux/poll.h @@ -11,6 +11,15 @@ #include <linux/mm.h> #include <asm/uaccess.h> +/* ~832 bytes of stack space used max in sys_select/sys_poll before allocating + additional memory. 
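Worked example, not part of the patch: assuming the usual 4 KiB pages (PAGE_SHIFT == 12), the new include/linux/pfn.h helpers above behave as follows:

	PFN_DOWN(0x1234)  == 0x1	/* page frame containing the address */
	PFN_UP(0x1234)    == 0x2	/* first frame boundary at or above it */
	PFN_ALIGN(0x1234) == 0x2000	/* address rounded up to a page boundary */
	PFN_PHYS(0x2)     == 0x2000	/* frame number back to a physical address */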
*/ +#define MAX_STACK_ALLOC 832 +#define FRONTEND_STACK_ALLOC 256 +#define SELECT_STACK_ALLOC FRONTEND_STACK_ALLOC +#define POLL_STACK_ALLOC FRONTEND_STACK_ALLOC +#define WQUEUES_STACK_ALLOC (MAX_STACK_ALLOC - FRONTEND_STACK_ALLOC) +#define N_INLINE_POLL_ENTRIES (WQUEUES_STACK_ALLOC / sizeof(struct poll_table_entry)) + struct poll_table_struct; /* @@ -33,6 +42,12 @@ static inline void init_poll_funcptr(poll_table *pt, poll_queue_proc qproc) pt->qproc = qproc; } +struct poll_table_entry { + struct file * filp; + wait_queue_t wait; + wait_queue_head_t * wait_address; +}; + /* * Structures and helpers for sys_poll/sys_poll */ @@ -40,6 +55,8 @@ struct poll_wqueues { poll_table pt; struct poll_table_page * table; int error; + int inline_index; + struct poll_table_entry inline_entries[N_INLINE_POLL_ENTRIES]; }; extern void poll_initwait(struct poll_wqueues *pwq); diff --git a/include/linux/proc_fs.h b/include/linux/proc_fs.h index aa6322d45198..135871df9911 100644 --- a/include/linux/proc_fs.h +++ b/include/linux/proc_fs.h @@ -4,6 +4,7 @@ #include <linux/config.h> #include <linux/slab.h> #include <linux/fs.h> +#include <linux/spinlock.h> #include <asm/atomic.h> /* @@ -55,9 +56,9 @@ struct proc_dir_entry { nlink_t nlink; uid_t uid; gid_t gid; - unsigned long size; + loff_t size; struct inode_operations * proc_iops; - struct file_operations * proc_fops; + const struct file_operations * proc_fops; get_info_t *get_info; struct module *owner; struct proc_dir_entry *next, *parent, *subdir; @@ -92,6 +93,8 @@ extern struct proc_dir_entry *proc_bus; extern struct proc_dir_entry *proc_root_driver; extern struct proc_dir_entry *proc_root_kcore; +extern spinlock_t proc_subdir_lock; + extern void proc_root_init(void); extern void proc_misc_init(void); @@ -125,9 +128,9 @@ extern int proc_match(int, const char *,struct proc_dir_entry *); extern int proc_readdir(struct file *, void *, filldir_t); extern struct dentry *proc_lookup(struct inode *, struct dentry *, struct nameidata *); -extern struct file_operations proc_kcore_operations; -extern struct file_operations proc_kmsg_operations; -extern struct file_operations ppc_htab_operations; +extern const struct file_operations proc_kcore_operations; +extern const struct file_operations proc_kmsg_operations; +extern const struct file_operations ppc_htab_operations; /* * proc_tty.c @@ -186,7 +189,7 @@ static inline struct proc_dir_entry *proc_net_create(const char *name, } static inline struct proc_dir_entry *proc_net_fops_create(const char *name, - mode_t mode, struct file_operations *fops) + mode_t mode, const struct file_operations *fops) { struct proc_dir_entry *res = create_proc_entry(name, mode, proc_net); if (res) diff --git a/include/linux/qnx4_fs.h b/include/linux/qnx4_fs.h index fc610bb0f733..27f49c85d5d6 100644 --- a/include/linux/qnx4_fs.h +++ b/include/linux/qnx4_fs.h @@ -118,8 +118,8 @@ extern struct buffer_head *qnx4_bread(struct inode *, int, int); extern struct inode_operations qnx4_file_inode_operations; extern struct inode_operations qnx4_dir_inode_operations; -extern struct file_operations qnx4_file_operations; -extern struct file_operations qnx4_dir_operations; +extern const struct file_operations qnx4_file_operations; +extern const struct file_operations qnx4_dir_operations; extern int qnx4_is_free(struct super_block *sb, long block); extern int qnx4_set_bitmap(struct super_block *sb, long block, int busy); extern int qnx4_create(struct inode *inode, struct dentry *dentry, int mode, struct nameidata *nd); diff --git a/include/linux/raid/md.h 
b/include/linux/raid/md.h index b6e0bcad84e1..66b44e5e0d6e 100644 --- a/include/linux/raid/md.h +++ b/include/linux/raid/md.h @@ -92,7 +92,10 @@ extern void md_super_write(mddev_t *mddev, mdk_rdev_t *rdev, extern void md_super_wait(mddev_t *mddev); extern int sync_page_io(struct block_device *bdev, sector_t sector, int size, struct page *page, int rw); +extern void md_do_sync(mddev_t *mddev); +extern void md_new_event(mddev_t *mddev); +extern void md_update_sb(mddev_t * mddev); #define MD_BUG(x...) { printk("md: bug in file %s, line %d\n", __FILE__, __LINE__); md_print_devices(); } diff --git a/include/linux/raid/md_k.h b/include/linux/raid/md_k.h index 617b9506c760..e2df61f5b09a 100644 --- a/include/linux/raid/md_k.h +++ b/include/linux/raid/md_k.h @@ -132,6 +132,14 @@ struct mddev_s char uuid[16]; + /* If the array is being reshaped, we need to record the + * new shape and an indication of where we are up to. + * This is written to the superblock. + * If reshape_position is MaxSector, then no reshape is happening (yet). + */ + sector_t reshape_position; + int delta_disks, new_level, new_layout, new_chunk; + struct mdk_thread_s *thread; /* management thread */ struct mdk_thread_s *sync_thread; /* doing resync or reconstruct */ sector_t curr_resync; /* blocks scheduled */ @@ -143,6 +151,10 @@ struct mddev_s sector_t resync_mismatches; /* count of sectors where * parity/replica mismatch found */ + + /* allow user-space to request suspension of IO to regions of the array */ + sector_t suspend_lo; + sector_t suspend_hi; /* if zero, use the system-wide default */ int sync_speed_min; int sync_speed_max; @@ -157,6 +169,9 @@ struct mddev_s * DONE: thread is done and is waiting to be reaped * REQUEST: user-space has requested a sync (used with SYNC) * CHECK: user-space request for for check-only, no repair + * RESHAPE: A reshape is happening + * + * If neither SYNC or RESHAPE are set, then it is a recovery. */ #define MD_RECOVERY_RUNNING 0 #define MD_RECOVERY_SYNC 1 @@ -166,10 +181,11 @@ struct mddev_s #define MD_RECOVERY_NEEDED 5 #define MD_RECOVERY_REQUESTED 6 #define MD_RECOVERY_CHECK 7 +#define MD_RECOVERY_RESHAPE 8 unsigned long recovery; int in_sync; /* know to not need resync */ - struct semaphore reconfig_sem; + struct mutex reconfig_mutex; atomic_t active; int changed; /* true if we might need to reread partition info */ @@ -249,7 +265,8 @@ struct mdk_personality int (*spare_active) (mddev_t *mddev); sector_t (*sync_request)(mddev_t *mddev, sector_t sector_nr, int *skipped, int go_faster); int (*resize) (mddev_t *mddev, sector_t sectors); - int (*reshape) (mddev_t *mddev, int raid_disks); + int (*check_reshape) (mddev_t *mddev); + int (*start_reshape) (mddev_t *mddev); int (*reconfig) (mddev_t *mddev, int layout, int chunk_size); /* quiesce moves between quiescence states * 0 - fully active diff --git a/include/linux/raid/md_p.h b/include/linux/raid/md_p.h index c100fa5d4bfa..774e1acfb8c4 100644 --- a/include/linux/raid/md_p.h +++ b/include/linux/raid/md_p.h @@ -102,6 +102,18 @@ typedef struct mdp_device_descriptor_s { #define MD_SB_ERRORS 1 #define MD_SB_BITMAP_PRESENT 8 /* bitmap may be present nearby */ + +/* + * Notes: + * - if an array is being reshaped (restriped) in order to change the + * the number of active devices in the array, 'raid_disks' will be + * the larger of the old and new numbers. 'delta_disks' will + * be the "new - old". So if +ve, raid_disks is the new value, and + * "raid_disks-delta_disks" is the old. 
If -ve, raid_disks is the + * old value and "raid_disks+delta_disks" is the new (smaller) value. + */ + + typedef struct mdp_superblock_s { /* * Constant generic information @@ -146,7 +158,13 @@ typedef struct mdp_superblock_s { __u32 cp_events_hi; /* 10 high-order of checkpoint update count */ #endif __u32 recovery_cp; /* 11 recovery checkpoint sector count */ - __u32 gstate_sreserved[MD_SB_GENERIC_STATE_WORDS - 12]; + /* There are only valid for minor_version > 90 */ + __u64 reshape_position; /* 12,13 next address in array-space for reshape */ + __u32 new_level; /* 14 new level we are reshaping to */ + __u32 delta_disks; /* 15 change in number of raid_disks */ + __u32 new_layout; /* 16 new layout */ + __u32 new_chunk; /* 17 new chunk size (bytes) */ + __u32 gstate_sreserved[MD_SB_GENERIC_STATE_WORDS - 18]; /* * Personality information @@ -207,7 +225,14 @@ struct mdp_superblock_1 { * NOTE: signed, so bitmap can be before superblock * only meaningful of feature_map[0] is set. */ - __u8 pad1[128-100]; /* set to 0 when written */ + + /* These are only valid with feature bit '4' */ + __u64 reshape_position; /* next address in array-space for reshape */ + __u32 new_level; /* new level we are reshaping to */ + __u32 delta_disks; /* change in number of raid_disks */ + __u32 new_layout; /* new layout */ + __u32 new_chunk; /* new chunk size (bytes) */ + __u8 pad1[128-124]; /* set to 0 when written */ /* constant this-device information - 64 bytes */ __u64 data_offset; /* sector start of data, often 0 */ @@ -240,8 +265,9 @@ struct mdp_superblock_1 { /* feature_map bits */ #define MD_FEATURE_BITMAP_OFFSET 1 +#define MD_FEATURE_RESHAPE_ACTIVE 4 -#define MD_FEATURE_ALL 1 +#define MD_FEATURE_ALL 5 #endif diff --git a/include/linux/raid/raid5.h b/include/linux/raid/raid5.h index 394da8207b34..914af667044f 100644 --- a/include/linux/raid/raid5.h +++ b/include/linux/raid/raid5.h @@ -135,6 +135,7 @@ struct stripe_head { atomic_t count; /* nr of active thread/requests */ spinlock_t lock; int bm_seq; /* sequence number for bitmap flushes */ + int disks; /* disks in stripe */ struct r5dev { struct bio req; struct bio_vec vec; @@ -156,6 +157,7 @@ struct stripe_head { #define R5_ReadError 8 /* seen a read error here recently */ #define R5_ReWrite 9 /* have tried to over-write the readerror */ +#define R5_Expanded 10 /* This block now has post-expand data */ /* * Write method */ @@ -174,7 +176,9 @@ struct stripe_head { #define STRIPE_DELAYED 6 #define STRIPE_DEGRADED 7 #define STRIPE_BIT_DELAY 8 - +#define STRIPE_EXPANDING 9 +#define STRIPE_EXPAND_SOURCE 10 +#define STRIPE_EXPAND_READY 11 /* * Plugging: * @@ -211,12 +215,24 @@ struct raid5_private_data { int raid_disks, working_disks, failed_disks; int max_nr_stripes; + /* used during an expand */ + sector_t expand_progress; /* MaxSector when no expand happening */ + sector_t expand_lo; /* from here up to expand_progress it out-of-bounds + * as we haven't flushed the metadata yet + */ + int previous_raid_disks; + struct list_head handle_list; /* stripes needing handling */ struct list_head delayed_list; /* stripes that have plugged requests */ struct list_head bitmap_list; /* stripes delaying awaiting bitmap update */ atomic_t preread_active_stripes; /* stripes with scheduled io */ - char cache_name[20]; + atomic_t reshape_stripes; /* stripes with pending writes for reshape */ + /* unfortunately we need two cache names as we temporarily have + * two caches. 
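Worked example, not part of the patch, using invented array sizes: growing a 5-device RAID5 to 7 devices records raid_disks = 7 (the larger of the old and new widths) and delta_disks = +2, so the old width is recovered as raid_disks - delta_disks = 5. An in-progress reshape is advertised through feature bit 4 (MD_FEATURE_RESHAPE_ACTIVE), which is why MD_FEATURE_ALL above grows from 1 to 1 | 4 == 5.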
+ */ + int active_name; + char cache_name[2][20]; kmem_cache_t *slab_cache; /* for allocating stripes */ int seq_flush, seq_write; @@ -238,9 +254,10 @@ struct raid5_private_data { wait_queue_head_t wait_for_overlap; int inactive_blocked; /* release of inactive stripes blocked, * waiting for 25% to be free - */ + */ + int pool_size; /* number of disks in stripeheads in pool */ spinlock_t device_lock; - struct disk_info disks[0]; + struct disk_info *disks; }; typedef struct raid5_private_data raid5_conf_t; diff --git a/include/linux/ramfs.h b/include/linux/ramfs.h index 953b6df5d037..78ecfa28b1c2 100644 --- a/include/linux/ramfs.h +++ b/include/linux/ramfs.h @@ -15,7 +15,7 @@ extern unsigned long ramfs_nommu_get_unmapped_area(struct file *file, extern int ramfs_nommu_mmap(struct file *file, struct vm_area_struct *vma); #endif -extern struct file_operations ramfs_file_operations; +extern const struct file_operations ramfs_file_operations; extern struct vm_operations_struct generic_file_vm_ops; #endif diff --git a/include/linux/reiserfs_fs.h b/include/linux/reiserfs_fs.h index 912f1b7cb18f..5676c4210e2c 100644 --- a/include/linux/reiserfs_fs.h +++ b/include/linux/reiserfs_fs.h @@ -1960,7 +1960,7 @@ int reiserfs_global_version_in_proc(char *buffer, char **start, off_t offset, extern struct inode_operations reiserfs_dir_inode_operations; extern struct inode_operations reiserfs_symlink_inode_operations; extern struct inode_operations reiserfs_special_inode_operations; -extern struct file_operations reiserfs_dir_operations; +extern const struct file_operations reiserfs_dir_operations; /* tail_conversion.c */ int direct2indirect(struct reiserfs_transaction_handle *, struct inode *, @@ -1972,7 +1972,7 @@ void reiserfs_unmap_buffer(struct buffer_head *); /* file.c */ extern struct inode_operations reiserfs_file_inode_operations; -extern struct file_operations reiserfs_file_operations; +extern const struct file_operations reiserfs_file_operations; extern struct address_space_operations reiserfs_address_space_operations; /* fix_nodes.c */ diff --git a/include/linux/relayfs_fs.h b/include/linux/relayfs_fs.h deleted file mode 100644 index 7342e66247fb..000000000000 --- a/include/linux/relayfs_fs.h +++ /dev/null @@ -1,287 +0,0 @@ -/* - * linux/include/linux/relayfs_fs.h - * - * Copyright (C) 2002, 2003 - Tom Zanussi (zanussi@us.ibm.com), IBM Corp - * Copyright (C) 1999, 2000, 2001, 2002 - Karim Yaghmour (karim@opersys.com) - * - * RelayFS definitions and declarations - */ - -#ifndef _LINUX_RELAYFS_FS_H -#define _LINUX_RELAYFS_FS_H - -#include <linux/config.h> -#include <linux/types.h> -#include <linux/sched.h> -#include <linux/wait.h> -#include <linux/list.h> -#include <linux/fs.h> -#include <linux/poll.h> -#include <linux/kref.h> - -/* - * Tracks changes to rchan/rchan_buf structs - */ -#define RELAYFS_CHANNEL_VERSION 6 - -/* - * Per-cpu relay channel buffer - */ -struct rchan_buf -{ - void *start; /* start of channel buffer */ - void *data; /* start of current sub-buffer */ - size_t offset; /* current offset into sub-buffer */ - size_t subbufs_produced; /* count of sub-buffers produced */ - size_t subbufs_consumed; /* count of sub-buffers consumed */ - struct rchan *chan; /* associated channel */ - wait_queue_head_t read_wait; /* reader wait queue */ - struct work_struct wake_readers; /* reader wake-up work struct */ - struct dentry *dentry; /* channel file dentry */ - struct kref kref; /* channel buffer refcount */ - struct page **page_array; /* array of current buffer pages */ - unsigned int 
page_count; /* number of current buffer pages */ - unsigned int finalized; /* buffer has been finalized */ - size_t *padding; /* padding counts per sub-buffer */ - size_t prev_padding; /* temporary variable */ - size_t bytes_consumed; /* bytes consumed in cur read subbuf */ - unsigned int cpu; /* this buf's cpu */ -} ____cacheline_aligned; - -/* - * Relay channel data structure - */ -struct rchan -{ - u32 version; /* the version of this struct */ - size_t subbuf_size; /* sub-buffer size */ - size_t n_subbufs; /* number of sub-buffers per buffer */ - size_t alloc_size; /* total buffer size allocated */ - struct rchan_callbacks *cb; /* client callbacks */ - struct kref kref; /* channel refcount */ - void *private_data; /* for user-defined data */ - size_t last_toobig; /* tried to log event > subbuf size */ - struct rchan_buf *buf[NR_CPUS]; /* per-cpu channel buffers */ -}; - -/* - * Relay channel client callbacks - */ -struct rchan_callbacks -{ - /* - * subbuf_start - called on buffer-switch to a new sub-buffer - * @buf: the channel buffer containing the new sub-buffer - * @subbuf: the start of the new sub-buffer - * @prev_subbuf: the start of the previous sub-buffer - * @prev_padding: unused space at the end of previous sub-buffer - * - * The client should return 1 to continue logging, 0 to stop - * logging. - * - * NOTE: subbuf_start will also be invoked when the buffer is - * created, so that the first sub-buffer can be initialized - * if necessary. In this case, prev_subbuf will be NULL. - * - * NOTE: the client can reserve bytes at the beginning of the new - * sub-buffer by calling subbuf_start_reserve() in this callback. - */ - int (*subbuf_start) (struct rchan_buf *buf, - void *subbuf, - void *prev_subbuf, - size_t prev_padding); - - /* - * buf_mapped - relayfs buffer mmap notification - * @buf: the channel buffer - * @filp: relayfs file pointer - * - * Called when a relayfs file is successfully mmapped - */ - void (*buf_mapped)(struct rchan_buf *buf, - struct file *filp); - - /* - * buf_unmapped - relayfs buffer unmap notification - * @buf: the channel buffer - * @filp: relayfs file pointer - * - * Called when a relayfs file is successfully unmapped - */ - void (*buf_unmapped)(struct rchan_buf *buf, - struct file *filp); - /* - * create_buf_file - create file to represent a relayfs channel buffer - * @filename: the name of the file to create - * @parent: the parent of the file to create - * @mode: the mode of the file to create - * @buf: the channel buffer - * @is_global: outparam - set non-zero if the buffer should be global - * - * Called during relay_open(), once for each per-cpu buffer, - * to allow the client to create a file to be used to - * represent the corresponding channel buffer. If the file is - * created outside of relayfs, the parent must also exist in - * that filesystem. - * - * The callback should return the dentry of the file created - * to represent the relay buffer. - * - * Setting the is_global outparam to a non-zero value will - * cause relay_open() to create a single global buffer rather - * than the default set of per-cpu buffers. - * - * See Documentation/filesystems/relayfs.txt for more info. 
- */ - struct dentry *(*create_buf_file)(const char *filename, - struct dentry *parent, - int mode, - struct rchan_buf *buf, - int *is_global); - - /* - * remove_buf_file - remove file representing a relayfs channel buffer - * @dentry: the dentry of the file to remove - * - * Called during relay_close(), once for each per-cpu buffer, - * to allow the client to remove a file used to represent a - * channel buffer. - * - * The callback should return 0 if successful, negative if not. - */ - int (*remove_buf_file)(struct dentry *dentry); -}; - -/* - * relayfs kernel API, fs/relayfs/relay.c - */ - -struct rchan *relay_open(const char *base_filename, - struct dentry *parent, - size_t subbuf_size, - size_t n_subbufs, - struct rchan_callbacks *cb); -extern void relay_close(struct rchan *chan); -extern void relay_flush(struct rchan *chan); -extern void relay_subbufs_consumed(struct rchan *chan, - unsigned int cpu, - size_t consumed); -extern void relay_reset(struct rchan *chan); -extern int relay_buf_full(struct rchan_buf *buf); - -extern size_t relay_switch_subbuf(struct rchan_buf *buf, - size_t length); -extern struct dentry *relayfs_create_dir(const char *name, - struct dentry *parent); -extern int relayfs_remove_dir(struct dentry *dentry); -extern struct dentry *relayfs_create_file(const char *name, - struct dentry *parent, - int mode, - struct file_operations *fops, - void *data); -extern int relayfs_remove_file(struct dentry *dentry); - -/** - * relay_write - write data into the channel - * @chan: relay channel - * @data: data to be written - * @length: number of bytes to write - * - * Writes data into the current cpu's channel buffer. - * - * Protects the buffer by disabling interrupts. Use this - * if you might be logging from interrupt context. Try - * __relay_write() if you know you won't be logging from - * interrupt context. - */ -static inline void relay_write(struct rchan *chan, - const void *data, - size_t length) -{ - unsigned long flags; - struct rchan_buf *buf; - - local_irq_save(flags); - buf = chan->buf[smp_processor_id()]; - if (unlikely(buf->offset + length > chan->subbuf_size)) - length = relay_switch_subbuf(buf, length); - memcpy(buf->data + buf->offset, data, length); - buf->offset += length; - local_irq_restore(flags); -} - -/** - * __relay_write - write data into the channel - * @chan: relay channel - * @data: data to be written - * @length: number of bytes to write - * - * Writes data into the current cpu's channel buffer. - * - * Protects the buffer by disabling preemption. Use - * relay_write() if you might be logging from interrupt - * context. - */ -static inline void __relay_write(struct rchan *chan, - const void *data, - size_t length) -{ - struct rchan_buf *buf; - - buf = chan->buf[get_cpu()]; - if (unlikely(buf->offset + length > buf->chan->subbuf_size)) - length = relay_switch_subbuf(buf, length); - memcpy(buf->data + buf->offset, data, length); - buf->offset += length; - put_cpu(); -} - -/** - * relay_reserve - reserve slot in channel buffer - * @chan: relay channel - * @length: number of bytes to reserve - * - * Returns pointer to reserved slot, NULL if full. - * - * Reserves a slot in the current cpu's channel buffer. - * Does not protect the buffer at all - caller must provide - * appropriate synchronization. 
- */ -static inline void *relay_reserve(struct rchan *chan, size_t length) -{ - void *reserved; - struct rchan_buf *buf = chan->buf[smp_processor_id()]; - - if (unlikely(buf->offset + length > buf->chan->subbuf_size)) { - length = relay_switch_subbuf(buf, length); - if (!length) - return NULL; - } - reserved = buf->data + buf->offset; - buf->offset += length; - - return reserved; -} - -/** - * subbuf_start_reserve - reserve bytes at the start of a sub-buffer - * @buf: relay channel buffer - * @length: number of bytes to reserve - * - * Helper function used to reserve bytes at the beginning of - * a sub-buffer in the subbuf_start() callback. - */ -static inline void subbuf_start_reserve(struct rchan_buf *buf, - size_t length) -{ - BUG_ON(length >= buf->chan->subbuf_size - 1); - buf->offset = length; -} - -/* - * exported relay file operations, fs/relayfs/inode.c - */ -extern struct file_operations relay_file_operations; - -#endif /* _LINUX_RELAYFS_FS_H */ - diff --git a/include/linux/rtc.h b/include/linux/rtc.h index b739ac1f7ca0..ab61cd1199f2 100644 --- a/include/linux/rtc.h +++ b/include/linux/rtc.h @@ -91,10 +91,102 @@ struct rtc_pll_info { #define RTC_PLL_GET _IOR('p', 0x11, struct rtc_pll_info) /* Get PLL correction */ #define RTC_PLL_SET _IOW('p', 0x12, struct rtc_pll_info) /* Set PLL correction */ +/* interrupt flags */ +#define RTC_IRQF 0x80 /* any of the following is active */ +#define RTC_PF 0x40 +#define RTC_AF 0x20 +#define RTC_UF 0x10 + #ifdef __KERNEL__ #include <linux/interrupt.h> +extern int rtc_month_days(unsigned int month, unsigned int year); +extern int rtc_valid_tm(struct rtc_time *tm); +extern int rtc_tm_to_time(struct rtc_time *tm, unsigned long *time); +extern void rtc_time_to_tm(unsigned long time, struct rtc_time *tm); + +#include <linux/device.h> +#include <linux/seq_file.h> +#include <linux/cdev.h> +#include <linux/poll.h> +#include <linux/mutex.h> + +extern struct class *rtc_class; + +struct rtc_class_ops { + int (*open)(struct device *); + void (*release)(struct device *); + int (*ioctl)(struct device *, unsigned int, unsigned long); + int (*read_time)(struct device *, struct rtc_time *); + int (*set_time)(struct device *, struct rtc_time *); + int (*read_alarm)(struct device *, struct rtc_wkalrm *); + int (*set_alarm)(struct device *, struct rtc_wkalrm *); + int (*proc)(struct device *, struct seq_file *); + int (*set_mmss)(struct device *, unsigned long secs); + int (*irq_set_state)(struct device *, int enabled); + int (*irq_set_freq)(struct device *, int freq); + int (*read_callback)(struct device *, int data); +}; + +#define RTC_DEVICE_NAME_SIZE 20 +struct rtc_task; + +struct rtc_device +{ + struct class_device class_dev; + struct module *owner; + + int id; + char name[RTC_DEVICE_NAME_SIZE]; + + struct rtc_class_ops *ops; + struct mutex ops_lock; + + struct class_device *rtc_dev; + struct cdev char_dev; + struct mutex char_lock; + + unsigned long irq_data; + spinlock_t irq_lock; + wait_queue_head_t irq_queue; + struct fasync_struct *async_queue; + + struct rtc_task *irq_task; + spinlock_t irq_task_lock; + int irq_freq; +}; +#define to_rtc_device(d) container_of(d, struct rtc_device, class_dev) + +extern struct rtc_device *rtc_device_register(const char *name, + struct device *dev, + struct rtc_class_ops *ops, + struct module *owner); +extern void rtc_device_unregister(struct rtc_device *rdev); +extern int rtc_interface_register(struct class_interface *intf); + +extern int rtc_read_time(struct class_device *class_dev, struct rtc_time *tm); +extern int 
rtc_set_time(struct class_device *class_dev, struct rtc_time *tm); +extern int rtc_set_mmss(struct class_device *class_dev, unsigned long secs); +extern int rtc_read_alarm(struct class_device *class_dev, + struct rtc_wkalrm *alrm); +extern int rtc_set_alarm(struct class_device *class_dev, + struct rtc_wkalrm *alrm); +extern void rtc_update_irq(struct class_device *class_dev, + unsigned long num, unsigned long events); + +extern struct class_device *rtc_class_open(char *name); +extern void rtc_class_close(struct class_device *class_dev); + +extern int rtc_irq_register(struct class_device *class_dev, + struct rtc_task *task); +extern void rtc_irq_unregister(struct class_device *class_dev, + struct rtc_task *task); +extern int rtc_irq_set_state(struct class_device *class_dev, + struct rtc_task *task, int enabled); +extern int rtc_irq_set_freq(struct class_device *class_dev, + struct rtc_task *task, int freq); + typedef struct rtc_task { void (*func)(void *private_data); void *private_data; diff --git a/include/linux/sched.h b/include/linux/sched.h index e0054c1b9a09..20b4f0372e44 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h @@ -35,6 +35,7 @@ #include <linux/topology.h> #include <linux/seccomp.h> #include <linux/rcupdate.h> +#include <linux/futex.h> #include <linux/auxvec.h> /* For AT_VECTOR_SIZE */ @@ -402,6 +403,7 @@ struct signal_struct { /* ITIMER_REAL timer for the process */ struct hrtimer real_timer; + struct task_struct *tsk; ktime_t it_real_incr; /* ITIMER_PROF and ITIMER_VIRTUAL timers for the process */ @@ -871,6 +873,11 @@ struct task_struct { int cpuset_mems_generation; int cpuset_mem_spread_rotor; #endif + struct robust_list_head __user *robust_list; +#ifdef CONFIG_COMPAT + struct compat_robust_list_head __user *compat_robust_list; +#endif + atomic_t fs_excl; /* holding fs exclusive resources */ struct rcu_head rcu; }; diff --git a/include/linux/serio.h b/include/linux/serio.h index aa4d6493a034..690aabca8ed0 100644 --- a/include/linux/serio.h +++ b/include/linux/serio.h @@ -119,7 +119,7 @@ static inline void serio_cleanup(struct serio *serio) } /* - * Use the following fucntions to manipulate serio's per-port + * Use the following functions to manipulate serio's per-port * driver-specific data. 
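Illustrative sketch, not part of the patch: a driver hooking into the new RTC class interface declared above in include/linux/rtc.h would look roughly like this. The foo_* names are invented, and the error handling assumes the usual ERR_PTR convention for the return value of rtc_device_register():

#include <linux/err.h>
#include <linux/module.h>
#include <linux/rtc.h>

static int foo_read_time(struct device *dev, struct rtc_time *tm)
{
	/* fill *tm from the hardware registers */
	return 0;
}

static struct rtc_class_ops foo_rtc_ops = {
	.read_time	= foo_read_time,
};

static struct rtc_device *foo_rtc;

static int foo_attach(struct device *dev)
{
	foo_rtc = rtc_device_register("foo", dev, &foo_rtc_ops, THIS_MODULE);
	if (IS_ERR(foo_rtc))
		return PTR_ERR(foo_rtc);
	return 0;
}

static void foo_detach(struct device *dev)
{
	rtc_device_unregister(foo_rtc);
}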
*/ static inline void *serio_get_drvdata(struct serio *serio) @@ -133,7 +133,7 @@ static inline void serio_set_drvdata(struct serio *serio, void *data) } /* - * Use the following fucntions to protect critical sections in + * Use the following functions to protect critical sections in * driver code from port's interrupt handler */ static inline void serio_pause_rx(struct serio *serio) @@ -147,7 +147,7 @@ static inline void serio_continue_rx(struct serio *serio) } /* - * Use the following fucntions to pin serio's driver in process context + * Use the following functions to pin serio's driver in process context */ static inline int serio_pin_driver(struct serio *serio) { diff --git a/include/linux/smp.h b/include/linux/smp.h index d699a16b0cb2..e2fa3ab4afc5 100644 --- a/include/linux/smp.h +++ b/include/linux/smp.h @@ -82,7 +82,11 @@ void smp_prepare_boot_cpu(void); */ #define raw_smp_processor_id() 0 #define hard_smp_processor_id() 0 -#define smp_call_function(func,info,retry,wait) ({ 0; }) +static inline int up_smp_call_function(void) +{ + return 0; +} +#define smp_call_function(func,info,retry,wait) (up_smp_call_function()) #define on_each_cpu(func,info,retry,wait) \ ({ \ local_irq_disable(); \ diff --git a/include/linux/sound.h b/include/linux/sound.h index 72b9af4c3fd4..f63d8342ffa3 100644 --- a/include/linux/sound.h +++ b/include/linux/sound.h @@ -30,12 +30,12 @@ */ struct device; -extern int register_sound_special(struct file_operations *fops, int unit); -extern int register_sound_special_device(struct file_operations *fops, int unit, struct device *dev); -extern int register_sound_mixer(struct file_operations *fops, int dev); -extern int register_sound_midi(struct file_operations *fops, int dev); -extern int register_sound_dsp(struct file_operations *fops, int dev); -extern int register_sound_synth(struct file_operations *fops, int dev); +extern int register_sound_special(const struct file_operations *fops, int unit); +extern int register_sound_special_device(const struct file_operations *fops, int unit, struct device *dev); +extern int register_sound_mixer(const struct file_operations *fops, int dev); +extern int register_sound_midi(const struct file_operations *fops, int dev); +extern int register_sound_dsp(const struct file_operations *fops, int dev); +extern int register_sound_synth(const struct file_operations *fops, int dev); extern void unregister_sound_special(int unit); extern void unregister_sound_mixer(int unit); diff --git a/include/linux/stat.h b/include/linux/stat.h index 8ff2a122dfef..8669291352db 100644 --- a/include/linux/stat.h +++ b/include/linux/stat.h @@ -69,7 +69,7 @@ struct kstat { struct timespec mtime; struct timespec ctime; unsigned long blksize; - unsigned long blocks; + unsigned long long blocks; }; #endif diff --git a/include/linux/statfs.h b/include/linux/statfs.h index ad83a2bdb821..b34cc829f98d 100644 --- a/include/linux/statfs.h +++ b/include/linux/statfs.h @@ -8,11 +8,11 @@ struct kstatfs { long f_type; long f_bsize; - sector_t f_blocks; - sector_t f_bfree; - sector_t f_bavail; - sector_t f_files; - sector_t f_ffree; + u64 f_blocks; + u64 f_bfree; + u64 f_bavail; + u64 f_files; + u64 f_ffree; __kernel_fsid_t f_fsid; long f_namelen; long f_frsize; diff --git a/include/linux/sunrpc/cache.h b/include/linux/sunrpc/cache.h index c4e3ea7cf154..b5612c958cce 100644 --- a/include/linux/sunrpc/cache.h +++ b/include/linux/sunrpc/cache.h @@ -50,7 +50,7 @@ struct cache_head { time_t last_refresh; /* If CACHE_PENDING, this is when upcall * was sent, else this is 
when update was received */ - atomic_t refcnt; + struct kref ref; unsigned long flags; }; #define CACHE_VALID 0 /* Entry contains valid data */ @@ -68,8 +68,7 @@ struct cache_detail { atomic_t inuse; /* active user-space update or lookup */ char *name; - void (*cache_put)(struct cache_head *, - struct cache_detail*); + void (*cache_put)(struct kref *); void (*cache_request)(struct cache_detail *cd, struct cache_head *h, @@ -81,6 +80,11 @@ struct cache_detail { struct cache_detail *cd, struct cache_head *h); + struct cache_head * (*alloc)(void); + int (*match)(struct cache_head *orig, struct cache_head *new); + void (*init)(struct cache_head *orig, struct cache_head *new); + void (*update)(struct cache_head *orig, struct cache_head *new); + /* fields below this comment are for internal use * and should not be touched by cache owners */ @@ -123,126 +127,14 @@ struct cache_deferred_req { int too_many); }; -/* - * just like a template in C++, this macro does cache lookup - * for us. - * The function is passed some sort of HANDLE from which a cache_detail - * structure can be determined (via SETUP, DETAIL), a template - * cache entry (type RTN*), and a "set" flag. Using the HASHFN and the - * TEST, the function will try to find a matching cache entry in the cache. - * If "set" == 0 : - * If an entry is found, it is returned - * If no entry is found, a new non-VALID entry is created. - * If "set" == 1 and INPLACE == 0 : - * If no entry is found a new one is inserted with data from "template" - * If a non-CACHE_VALID entry is found, it is updated from template using UPDATE - * If a CACHE_VALID entry is found, a new entry is swapped in with data - * from "template" - * If set == 1, and INPLACE == 1 : - * As above, except that if a CACHE_VALID entry is found, we UPDATE in place - * instead of swapping in a new entry. - * - * If the passed handle has the CACHE_NEGATIVE flag set, then UPDATE is not - * run but insteead CACHE_NEGATIVE is set in any new item. - * In any case, the new entry is returned with a reference count. - * - * - * RTN is a struct type for a cache entry - * MEMBER is the member of the cache which is cache_head, which must be first - * FNAME is the name for the function - * ARGS are arguments to function and must contain RTN *item, int set. May - * also contain something to be usedby SETUP or DETAIL to find cache_detail. - * SETUP locates the cache detail and makes it available as... - * DETAIL identifies the cache detail, possibly set up by SETUP - * HASHFN returns a hash value of the cache entry "item" - * TEST tests if "tmp" matches "item" - * INIT copies key information from "item" to "new" - * UPDATE copies content information from "item" to "tmp" - * INPLACE is true if updates can happen inplace rather than allocating a new structure - * - * WARNING: any substantial changes to this must be reflected in - * net/sunrpc/svcauth.c(auth_domain_lookup) - * which is a similar routine that is open-coded. 
- */ -#define DefineCacheLookup(RTN,MEMBER,FNAME,ARGS,SETUP,DETAIL,HASHFN,TEST,INIT,UPDATE,INPLACE) \ -RTN *FNAME ARGS \ -{ \ - RTN *tmp, *new=NULL; \ - struct cache_head **hp, **head; \ - SETUP; \ - head = &(DETAIL)->hash_table[HASHFN]; \ - retry: \ - if (set||new) write_lock(&(DETAIL)->hash_lock); \ - else read_lock(&(DETAIL)->hash_lock); \ - for(hp=head; *hp != NULL; hp = &tmp->MEMBER.next) { \ - tmp = container_of(*hp, RTN, MEMBER); \ - if (TEST) { /* found a match */ \ - \ - if (set && !INPLACE && test_bit(CACHE_VALID, &tmp->MEMBER.flags) && !new) \ - break; \ - \ - if (new) \ - {INIT;} \ - if (set) { \ - if (!INPLACE && test_bit(CACHE_VALID, &tmp->MEMBER.flags))\ - { /* need to swap in new */ \ - RTN *t2; \ - \ - new->MEMBER.next = tmp->MEMBER.next; \ - *hp = &new->MEMBER; \ - tmp->MEMBER.next = NULL; \ - t2 = tmp; tmp = new; new = t2; \ - } \ - if (test_bit(CACHE_NEGATIVE, &item->MEMBER.flags)) \ - set_bit(CACHE_NEGATIVE, &tmp->MEMBER.flags); \ - else { \ - UPDATE; \ - clear_bit(CACHE_NEGATIVE, &tmp->MEMBER.flags); \ - } \ - } \ - cache_get(&tmp->MEMBER); \ - if (set||new) write_unlock(&(DETAIL)->hash_lock); \ - else read_unlock(&(DETAIL)->hash_lock); \ - if (set) \ - cache_fresh(DETAIL, &tmp->MEMBER, item->MEMBER.expiry_time); \ - if (set && !INPLACE && new) cache_fresh(DETAIL, &new->MEMBER, 0); \ - if (new) (DETAIL)->cache_put(&new->MEMBER, DETAIL); \ - return tmp; \ - } \ - } \ - /* Didn't find anything */ \ - if (new) { \ - INIT; \ - new->MEMBER.next = *head; \ - *head = &new->MEMBER; \ - (DETAIL)->entries ++; \ - cache_get(&new->MEMBER); \ - if (set) { \ - tmp = new; \ - if (test_bit(CACHE_NEGATIVE, &item->MEMBER.flags)) \ - set_bit(CACHE_NEGATIVE, &tmp->MEMBER.flags); \ - else {UPDATE;} \ - } \ - } \ - if (set||new) write_unlock(&(DETAIL)->hash_lock); \ - else read_unlock(&(DETAIL)->hash_lock); \ - if (new && set) \ - cache_fresh(DETAIL, &new->MEMBER, item->MEMBER.expiry_time); \ - if (new) \ - return new; \ - new = kmalloc(sizeof(*new), GFP_KERNEL); \ - if (new) { \ - cache_init(&new->MEMBER); \ - goto retry; \ - } \ - return NULL; \ -} +extern struct cache_head * +sunrpc_cache_lookup(struct cache_detail *detail, + struct cache_head *key, int hash); +extern struct cache_head * +sunrpc_cache_update(struct cache_detail *detail, + struct cache_head *new, struct cache_head *old, int hash); -#define DefineSimpleCacheLookup(STRUCT,INPLACE) \ - DefineCacheLookup(struct STRUCT, h, STRUCT##_lookup, (struct STRUCT *item, int set), /*no setup */, \ - & STRUCT##_cache, STRUCT##_hash(item), STRUCT##_match(item, tmp),\ - STRUCT##_init(new, item), STRUCT##_update(tmp, item),INPLACE) #define cache_for_each(pos, detail, index, member) \ for (({read_lock(&(detail)->hash_lock); index = (detail)->hash_size;}) ; \ @@ -258,22 +150,19 @@ extern void cache_clean_deferred(void *owner); static inline struct cache_head *cache_get(struct cache_head *h) { - atomic_inc(&h->refcnt); + kref_get(&h->ref); return h; } -static inline int cache_put(struct cache_head *h, struct cache_detail *cd) +static inline void cache_put(struct cache_head *h, struct cache_detail *cd) { - if (atomic_read(&h->refcnt) <= 2 && + if (atomic_read(&h->ref.refcount) <= 2 && h->expiry_time < cd->nextcheck) cd->nextcheck = h->expiry_time; - return atomic_dec_and_test(&h->refcnt); + kref_put(&h->ref, cd->cache_put); } -extern void cache_init(struct cache_head *h); -extern void cache_fresh(struct cache_detail *detail, - struct cache_head *head, time_t expiry); extern int cache_check(struct cache_detail *detail, struct cache_head *h, 
struct cache_req *rqstp); extern void cache_flush(void); diff --git a/include/linux/sunrpc/stats.h b/include/linux/sunrpc/stats.h index 0d6ed3c8bdc4..d93c24b47f3f 100644 --- a/include/linux/sunrpc/stats.h +++ b/include/linux/sunrpc/stats.h @@ -50,7 +50,7 @@ struct proc_dir_entry * rpc_proc_register(struct rpc_stat *); void rpc_proc_unregister(const char *); void rpc_proc_zero(struct rpc_program *); struct proc_dir_entry * svc_proc_register(struct svc_stat *, - struct file_operations *); + const struct file_operations *); void svc_proc_unregister(const char *); void svc_seq_show(struct seq_file *, @@ -65,7 +65,7 @@ static inline void rpc_proc_unregister(const char *p) {} static inline void rpc_proc_zero(struct rpc_program *p) {} static inline struct proc_dir_entry *svc_proc_register(struct svc_stat *s, - struct file_operations *f) { return NULL; } + const struct file_operations *f) { return NULL; } static inline void svc_proc_unregister(const char *p) {} static inline void svc_seq_show(struct seq_file *seq, diff --git a/include/linux/sunrpc/svcauth.h b/include/linux/sunrpc/svcauth.h index c119ce7cbd22..2fe2087edd66 100644 --- a/include/linux/sunrpc/svcauth.h +++ b/include/linux/sunrpc/svcauth.h @@ -45,9 +45,10 @@ struct svc_rqst; /* forward decl */ * of ip addresses to the given client. */ struct auth_domain { - struct cache_head h; + struct kref ref; + struct hlist_node hash; char *name; - int flavour; + struct auth_ops *flavour; }; /* @@ -86,6 +87,9 @@ struct auth_domain { * * domain_release() * This call releases a domain. + * set_client() + * Givens a pending request (struct svc_rqst), finds and assigns + * an appropriate 'auth_domain' as the client. */ struct auth_ops { char * name; @@ -117,7 +121,7 @@ extern void svc_auth_unregister(rpc_authflavor_t flavor); extern struct auth_domain *unix_domain_find(char *name); extern void auth_domain_put(struct auth_domain *item); extern int auth_unix_add_addr(struct in_addr addr, struct auth_domain *dom); -extern struct auth_domain *auth_domain_lookup(struct auth_domain *item, int set); +extern struct auth_domain *auth_domain_lookup(char *name, struct auth_domain *new); extern struct auth_domain *auth_domain_find(char *name); extern struct auth_domain *auth_unix_lookup(struct in_addr addr); extern int auth_unix_forget_old(struct auth_domain *dom); @@ -160,8 +164,6 @@ static inline unsigned long hash_mem(char *buf, int length, int bits) return hash >> (BITS_PER_LONG - bits); } -extern struct cache_detail auth_domain_cache, ip_map_cache; - #endif /* __KERNEL__ */ #endif /* _LINUX_SUNRPC_SVCAUTH_H_ */ diff --git a/include/linux/synclink.h b/include/linux/synclink.h index 1b7cd8d1a71b..2993302f7923 100644 --- a/include/linux/synclink.h +++ b/include/linux/synclink.h @@ -1,7 +1,7 @@ /* * SyncLink Multiprotocol Serial Adapter Driver * - * $Id: synclink.h,v 3.10 2005/11/08 19:50:54 paulkf Exp $ + * $Id: synclink.h,v 3.11 2006/02/06 21:20:29 paulkf Exp $ * * Copyright (C) 1998-2000 by Microgate Corporation * @@ -221,6 +221,12 @@ struct mgsl_icount { __u32 rxidle; }; +struct gpio_desc { + __u32 state; + __u32 smask; + __u32 dir; + __u32 dmask; +}; #define DEBUG_LEVEL_DATA 1 #define DEBUG_LEVEL_ERROR 2 @@ -276,5 +282,8 @@ struct mgsl_icount { #define MGSL_IOCLOOPTXDONE _IO(MGSL_MAGIC_IOC,9) #define MGSL_IOCSIF _IO(MGSL_MAGIC_IOC,10) #define MGSL_IOCGIF _IO(MGSL_MAGIC_IOC,11) +#define MGSL_IOCSGPIO _IOW(MGSL_MAGIC_IOC,16,struct gpio_desc) +#define MGSL_IOCGGPIO _IOR(MGSL_MAGIC_IOC,17,struct gpio_desc) +#define MGSL_IOCWAITGPIO _IOWR(MGSL_MAGIC_IOC,18,struct 
gpio_desc) #endif /* _SYNCLINK_H_ */ diff --git a/include/linux/threads.h b/include/linux/threads.h index b59738ac6197..e646bcdf2614 100644 --- a/include/linux/threads.h +++ b/include/linux/threads.h @@ -28,7 +28,8 @@ #define PID_MAX_DEFAULT (CONFIG_BASE_SMALL ? 0x1000 : 0x8000) /* - * A maximum of 4 million PIDs should be enough for a while: + * A maximum of 4 million PIDs should be enough for a while. + * [NOTE: PID/TIDs are limited to 2^29 ~= 500+ million, see futex.h.] */ #define PID_MAX_LIMIT (CONFIG_BASE_SMALL ? PAGE_SIZE * 8 : \ (sizeof(long) > 4 ? 4 * 1024 * 1024 : PID_MAX_DEFAULT)) diff --git a/include/linux/time.h b/include/linux/time.h index bf0e785e2e03..0cd696cee998 100644 --- a/include/linux/time.h +++ b/include/linux/time.h @@ -73,12 +73,6 @@ extern void set_normalized_timespec(struct timespec *ts, time_t sec, long nsec); #define timespec_valid(ts) \ (((ts)->tv_sec >= 0) && (((unsigned long) (ts)->tv_nsec) < NSEC_PER_SEC)) -/* - * 64-bit nanosec type. Large enough to span 292+ years in nanosecond - * resolution. Ought to be enough for a while. - */ -typedef s64 nsec_t; - extern struct timespec xtime; extern struct timespec wall_to_monotonic; extern seqlock_t xtime_lock; @@ -114,9 +108,9 @@ extern struct timespec timespec_trunc(struct timespec t, unsigned gran); * Returns the scalar nanosecond representation of the timespec * parameter. */ -static inline nsec_t timespec_to_ns(const struct timespec *ts) +static inline s64 timespec_to_ns(const struct timespec *ts) { - return ((nsec_t) ts->tv_sec * NSEC_PER_SEC) + ts->tv_nsec; + return ((s64) ts->tv_sec * NSEC_PER_SEC) + ts->tv_nsec; } /** @@ -126,9 +120,9 @@ static inline nsec_t timespec_to_ns(const struct timespec *ts) * Returns the scalar nanosecond representation of the timeval * parameter. */ -static inline nsec_t timeval_to_ns(const struct timeval *tv) +static inline s64 timeval_to_ns(const struct timeval *tv) { - return ((nsec_t) tv->tv_sec * NSEC_PER_SEC) + + return ((s64) tv->tv_sec * NSEC_PER_SEC) + tv->tv_usec * NSEC_PER_USEC; } @@ -138,7 +132,7 @@ static inline nsec_t timeval_to_ns(const struct timeval *tv) * * Returns the timespec representation of the nsec parameter. */ -extern struct timespec ns_to_timespec(const nsec_t nsec); +extern struct timespec ns_to_timespec(const s64 nsec); /** * ns_to_timeval - Convert nanoseconds to timeval @@ -146,7 +140,7 @@ extern struct timespec ns_to_timespec(const nsec_t nsec); * * Returns the timeval representation of the nsec parameter. */ -extern struct timeval ns_to_timeval(const nsec_t nsec); +extern struct timeval ns_to_timeval(const s64 nsec); #endif /* __KERNEL__ */ diff --git a/include/linux/timer.h b/include/linux/timer.h index ee5a09e806e8..b5caabca553c 100644 --- a/include/linux/timer.h +++ b/include/linux/timer.h @@ -96,6 +96,7 @@ static inline void add_timer(struct timer_list *timer) extern void init_timers(void); extern void run_local_timers(void); -extern int it_real_fn(void *); +struct hrtimer; +extern int it_real_fn(struct hrtimer *); #endif diff --git a/include/linux/timex.h b/include/linux/timex.h index 82dc9ae79d37..03914b7e41b1 100644 --- a/include/linux/timex.h +++ b/include/linux/timex.h @@ -307,6 +307,8 @@ time_interpolator_reset(void) /* Returns how long ticks are at present, in ns / 2^(SHIFT_SCALE-10). 
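Worked example, not part of the patch: with the dedicated nsec_t typedef gone, the conversion helpers in include/linux/time.h simply return s64, e.g.

	struct timespec ts = { .tv_sec = 1, .tv_nsec = 500000000 };
	s64 ns = timespec_to_ns(&ts);	/* 1 * NSEC_PER_SEC + 500000000 == 1500000000 */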
*/ extern u64 current_tick_length(void); +extern int do_adjtimex(struct timex *); + #endif /* KERNEL */ #endif /* LINUX_TIMEX_H */ diff --git a/include/linux/topology.h b/include/linux/topology.h index e8eb0040ce3a..a305ae2e44b6 100644 --- a/include/linux/topology.h +++ b/include/linux/topology.h @@ -164,6 +164,15 @@ .nr_balance_failed = 0, \ } +#ifdef CONFIG_SCHED_MC +#ifndef SD_MC_INIT +/* for now its same as SD_CPU_INIT. + * TBD: Tune Domain parameters! + */ +#define SD_MC_INIT SD_CPU_INIT +#endif +#endif + #ifdef CONFIG_NUMA #ifndef SD_NODE_INIT #error Please define an appropriate SD_NODE_INIT in include/asm/topology.h!!! diff --git a/include/linux/types.h b/include/linux/types.h index 54ae2d59e71b..1046c7ad86d9 100644 --- a/include/linux/types.h +++ b/include/linux/types.h @@ -137,6 +137,10 @@ typedef __s64 int64_t; typedef unsigned long sector_t; #endif +#ifndef HAVE_BLKCNT_T +typedef unsigned long blkcnt_t; +#endif + /* * The type of an index into the pagecache. Use a #define so asm/types.h * can override it. diff --git a/include/linux/ufs_fs.h b/include/linux/ufs_fs.h index b0ffe4356e5a..843aeaaa79d4 100644 --- a/include/linux/ufs_fs.h +++ b/include/linux/ufs_fs.h @@ -895,7 +895,7 @@ extern void ufs_set_link(struct inode *, struct ufs_dir_entry *, struct buffer_h /* file.c */ extern struct inode_operations ufs_file_inode_operations; -extern struct file_operations ufs_file_operations; +extern const struct file_operations ufs_file_operations; extern struct address_space_operations ufs_aops; @@ -915,7 +915,7 @@ extern struct buffer_head * ufs_bread (struct inode *, unsigned, int, int *); extern int ufs_getfrag_block (struct inode *inode, sector_t fragment, struct buffer_head *bh_result, int create); /* namei.c */ -extern struct file_operations ufs_dir_operations; +extern const struct file_operations ufs_dir_operations; /* super.c */ extern void ufs_warning (struct super_block *, const char *, const char *, ...) __attribute__ ((format (printf, 3, 4))); diff --git a/include/linux/usb.h b/include/linux/usb.h index 130d125fda12..e34e5e3dce52 100644 --- a/include/linux/usb.h +++ b/include/linux/usb.h @@ -615,7 +615,7 @@ extern struct bus_type usb_bus_type; */ struct usb_class_driver { char *name; - struct file_operations *fops; + const struct file_operations *fops; int minor_base; }; diff --git a/include/linux/videodev2.h b/include/linux/videodev2.h index 2275bfec5b68..af2d6155d3fe 100644 --- a/include/linux/videodev2.h +++ b/include/linux/videodev2.h @@ -75,7 +75,7 @@ struct video_device int minor; /* device ops + callbacks */ - struct file_operations *fops; + const struct file_operations *fops; void (*release)(struct video_device *vfd); diff --git a/include/linux/x1205.h b/include/linux/x1205.h deleted file mode 100644 index 64fd3af894a5..000000000000 --- a/include/linux/x1205.h +++ /dev/null @@ -1,31 +0,0 @@ -/* - * x1205.h - defines for drivers/i2c/chips/x1205.c - * Copyright 2004 Karen Spearel - * Copyright 2005 Alessandro Zummo - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. 
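Illustrative aside, not part of the patch: the change that recurs throughout this series is the constification of file_operations tables, so that their definitions can live in read-only data. A typical definition now reads (foo_fops is an invented name):

	static const struct file_operations foo_fops = {
		.owner	= THIS_MODULE,
		/* handlers filled in as usual */
	};

and every prototype that passes such a table around gains the matching const qualifier, as in the fat, nfs, proc and sound declarations above.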
- */ - -#ifndef __LINUX_X1205_H__ -#define __LINUX_X1205_H__ - -/* commands */ - -#define X1205_CMD_GETDATETIME 0 -#define X1205_CMD_SETTIME 1 -#define X1205_CMD_SETDATETIME 2 -#define X1205_CMD_GETALARM 3 -#define X1205_CMD_SETALARM 4 -#define X1205_CMD_GETDTRIM 5 -#define X1205_CMD_SETDTRIM 6 -#define X1205_CMD_GETATRIM 7 -#define X1205_CMD_SETATRIM 8 - -extern int x1205_do_command(unsigned int cmd, void *arg); -extern int x1205_direct_attach(int adapter_id, - struct i2c_client_address_data *address_data); - -#endif /* __LINUX_X1205_H__ */ diff --git a/include/net/netfilter/nf_conntrack.h b/include/net/netfilter/nf_conntrack.h index b6f0905a4ee2..916013ca4a5c 100644 --- a/include/net/netfilter/nf_conntrack.h +++ b/include/net/netfilter/nf_conntrack.h @@ -300,29 +300,30 @@ DECLARE_PER_CPU(struct nf_conntrack_ecache, nf_conntrack_ecache); #define CONNTRACK_ECACHE(x) (__get_cpu_var(nf_conntrack_ecache).x) -extern struct notifier_block *nf_conntrack_chain; -extern struct notifier_block *nf_conntrack_expect_chain; +extern struct atomic_notifier_head nf_conntrack_chain; +extern struct atomic_notifier_head nf_conntrack_expect_chain; static inline int nf_conntrack_register_notifier(struct notifier_block *nb) { - return notifier_chain_register(&nf_conntrack_chain, nb); + return atomic_notifier_chain_register(&nf_conntrack_chain, nb); } static inline int nf_conntrack_unregister_notifier(struct notifier_block *nb) { - return notifier_chain_unregister(&nf_conntrack_chain, nb); + return atomic_notifier_chain_unregister(&nf_conntrack_chain, nb); } static inline int nf_conntrack_expect_register_notifier(struct notifier_block *nb) { - return notifier_chain_register(&nf_conntrack_expect_chain, nb); + return atomic_notifier_chain_register(&nf_conntrack_expect_chain, nb); } static inline int nf_conntrack_expect_unregister_notifier(struct notifier_block *nb) { - return notifier_chain_unregister(&nf_conntrack_expect_chain, nb); + return atomic_notifier_chain_unregister(&nf_conntrack_expect_chain, + nb); } extern void nf_ct_deliver_cached_events(const struct nf_conn *ct); @@ -347,14 +348,14 @@ static inline void nf_conntrack_event(enum ip_conntrack_events event, struct nf_conn *ct) { if (nf_ct_is_confirmed(ct) && !nf_ct_is_dying(ct)) - notifier_call_chain(&nf_conntrack_chain, event, ct); + atomic_notifier_call_chain(&nf_conntrack_chain, event, ct); } static inline void nf_conntrack_expect_event(enum ip_conntrack_expect_events event, struct nf_conntrack_expect *exp) { - notifier_call_chain(&nf_conntrack_expect_chain, event, exp); + atomic_notifier_call_chain(&nf_conntrack_expect_chain, event, exp); } #else /* CONFIG_NF_CONNTRACK_EVENTS */ static inline void nf_conntrack_event_cache(enum ip_conntrack_events event, diff --git a/include/net/request_sock.h b/include/net/request_sock.h index 11641c9384f7..c5d7f920c352 100644 --- a/include/net/request_sock.h +++ b/include/net/request_sock.h @@ -145,7 +145,7 @@ static inline struct request_sock * { struct request_sock *req = queue->rskq_accept_head; - queue->rskq_accept_head = queue->rskq_accept_head = NULL; + queue->rskq_accept_head = NULL; return req; } diff --git a/include/sound/core.h b/include/sound/core.h index 144bdc2f217f..7f32c12b4a0a 100644 --- a/include/sound/core.h +++ b/include/sound/core.h @@ -186,7 +186,7 @@ struct snd_minor { int type; /* SNDRV_DEVICE_TYPE_XXX */ int card; /* card number */ int device; /* device number */ - struct file_operations *f_ops; /* file operations */ + const struct file_operations *f_ops; /* file operations */ void 
*private_data; /* private data for f_ops->open */ char name[0]; /* device name (keep at the end of structure) */ @@ -200,14 +200,14 @@ extern int snd_ecards_limit; void snd_request_card(int card); int snd_register_device(int type, struct snd_card *card, int dev, - struct file_operations *f_ops, void *private_data, + const struct file_operations *f_ops, void *private_data, const char *name); int snd_unregister_device(int type, struct snd_card *card, int dev); void *snd_lookup_minor_data(unsigned int minor, int type); #ifdef CONFIG_SND_OSSEMUL int snd_register_oss_device(int type, struct snd_card *card, int dev, - struct file_operations *f_ops, void *private_data, + const struct file_operations *f_ops, void *private_data, const char *name); int snd_unregister_oss_device(int type, struct snd_card *card, int dev); void *snd_lookup_oss_minor_data(unsigned int minor, int type); diff --git a/init/do_mounts.c b/init/do_mounts.c index 8b671fe68afa..adb7cad3e6ee 100644 --- a/init/do_mounts.c +++ b/init/do_mounts.c @@ -23,7 +23,6 @@ int root_mountflags = MS_RDONLY | MS_SILENT; char * __initdata root_device_name; static char __initdata saved_root_name[64]; -/* this is initialized in init/main.c */ dev_t ROOT_DEV; static int __init load_ramdisk(char *str) diff --git a/init/initramfs.c b/init/initramfs.c index 77b934cccefe..679d870d991b 100644 --- a/init/initramfs.c +++ b/init/initramfs.c @@ -519,7 +519,7 @@ void __init populate_rootfs(void) return; } printk("it isn't (%s); looks like an initrd\n", err); - fd = sys_open("/initrd.image", O_WRONLY|O_CREAT, 700); + fd = sys_open("/initrd.image", O_WRONLY|O_CREAT, 0700); if (fd >= 0) { sys_write(fd, (char *)initrd_start, initrd_end - initrd_start); diff --git a/init/main.c b/init/main.c index 006dcd547dc2..4a2f0898dda1 100644 --- a/init/main.c +++ b/init/main.c @@ -341,7 +341,7 @@ static void __init setup_per_cpu_areas(void) #endif ptr = alloc_bootmem(size * nr_possible_cpus); - for_each_cpu(i) { + for_each_possible_cpu(i) { __per_cpu_offset[i] = ptr - __per_cpu_start; memcpy(ptr, __per_cpu_start, __per_cpu_end - __per_cpu_start); ptr += size; @@ -645,24 +645,6 @@ static void run_init_process(char *init_filename) execve(init_filename, argv_init, envp_init); } -static inline void fixup_cpu_present_map(void) -{ -#ifdef CONFIG_SMP - int i; - - /* - * If arch is not hotplug ready and did not populate - * cpu_present_map, just make cpu_present_map same as cpu_possible_map - * for other cpu bringup code to function as normal. e.g smp_init() etc. - */ - if (cpus_empty(cpu_present_map)) { - for_each_cpu(i) { - cpu_set(i, cpu_present_map); - } - } -#endif -} - static int init(void * unused) { lock_kernel(); @@ -684,7 +666,6 @@ static int init(void * unused) do_pre_smp_initcalls(); - fixup_cpu_present_map(); smp_init(); sched_init_smp(); diff --git a/ipc/compat.c b/ipc/compat.c index 1fe95f6659dd..a544dfbb082a 100644 --- a/ipc/compat.c +++ b/ipc/compat.c @@ -30,7 +30,7 @@ #include <linux/slab.h> #include <linux/syscalls.h> -#include <asm/semaphore.h> +#include <linux/mutex.h> #include <asm/uaccess.h> #include "util.h" diff --git a/ipc/mqueue.c b/ipc/mqueue.c index 85c52fd26bff..41ecbd440fed 100644 --- a/ipc/mqueue.c +++ b/ipc/mqueue.c @@ -25,6 +25,8 @@ #include <linux/netlink.h> #include <linux/syscalls.h> #include <linux/signal.h> +#include <linux/mutex.h> + #include <net/sock.h> #include "util.h" @@ -760,7 +762,7 @@ out_unlock: * The receiver accepts the message and returns without grabbing the queue * spinlock. 
Therefore an intermediate STATE_PENDING state and memory barriers * are necessary. The same algorithm is used for sysv semaphores, see - * ipc/sem.c fore more details. + * ipc/sem.c for more details. * * The same algorithm is used for senders. */ diff --git a/ipc/msg.c b/ipc/msg.c index 7eec5ed32379..48a7f17a7236 100644 --- a/ipc/msg.c +++ b/ipc/msg.c @@ -28,6 +28,8 @@ #include <linux/syscalls.h> #include <linux/audit.h> #include <linux/seq_file.h> +#include <linux/mutex.h> + #include <asm/current.h> #include <asm/uaccess.h> #include "util.h" @@ -179,8 +181,8 @@ static void expunge_all(struct msg_queue* msq, int res) * removes the message queue from message queue ID * array, and cleans up all the messages associated with this queue. * - * msg_ids.sem and the spinlock for this message queue is hold - * before freeque() is called. msg_ids.sem remains locked on exit. + * msg_ids.mutex and the spinlock for this message queue is hold + * before freeque() is called. msg_ids.mutex remains locked on exit. */ static void freeque (struct msg_queue *msq, int id) { @@ -208,7 +210,7 @@ asmlinkage long sys_msgget (key_t key, int msgflg) int id, ret = -EPERM; struct msg_queue *msq; - down(&msg_ids.sem); + mutex_lock(&msg_ids.mutex); if (key == IPC_PRIVATE) ret = newque(key, msgflg); else if ((id = ipc_findkey(&msg_ids, key)) == -1) { /* key not used */ @@ -231,7 +233,7 @@ asmlinkage long sys_msgget (key_t key, int msgflg) } msg_unlock(msq); } - up(&msg_ids.sem); + mutex_unlock(&msg_ids.mutex); return ret; } @@ -361,7 +363,7 @@ asmlinkage long sys_msgctl (int msqid, int cmd, struct msqid_ds __user *buf) msginfo.msgmnb = msg_ctlmnb; msginfo.msgssz = MSGSSZ; msginfo.msgseg = MSGSEG; - down(&msg_ids.sem); + mutex_lock(&msg_ids.mutex); if (cmd == MSG_INFO) { msginfo.msgpool = msg_ids.in_use; msginfo.msgmap = atomic_read(&msg_hdrs); @@ -372,7 +374,7 @@ asmlinkage long sys_msgctl (int msqid, int cmd, struct msqid_ds __user *buf) msginfo.msgtql = MSGTQL; } max_id = msg_ids.max_id; - up(&msg_ids.sem); + mutex_unlock(&msg_ids.mutex); if (copy_to_user (buf, &msginfo, sizeof(struct msginfo))) return -EFAULT; return (max_id < 0) ? 0: max_id; @@ -435,7 +437,7 @@ asmlinkage long sys_msgctl (int msqid, int cmd, struct msqid_ds __user *buf) return -EINVAL; } - down(&msg_ids.sem); + mutex_lock(&msg_ids.mutex); msq = msg_lock(msqid); err=-EINVAL; if (msq == NULL) @@ -489,7 +491,7 @@ asmlinkage long sys_msgctl (int msqid, int cmd, struct msqid_ds __user *buf) } err = 0; out_up: - up(&msg_ids.sem); + mutex_unlock(&msg_ids.mutex); return err; out_unlock_up: msg_unlock(msq); diff --git a/ipc/sem.c b/ipc/sem.c index 59696a840be1..642659cd596b 100644 --- a/ipc/sem.c +++ b/ipc/sem.c @@ -75,6 +75,8 @@ #include <linux/audit.h> #include <linux/capability.h> #include <linux/seq_file.h> +#include <linux/mutex.h> + #include <asm/uaccess.h> #include "util.h" @@ -139,7 +141,7 @@ void __init sem_init (void) * * if it's IN_WAKEUP, then it must wait until the value changes * * if it's not -EINTR, then the operation was completed by * update_queue. semtimedop can return queue.status without - * performing any operation on the semaphore array. + * performing any operation on the sem array. * * otherwise it must acquire the spinlock and check what's up. 
* * The two-stage algorithm is necessary to protect against the following @@ -214,7 +216,7 @@ asmlinkage long sys_semget (key_t key, int nsems, int semflg) if (nsems < 0 || nsems > sc_semmsl) return -EINVAL; - down(&sem_ids.sem); + mutex_lock(&sem_ids.mutex); if (key == IPC_PRIVATE) { err = newary(key, nsems, semflg); @@ -227,8 +229,7 @@ asmlinkage long sys_semget (key_t key, int nsems, int semflg) err = -EEXIST; } else { sma = sem_lock(id); - if(sma==NULL) - BUG(); + BUG_ON(sma==NULL); if (nsems > sma->sem_nsems) err = -EINVAL; else if (ipcperms(&sma->sem_perm, semflg)) @@ -242,7 +243,7 @@ asmlinkage long sys_semget (key_t key, int nsems, int semflg) sem_unlock(sma); } - up(&sem_ids.sem); + mutex_unlock(&sem_ids.mutex); return err; } @@ -437,8 +438,8 @@ static int count_semzcnt (struct sem_array * sma, ushort semnum) return semzcnt; } -/* Free a semaphore set. freeary() is called with sem_ids.sem down and - * the spinlock for this semaphore set hold. sem_ids.sem remains locked +/* Free a semaphore set. freeary() is called with sem_ids.mutex locked and + * the spinlock for this semaphore set hold. sem_ids.mutex remains locked * on exit. */ static void freeary (struct sem_array *sma, int id) @@ -525,7 +526,7 @@ static int semctl_nolock(int semid, int semnum, int cmd, int version, union semu seminfo.semmnu = SEMMNU; seminfo.semmap = SEMMAP; seminfo.semume = SEMUME; - down(&sem_ids.sem); + mutex_lock(&sem_ids.mutex); if (cmd == SEM_INFO) { seminfo.semusz = sem_ids.in_use; seminfo.semaem = used_sems; @@ -534,7 +535,7 @@ static int semctl_nolock(int semid, int semnum, int cmd, int version, union semu seminfo.semaem = SEMAEM; } max_id = sem_ids.max_id; - up(&sem_ids.sem); + mutex_unlock(&sem_ids.mutex); if (copy_to_user (arg.__buf, &seminfo, sizeof(struct seminfo))) return -EFAULT; return (max_id < 0) ? 
0: max_id; @@ -885,9 +886,9 @@ asmlinkage long sys_semctl (int semid, int semnum, int cmd, union semun arg) return err; case IPC_RMID: case IPC_SET: - down(&sem_ids.sem); + mutex_lock(&sem_ids.mutex); err = semctl_down(semid,semnum,cmd,version,arg); - up(&sem_ids.sem); + mutex_unlock(&sem_ids.mutex); return err; default: return -EINVAL; @@ -1181,8 +1182,7 @@ retry_undos: sma = sem_lock(semid); if(sma==NULL) { - if(queue.prev != NULL) - BUG(); + BUG_ON(queue.prev != NULL); error = -EIDRM; goto out_free; } @@ -1299,9 +1299,9 @@ found: /* perform adjustments registered in u */ nsems = sma->sem_nsems; for (i = 0; i < nsems; i++) { - struct sem * sem = &sma->sem_base[i]; + struct sem * semaphore = &sma->sem_base[i]; if (u->semadj[i]) { - sem->semval += u->semadj[i]; + semaphore->semval += u->semadj[i]; /* * Range checks of the new semaphore value, * not defined by sus: @@ -1315,11 +1315,11 @@ found: * * Manfred <manfred@colorfullife.com> */ - if (sem->semval < 0) - sem->semval = 0; - if (sem->semval > SEMVMX) - sem->semval = SEMVMX; - sem->sempid = current->tgid; + if (semaphore->semval < 0) + semaphore->semval = 0; + if (semaphore->semval > SEMVMX) + semaphore->semval = SEMVMX; + semaphore->sempid = current->tgid; } } sma->sem_otime = get_seconds(); diff --git a/ipc/shm.c b/ipc/shm.c index 6f9615c09fb2..f806a2e314e0 100644 --- a/ipc/shm.c +++ b/ipc/shm.c @@ -30,6 +30,7 @@ #include <linux/capability.h> #include <linux/ptrace.h> #include <linux/seq_file.h> +#include <linux/mutex.h> #include <asm/uaccess.h> @@ -109,7 +110,7 @@ static void shm_open (struct vm_area_struct *shmd) * * @shp: struct to free * - * It has to be called with shp and shm_ids.sem locked, + * It has to be called with shp and shm_ids.mutex locked, * but returns with shp unlocked and freed. */ static void shm_destroy (struct shmid_kernel *shp) @@ -139,7 +140,7 @@ static void shm_close (struct vm_area_struct *shmd) int id = file->f_dentry->d_inode->i_ino; struct shmid_kernel *shp; - down (&shm_ids.sem); + mutex_lock(&shm_ids.mutex); /* remove from the list of attaches of the shm segment */ if(!(shp = shm_lock(id))) BUG(); @@ -151,7 +152,7 @@ static void shm_close (struct vm_area_struct *shmd) shm_destroy (shp); else shm_unlock(shp); - up (&shm_ids.sem); + mutex_unlock(&shm_ids.mutex); } static int shm_mmap(struct file * file, struct vm_area_struct * vma) @@ -270,7 +271,7 @@ asmlinkage long sys_shmget (key_t key, size_t size, int shmflg) struct shmid_kernel *shp; int err, id = 0; - down(&shm_ids.sem); + mutex_lock(&shm_ids.mutex); if (key == IPC_PRIVATE) { err = newseg(key, shmflg, size); } else if ((id = ipc_findkey(&shm_ids, key)) == -1) { @@ -296,7 +297,7 @@ asmlinkage long sys_shmget (key_t key, size_t size, int shmflg) } shm_unlock(shp); } - up(&shm_ids.sem); + mutex_unlock(&shm_ids.mutex); return err; } @@ -467,14 +468,14 @@ asmlinkage long sys_shmctl (int shmid, int cmd, struct shmid_ds __user *buf) return err; memset(&shm_info,0,sizeof(shm_info)); - down(&shm_ids.sem); + mutex_lock(&shm_ids.mutex); shm_info.used_ids = shm_ids.in_use; shm_get_stat (&shm_info.shm_rss, &shm_info.shm_swp); shm_info.shm_tot = shm_tot; shm_info.swap_attempts = 0; shm_info.swap_successes = 0; err = shm_ids.max_id; - up(&shm_ids.sem); + mutex_unlock(&shm_ids.mutex); if(copy_to_user (buf, &shm_info, sizeof(shm_info))) { err = -EFAULT; goto out; @@ -583,7 +584,7 @@ asmlinkage long sys_shmctl (int shmid, int cmd, struct shmid_ds __user *buf) * Instead we set a destroyed flag, and then blow * the name away when the usage hits zero. 
*/ - down(&shm_ids.sem); + mutex_lock(&shm_ids.mutex); shp = shm_lock(shmid); err = -EINVAL; if (shp == NULL) @@ -610,7 +611,7 @@ asmlinkage long sys_shmctl (int shmid, int cmd, struct shmid_ds __user *buf) shm_unlock(shp); } else shm_destroy (shp); - up(&shm_ids.sem); + mutex_unlock(&shm_ids.mutex); goto out; } @@ -620,12 +621,13 @@ asmlinkage long sys_shmctl (int shmid, int cmd, struct shmid_ds __user *buf) err = -EFAULT; goto out; } - down(&shm_ids.sem); + mutex_lock(&shm_ids.mutex); shp = shm_lock(shmid); err=-EINVAL; if(shp==NULL) goto out_up; - if ((err = audit_ipc_perms(0, setbuf.uid, setbuf.gid, setbuf.mode, &(shp->shm_perm)))) + if ((err = audit_ipc_perms(0, setbuf.uid, setbuf.gid, + setbuf.mode, &(shp->shm_perm)))) goto out_unlock_up; err = shm_checkid(shp,shmid); if(err) @@ -658,7 +660,7 @@ asmlinkage long sys_shmctl (int shmid, int cmd, struct shmid_ds __user *buf) out_unlock_up: shm_unlock(shp); out_up: - up(&shm_ids.sem); + mutex_unlock(&shm_ids.mutex); goto out; out_unlock: shm_unlock(shp); @@ -771,7 +773,7 @@ long do_shmat(int shmid, char __user *shmaddr, int shmflg, ulong *raddr) invalid: up_write(&current->mm->mmap_sem); - down (&shm_ids.sem); + mutex_lock(&shm_ids.mutex); if(!(shp = shm_lock(shmid))) BUG(); shp->shm_nattch--; @@ -780,7 +782,7 @@ invalid: shm_destroy (shp); else shm_unlock(shp); - up (&shm_ids.sem); + mutex_unlock(&shm_ids.mutex); *raddr = (unsigned long) user_addr; err = 0; diff --git a/ipc/util.c b/ipc/util.c index 862621980b01..23151ef32590 100644 --- a/ipc/util.c +++ b/ipc/util.c @@ -68,7 +68,8 @@ __initcall(ipc_init); void __init ipc_init_ids(struct ipc_ids* ids, int size) { int i; - sema_init(&ids->sem,1); + + mutex_init(&ids->mutex); if(size > IPCMNI) size = IPCMNI; @@ -138,7 +139,7 @@ void __init ipc_init_proc_interface(const char *path, const char *header, * @ids: Identifier set * @key: The key to find * - * Requires ipc_ids.sem locked. + * Requires ipc_ids.mutex locked. * Returns the identifier if found or -1 if not. */ @@ -150,7 +151,7 @@ int ipc_findkey(struct ipc_ids* ids, key_t key) /* * rcu_dereference() is not needed here - * since ipc_ids.sem is held + * since ipc_ids.mutex is held */ for (id = 0; id <= max_id; id++) { p = ids->entries->p[id]; @@ -163,7 +164,7 @@ int ipc_findkey(struct ipc_ids* ids, key_t key) } /* - * Requires ipc_ids.sem locked + * Requires ipc_ids.mutex locked */ static int grow_ary(struct ipc_ids* ids, int newsize) { @@ -210,7 +211,7 @@ static int grow_ary(struct ipc_ids* ids, int newsize) * is returned. The list is returned in a locked state on success. * On failure the list is not locked and -1 is returned. * - * Called with ipc_ids.sem held. + * Called with ipc_ids.mutex held. */ int ipc_addid(struct ipc_ids* ids, struct kern_ipc_perm* new, int size) @@ -221,7 +222,7 @@ int ipc_addid(struct ipc_ids* ids, struct kern_ipc_perm* new, int size) /* * rcu_dereference()() is not needed here since - * ipc_ids.sem is held + * ipc_ids.mutex is held */ for (id = 0; id < size; id++) { if(ids->entries->p[id] == NULL) @@ -257,7 +258,7 @@ found: * fed an invalid identifier. The entry is removed and internal * variables recomputed. The object associated with the identifier * is returned. - * ipc_ids.sem and the spinlock for this ID is hold before this function + * ipc_ids.mutex and the spinlock for this ID is hold before this function * is called, and remain locked on the exit.
*/ @@ -270,7 +271,7 @@ struct kern_ipc_perm* ipc_rmid(struct ipc_ids* ids, int id) /* * do not need a rcu_dereference()() here to force ordering - * on Alpha, since the ipc_ids.sem is held. + * on Alpha, since the ipc_ids.mutex is held. */ p = ids->entries->p[lid]; ids->entries->p[lid] = NULL; @@ -530,13 +531,13 @@ void ipc64_perm_to_ipc_perm (struct ipc64_perm *in, struct ipc_perm *out) /* * So far only shm_get_stat() calls ipc_get() via shm_get(), so ipc_get() - * is called with shm_ids.sem locked. Since grow_ary() is also called with - * shm_ids.sem down(for Shared Memory), there is no need to add read + * is called with shm_ids.mutex locked. Since grow_ary() is also called with + * shm_ids.mutex down(for Shared Memory), there is no need to add read * barriers here to gurantee the writes in grow_ary() are seen in order * here (for Alpha). * - * However ipc_get() itself does not necessary require ipc_ids.sem down. So - * if in the future ipc_get() is used by other places without ipc_ids.sem + * However ipc_get() itself does not necessary require ipc_ids.mutex down. So + * if in the future ipc_get() is used by other places without ipc_ids.mutex * down, then ipc_get() needs read memery barriers as ipc_lock() does. */ struct kern_ipc_perm* ipc_get(struct ipc_ids* ids, int id) @@ -667,7 +668,7 @@ static void *sysvipc_proc_start(struct seq_file *s, loff_t *pos) * Take the lock - this will be released by the corresponding * call to stop(). */ - down(&iface->ids->sem); + mutex_lock(&iface->ids->mutex); /* pos < 0 is invalid */ if (*pos < 0) @@ -697,7 +698,7 @@ static void sysvipc_proc_stop(struct seq_file *s, void *it) ipc_unlock(ipc); /* Release the lock we took in start() */ - up(&iface->ids->sem); + mutex_unlock(&iface->ids->mutex); } static int sysvipc_proc_show(struct seq_file *s, void *it) diff --git a/ipc/util.h b/ipc/util.h index efaff3ee7de7..0181553d31d8 100644 --- a/ipc/util.h +++ b/ipc/util.h @@ -25,7 +25,7 @@ struct ipc_ids { int max_id; unsigned short seq; unsigned short seq_max; - struct semaphore sem; + struct mutex mutex; struct ipc_id_ary nullentry; struct ipc_id_ary* entries; }; @@ -40,7 +40,7 @@ void __init ipc_init_proc_interface(const char *path, const char *header, #define ipc_init_proc_interface(path, header, ids, show) do {} while (0) #endif -/* must be called with ids->sem acquired.*/ +/* must be called with ids->mutex acquired.*/ int ipc_findkey(struct ipc_ids* ids, key_t key); int ipc_addid(struct ipc_ids* ids, struct kern_ipc_perm* new, int size); diff --git a/kernel/Makefile b/kernel/Makefile index ff1c11dc12cf..58908f9d156a 100644 --- a/kernel/Makefile +++ b/kernel/Makefile @@ -12,6 +12,9 @@ obj-y = sched.o fork.o exec_domain.o panic.o printk.o profile.o \ obj-$(CONFIG_DEBUG_MUTEXES) += mutex-debug.o obj-$(CONFIG_FUTEX) += futex.o +ifeq ($(CONFIG_COMPAT),y) +obj-$(CONFIG_FUTEX) += futex_compat.o +endif obj-$(CONFIG_GENERIC_ISA_DMA) += dma.o obj-$(CONFIG_SMP) += cpu.o spinlock.o obj-$(CONFIG_DEBUG_SPINLOCK) += spinlock.o diff --git a/kernel/compat.c b/kernel/compat.c index 8c9cd88b6785..c1601a84f8d8 100644 --- a/kernel/compat.c +++ b/kernel/compat.c @@ -17,10 +17,10 @@ #include <linux/time.h> #include <linux/signal.h> #include <linux/sched.h> /* for MAX_SCHEDULE_TIMEOUT */ -#include <linux/futex.h> /* for FUTEX_WAIT */ #include <linux/syscalls.h> #include <linux/unistd.h> #include <linux/security.h> +#include <linux/timex.h> #include <asm/uaccess.h> @@ -238,28 +238,6 @@ asmlinkage long compat_sys_sigprocmask(int how, compat_old_sigset_t __user *set, return ret; } 
-#ifdef CONFIG_FUTEX -asmlinkage long compat_sys_futex(u32 __user *uaddr, int op, int val, - struct compat_timespec __user *utime, u32 __user *uaddr2, - int val3) -{ - struct timespec t; - unsigned long timeout = MAX_SCHEDULE_TIMEOUT; - int val2 = 0; - - if ((op == FUTEX_WAIT) && utime) { - if (get_compat_timespec(&t, utime)) - return -EFAULT; - timeout = timespec_to_jiffies(&t) + 1; - } - if (op >= FUTEX_REQUEUE) - val2 = (int) (unsigned long) utime; - - return do_futex((unsigned long)uaddr, op, val, timeout, - (unsigned long)uaddr2, val2, val3); -} -#endif - asmlinkage long compat_sys_setrlimit(unsigned int resource, struct compat_rlimit __user *rlim) { @@ -898,3 +876,61 @@ asmlinkage long compat_sys_rt_sigsuspend(compat_sigset_t __user *unewset, compat return -ERESTARTNOHAND; } #endif /* __ARCH_WANT_COMPAT_SYS_RT_SIGSUSPEND */ + +asmlinkage long compat_sys_adjtimex(struct compat_timex __user *utp) +{ + struct timex txc; + int ret; + + memset(&txc, 0, sizeof(struct timex)); + + if (!access_ok(VERIFY_READ, utp, sizeof(struct compat_timex)) || + __get_user(txc.modes, &utp->modes) || + __get_user(txc.offset, &utp->offset) || + __get_user(txc.freq, &utp->freq) || + __get_user(txc.maxerror, &utp->maxerror) || + __get_user(txc.esterror, &utp->esterror) || + __get_user(txc.status, &utp->status) || + __get_user(txc.constant, &utp->constant) || + __get_user(txc.precision, &utp->precision) || + __get_user(txc.tolerance, &utp->tolerance) || + __get_user(txc.time.tv_sec, &utp->time.tv_sec) || + __get_user(txc.time.tv_usec, &utp->time.tv_usec) || + __get_user(txc.tick, &utp->tick) || + __get_user(txc.ppsfreq, &utp->ppsfreq) || + __get_user(txc.jitter, &utp->jitter) || + __get_user(txc.shift, &utp->shift) || + __get_user(txc.stabil, &utp->stabil) || + __get_user(txc.jitcnt, &utp->jitcnt) || + __get_user(txc.calcnt, &utp->calcnt) || + __get_user(txc.errcnt, &utp->errcnt) || + __get_user(txc.stbcnt, &utp->stbcnt)) + return -EFAULT; + + ret = do_adjtimex(&txc); + + if (!access_ok(VERIFY_WRITE, utp, sizeof(struct compat_timex)) || + __put_user(txc.modes, &utp->modes) || + __put_user(txc.offset, &utp->offset) || + __put_user(txc.freq, &utp->freq) || + __put_user(txc.maxerror, &utp->maxerror) || + __put_user(txc.esterror, &utp->esterror) || + __put_user(txc.status, &utp->status) || + __put_user(txc.constant, &utp->constant) || + __put_user(txc.precision, &utp->precision) || + __put_user(txc.tolerance, &utp->tolerance) || + __put_user(txc.time.tv_sec, &utp->time.tv_sec) || + __put_user(txc.time.tv_usec, &utp->time.tv_usec) || + __put_user(txc.tick, &utp->tick) || + __put_user(txc.ppsfreq, &utp->ppsfreq) || + __put_user(txc.jitter, &utp->jitter) || + __put_user(txc.shift, &utp->shift) || + __put_user(txc.stabil, &utp->stabil) || + __put_user(txc.jitcnt, &utp->jitcnt) || + __put_user(txc.calcnt, &utp->calcnt) || + __put_user(txc.errcnt, &utp->errcnt) || + __put_user(txc.stbcnt, &utp->stbcnt)) + ret = -EFAULT; + + return ret; +} diff --git a/kernel/cpu.c b/kernel/cpu.c index 8be22bd80933..fe2b8d0bfe4c 100644 --- a/kernel/cpu.c +++ b/kernel/cpu.c @@ -18,7 +18,7 @@ /* This protects CPUs going up and down... */ static DECLARE_MUTEX(cpucontrol); -static struct notifier_block *cpu_chain; +static BLOCKING_NOTIFIER_HEAD(cpu_chain); #ifdef CONFIG_HOTPLUG_CPU static struct task_struct *lock_cpu_hotplug_owner; @@ -71,21 +71,13 @@ EXPORT_SYMBOL_GPL(lock_cpu_hotplug_interruptible); /* Need to know about CPUs going up/down? 
*/ int register_cpu_notifier(struct notifier_block *nb) { - int ret; - - if ((ret = lock_cpu_hotplug_interruptible()) != 0) - return ret; - ret = notifier_chain_register(&cpu_chain, nb); - unlock_cpu_hotplug(); - return ret; + return blocking_notifier_chain_register(&cpu_chain, nb); } EXPORT_SYMBOL(register_cpu_notifier); void unregister_cpu_notifier(struct notifier_block *nb) { - lock_cpu_hotplug(); - notifier_chain_unregister(&cpu_chain, nb); - unlock_cpu_hotplug(); + blocking_notifier_chain_unregister(&cpu_chain, nb); } EXPORT_SYMBOL(unregister_cpu_notifier); @@ -141,7 +133,7 @@ int cpu_down(unsigned int cpu) goto out; } - err = notifier_call_chain(&cpu_chain, CPU_DOWN_PREPARE, + err = blocking_notifier_call_chain(&cpu_chain, CPU_DOWN_PREPARE, (void *)(long)cpu); if (err == NOTIFY_BAD) { printk("%s: attempt to take down CPU %u failed\n", @@ -159,7 +151,7 @@ int cpu_down(unsigned int cpu) p = __stop_machine_run(take_cpu_down, NULL, cpu); if (IS_ERR(p)) { /* CPU didn't die: tell everyone. Can't complain. */ - if (notifier_call_chain(&cpu_chain, CPU_DOWN_FAILED, + if (blocking_notifier_call_chain(&cpu_chain, CPU_DOWN_FAILED, (void *)(long)cpu) == NOTIFY_BAD) BUG(); @@ -182,8 +174,8 @@ int cpu_down(unsigned int cpu) put_cpu(); /* CPU is completely dead: tell everyone. Too late to complain. */ - if (notifier_call_chain(&cpu_chain, CPU_DEAD, (void *)(long)cpu) - == NOTIFY_BAD) + if (blocking_notifier_call_chain(&cpu_chain, CPU_DEAD, + (void *)(long)cpu) == NOTIFY_BAD) BUG(); check_for_tasks(cpu); @@ -211,7 +203,7 @@ int __devinit cpu_up(unsigned int cpu) goto out; } - ret = notifier_call_chain(&cpu_chain, CPU_UP_PREPARE, hcpu); + ret = blocking_notifier_call_chain(&cpu_chain, CPU_UP_PREPARE, hcpu); if (ret == NOTIFY_BAD) { printk("%s: attempt to bring up CPU %u failed\n", __FUNCTION__, cpu); @@ -226,11 +218,12 @@ int __devinit cpu_up(unsigned int cpu) BUG_ON(!cpu_online(cpu)); /* Now call notifier in preparation. 
*/ - notifier_call_chain(&cpu_chain, CPU_ONLINE, hcpu); + blocking_notifier_call_chain(&cpu_chain, CPU_ONLINE, hcpu); out_notify: if (ret != 0) - notifier_call_chain(&cpu_chain, CPU_UP_CANCELED, hcpu); + blocking_notifier_call_chain(&cpu_chain, + CPU_UP_CANCELED, hcpu); out: unlock_cpu_hotplug(); return ret; diff --git a/kernel/exit.c b/kernel/exit.c index 8037405e136e..a8c7efc7a681 100644 --- a/kernel/exit.c +++ b/kernel/exit.c @@ -31,6 +31,8 @@ #include <linux/signal.h> #include <linux/cn_proc.h> #include <linux/mutex.h> +#include <linux/futex.h> +#include <linux/compat.h> #include <asm/uaccess.h> #include <asm/unistd.h> @@ -852,6 +854,12 @@ fastcall NORET_TYPE void do_exit(long code) exit_itimers(tsk->signal); acct_process(code); } + if (unlikely(tsk->robust_list)) + exit_robust_list(tsk); +#ifdef CONFIG_COMPAT + if (unlikely(tsk->compat_robust_list)) + compat_exit_robust_list(tsk); +#endif exit_mm(tsk); exit_sem(tsk); diff --git a/kernel/fork.c b/kernel/fork.c index a02063903aaa..c49bd193b058 100644 --- a/kernel/fork.c +++ b/kernel/fork.c @@ -769,8 +769,7 @@ int unshare_files(void) struct files_struct *files = current->files; int rc; - if(!files) - BUG(); + BUG_ON(!files); /* This can race but the race causes us to copy when we don't need to and drop the copy */ @@ -848,7 +847,7 @@ static inline int copy_signal(unsigned long clone_flags, struct task_struct * ts hrtimer_init(&sig->real_timer, CLOCK_MONOTONIC, HRTIMER_REL); sig->it_real_incr.tv64 = 0; sig->real_timer.function = it_real_fn; - sig->real_timer.data = tsk; + sig->tsk = tsk; sig->it_virt_expires = cputime_zero; sig->it_virt_incr = cputime_zero; @@ -1062,7 +1061,10 @@ static task_t *copy_process(unsigned long clone_flags, * Clear TID on mm_release()? */ p->clear_child_tid = (clone_flags & CLONE_CHILD_CLEARTID) ? child_tidptr: NULL; - + p->robust_list = NULL; +#ifdef CONFIG_COMPAT + p->compat_robust_list = NULL; +#endif /* * sigaltstack should be cleared when sharing the same VM */ diff --git a/kernel/futex.c b/kernel/futex.c index 5efa2f978032..9c9b2b6b22dd 100644 --- a/kernel/futex.c +++ b/kernel/futex.c @@ -8,6 +8,10 @@ * Removed page pinning, fix privately mapped COW pages and other cleanups * (C) Copyright 2003, 2004 Jamie Lokier * + * Robust futex support started by Ingo Molnar + * (C) Copyright 2006 Red Hat Inc, All Rights Reserved + * Thanks to Thomas Gleixner for suggestions, analysis and fixes. + * * Thanks to Ben LaHaise for yelling "hashed waitqueues" loudly * enough at me, Linus for the original (flawed) idea, Matthew * Kirkwood for proof-of-concept implementation. @@ -829,6 +833,172 @@ error: goto out; } +/* + * Support for robust futexes: the kernel cleans up held futexes at + * thread exit time. + * + * Implementation: user-space maintains a per-thread list of locks it + * is holding. Upon do_exit(), the kernel carefully walks this list, + * and marks all locks that are owned by this thread with the + * FUTEX_OWNER_DEAD bit, and wakes up a waiter (if any). The list is + * always manipulated with the lock held, so the list is private and + * per-thread. Userspace also maintains a per-thread 'list_op_pending' + * field, to allow the kernel to clean up if the thread dies after + * acquiring the lock, but just before it could have added itself to + * the list. There can only be one such pending lock. 
+ */ + +/** + * sys_set_robust_list - set the robust-futex list head of a task + * @head: pointer to the list-head + * @len: length of the list-head, as userspace expects + */ +asmlinkage long +sys_set_robust_list(struct robust_list_head __user *head, + size_t len) +{ + /* + * The kernel knows only one size for now: + */ + if (unlikely(len != sizeof(*head))) + return -EINVAL; + + current->robust_list = head; + + return 0; +} + +/** + * sys_get_robust_list - get the robust-futex list head of a task + * @pid: pid of the process [zero for current task] + * @head_ptr: pointer to a list-head pointer, the kernel fills it in + * @len_ptr: pointer to a length field, the kernel fills in the header size + */ +asmlinkage long +sys_get_robust_list(int pid, struct robust_list_head __user **head_ptr, + size_t __user *len_ptr) +{ + struct robust_list_head *head; + unsigned long ret; + + if (!pid) + head = current->robust_list; + else { + struct task_struct *p; + + ret = -ESRCH; + read_lock(&tasklist_lock); + p = find_task_by_pid(pid); + if (!p) + goto err_unlock; + ret = -EPERM; + if ((current->euid != p->euid) && (current->euid != p->uid) && + !capable(CAP_SYS_PTRACE)) + goto err_unlock; + head = p->robust_list; + read_unlock(&tasklist_lock); + } + + if (put_user(sizeof(*head), len_ptr)) + return -EFAULT; + return put_user(head, head_ptr); + +err_unlock: + read_unlock(&tasklist_lock); + + return ret; +} + +/* + * Process a futex-list entry, check whether it's owned by the + * dying task, and do notification if so: + */ +int handle_futex_death(u32 __user *uaddr, struct task_struct *curr) +{ + u32 uval; + +retry: + if (get_user(uval, uaddr)) + return -1; + + if ((uval & FUTEX_TID_MASK) == curr->pid) { + /* + * Ok, this dying thread is truly holding a futex + * of interest. Set the OWNER_DIED bit atomically + * via cmpxchg, and if the value had FUTEX_WAITERS + * set, wake up a waiter (if any). (We have to do a + * futex_wake() even if OWNER_DIED is already set - + * to handle the rare but possible case of recursive + * thread-death.) The rest of the cleanup is done in + * userspace. + */ + if (futex_atomic_cmpxchg_inatomic(uaddr, uval, + uval | FUTEX_OWNER_DIED) != uval) + goto retry; + + if (uval & FUTEX_WAITERS) + futex_wake((unsigned long)uaddr, 1); + } + return 0; +} + +/* + * Walk curr->robust_list (very carefully, it's a userspace list!) + * and mark any locks found there dead, and notify any waiters. + * + * We silently return on any sign of list-walking problem. 
+ */ +void exit_robust_list(struct task_struct *curr) +{ + struct robust_list_head __user *head = curr->robust_list; + struct robust_list __user *entry, *pending; + unsigned int limit = ROBUST_LIST_LIMIT; + unsigned long futex_offset; + + /* + * Fetch the list head (which was registered earlier, via + * sys_set_robust_list()): + */ + if (get_user(entry, &head->list.next)) + return; + /* + * Fetch the relative futex offset: + */ + if (get_user(futex_offset, &head->futex_offset)) + return; + /* + * Fetch any possibly pending lock-add first, and handle it + * if it exists: + */ + if (get_user(pending, &head->list_op_pending)) + return; + if (pending) + handle_futex_death((void *)pending + futex_offset, curr); + + while (entry != &head->list) { + /* + * A pending lock might already be on the list, so + * dont process it twice: + */ + if (entry != pending) + if (handle_futex_death((void *)entry + futex_offset, + curr)) + return; + /* + * Fetch the next entry in the list: + */ + if (get_user(entry, &entry->next)) + return; + /* + * Avoid excessively long or circular lists: + */ + if (!--limit) + break; + + cond_resched(); + } +} + long do_futex(unsigned long uaddr, int op, int val, unsigned long timeout, unsigned long uaddr2, int val2, int val3) { diff --git a/kernel/futex_compat.c b/kernel/futex_compat.c new file mode 100644 index 000000000000..54274fc85321 --- /dev/null +++ b/kernel/futex_compat.c @@ -0,0 +1,142 @@ +/* + * linux/kernel/futex_compat.c + * + * Futex compatibililty routines. + * + * Copyright 2006, Red Hat, Inc., Ingo Molnar + */ + +#include <linux/linkage.h> +#include <linux/compat.h> +#include <linux/futex.h> + +#include <asm/uaccess.h> + +/* + * Walk curr->robust_list (very carefully, it's a userspace list!) + * and mark any locks found there dead, and notify any waiters. + * + * We silently return on any sign of list-walking problem. 
+ */ +void compat_exit_robust_list(struct task_struct *curr) +{ + struct compat_robust_list_head __user *head = curr->compat_robust_list; + struct robust_list __user *entry, *pending; + compat_uptr_t uentry, upending; + unsigned int limit = ROBUST_LIST_LIMIT; + compat_long_t futex_offset; + + /* + * Fetch the list head (which was registered earlier, via + * sys_set_robust_list()): + */ + if (get_user(uentry, &head->list.next)) + return; + entry = compat_ptr(uentry); + /* + * Fetch the relative futex offset: + */ + if (get_user(futex_offset, &head->futex_offset)) + return; + /* + * Fetch any possibly pending lock-add first, and handle it + * if it exists: + */ + if (get_user(upending, &head->list_op_pending)) + return; + pending = compat_ptr(upending); + if (upending) + handle_futex_death((void *)pending + futex_offset, curr); + + while (compat_ptr(uentry) != &head->list) { + /* + * A pending lock might already be on the list, so + * dont process it twice: + */ + if (entry != pending) + if (handle_futex_death((void *)entry + futex_offset, + curr)) + return; + + /* + * Fetch the next entry in the list: + */ + if (get_user(uentry, (compat_uptr_t *)&entry->next)) + return; + entry = compat_ptr(uentry); + /* + * Avoid excessively long or circular lists: + */ + if (!--limit) + break; + + cond_resched(); + } +} + +asmlinkage long +compat_sys_set_robust_list(struct compat_robust_list_head __user *head, + compat_size_t len) +{ + if (unlikely(len != sizeof(*head))) + return -EINVAL; + + current->compat_robust_list = head; + + return 0; +} + +asmlinkage long +compat_sys_get_robust_list(int pid, compat_uptr_t *head_ptr, + compat_size_t __user *len_ptr) +{ + struct compat_robust_list_head *head; + unsigned long ret; + + if (!pid) + head = current->compat_robust_list; + else { + struct task_struct *p; + + ret = -ESRCH; + read_lock(&tasklist_lock); + p = find_task_by_pid(pid); + if (!p) + goto err_unlock; + ret = -EPERM; + if ((current->euid != p->euid) && (current->euid != p->uid) && + !capable(CAP_SYS_PTRACE)) + goto err_unlock; + head = p->compat_robust_list; + read_unlock(&tasklist_lock); + } + + if (put_user(sizeof(*head), len_ptr)) + return -EFAULT; + return put_user(ptr_to_compat(head), head_ptr); + +err_unlock: + read_unlock(&tasklist_lock); + + return ret; +} + +asmlinkage long compat_sys_futex(u32 __user *uaddr, int op, u32 val, + struct compat_timespec __user *utime, u32 __user *uaddr2, + u32 val3) +{ + struct timespec t; + unsigned long timeout = MAX_SCHEDULE_TIMEOUT; + int val2 = 0; + + if ((op == FUTEX_WAIT) && utime) { + if (get_compat_timespec(&t, utime)) + return -EFAULT; + timeout = timespec_to_jiffies(&t) + 1; + } + if (op >= FUTEX_REQUEUE) + val2 = (int) (unsigned long) utime; + + return do_futex((unsigned long)uaddr, op, val, timeout, + (unsigned long)uaddr2, val2, val3); +} diff --git a/kernel/hrtimer.c b/kernel/hrtimer.c index 14bc9cfa6399..0237a556eb1f 100644 --- a/kernel/hrtimer.c +++ b/kernel/hrtimer.c @@ -123,6 +123,26 @@ void ktime_get_ts(struct timespec *ts) EXPORT_SYMBOL_GPL(ktime_get_ts); /* + * Get the coarse grained time at the softirq based on xtime and + * wall_to_monotonic. 
+ */ +static void hrtimer_get_softirq_time(struct hrtimer_base *base) +{ + ktime_t xtim, tomono; + unsigned long seq; + + do { + seq = read_seqbegin(&xtime_lock); + xtim = timespec_to_ktime(xtime); + tomono = timespec_to_ktime(wall_to_monotonic); + + } while (read_seqretry(&xtime_lock, seq)); + + base[CLOCK_REALTIME].softirq_time = xtim; + base[CLOCK_MONOTONIC].softirq_time = ktime_add(xtim, tomono); +} + +/* * Functions and macros which are different for UP/SMP systems are kept in a * single place */ @@ -246,7 +266,7 @@ ktime_t ktime_add_ns(const ktime_t kt, u64 nsec) /* * Divide a ktime value by a nanosecond value */ -static unsigned long ktime_divns(const ktime_t kt, nsec_t div) +static unsigned long ktime_divns(const ktime_t kt, s64 div) { u64 dclc, inc, dns; int sft = 0; @@ -281,18 +301,17 @@ void unlock_hrtimer_base(const struct hrtimer *timer, unsigned long *flags) * hrtimer_forward - forward the timer expiry * * @timer: hrtimer to forward + * @now: forward past this time * @interval: the interval to forward * * Forward the timer expiry so it will expire in the future. * Returns the number of overruns. */ unsigned long -hrtimer_forward(struct hrtimer *timer, ktime_t interval) +hrtimer_forward(struct hrtimer *timer, ktime_t now, ktime_t interval) { unsigned long orun = 1; - ktime_t delta, now; - - now = timer->base->get_time(); + ktime_t delta; delta = ktime_sub(now, timer->expires); @@ -303,7 +322,7 @@ hrtimer_forward(struct hrtimer *timer, ktime_t interval) interval.tv64 = timer->base->resolution.tv64; if (unlikely(delta.tv64 >= interval.tv64)) { - nsec_t incr = ktime_to_ns(interval); + s64 incr = ktime_to_ns(interval); orun = ktime_divns(delta, incr); timer->expires = ktime_add_ns(timer->expires, incr * orun); @@ -355,8 +374,6 @@ static void enqueue_hrtimer(struct hrtimer *timer, struct hrtimer_base *base) rb_link_node(&timer->node, parent, link); rb_insert_color(&timer->node, &base->active); - timer->state = HRTIMER_PENDING; - if (!base->first || timer->expires.tv64 < rb_entry(base->first, struct hrtimer, node)->expires.tv64) base->first = &timer->node; @@ -376,6 +393,7 @@ static void __remove_hrtimer(struct hrtimer *timer, struct hrtimer_base *base) if (base->first == &timer->node) base->first = rb_next(&timer->node); rb_erase(&timer->node, &base->active); + timer->node.rb_parent = HRTIMER_INACTIVE; } /* @@ -386,7 +404,6 @@ remove_hrtimer(struct hrtimer *timer, struct hrtimer_base *base) { if (hrtimer_active(timer)) { __remove_hrtimer(timer, base); - timer->state = HRTIMER_INACTIVE; return 1; } return 0; @@ -560,6 +577,7 @@ void hrtimer_init(struct hrtimer *timer, clockid_t clock_id, clock_id = CLOCK_MONOTONIC; timer->base = &bases[clock_id]; + timer->node.rb_parent = HRTIMER_INACTIVE; } /** @@ -586,48 +604,35 @@ int hrtimer_get_res(const clockid_t which_clock, struct timespec *tp) */ static inline void run_hrtimer_queue(struct hrtimer_base *base) { - ktime_t now = base->get_time(); struct rb_node *node; + if (base->get_softirq_time) + base->softirq_time = base->get_softirq_time(); + spin_lock_irq(&base->lock); while ((node = base->first)) { struct hrtimer *timer; - int (*fn)(void *); + int (*fn)(struct hrtimer *); int restart; - void *data; timer = rb_entry(node, struct hrtimer, node); - if (now.tv64 <= timer->expires.tv64) + if (base->softirq_time.tv64 <= timer->expires.tv64) break; fn = timer->function; - data = timer->data; set_curr_timer(base, timer); - timer->state = HRTIMER_RUNNING; __remove_hrtimer(timer, base); spin_unlock_irq(&base->lock); - /* - * fn == NULL is 
special case for the simplest timer - * variant - wake up process and do not restart: - */ - if (!fn) { - wake_up_process(data); - restart = HRTIMER_NORESTART; - } else - restart = fn(data); + restart = fn(timer); spin_lock_irq(&base->lock); - /* Another CPU has added back the timer */ - if (timer->state != HRTIMER_RUNNING) - continue; - - if (restart == HRTIMER_RESTART) + if (restart != HRTIMER_NORESTART) { + BUG_ON(hrtimer_active(timer)); enqueue_hrtimer(timer, base); - else - timer->state = HRTIMER_EXPIRED; + } } set_curr_timer(base, NULL); spin_unlock_irq(&base->lock); @@ -641,6 +646,8 @@ void hrtimer_run_queues(void) struct hrtimer_base *base = __get_cpu_var(hrtimer_bases); int i; + hrtimer_get_softirq_time(base); + for (i = 0; i < MAX_HRTIMER_BASES; i++) run_hrtimer_queue(&base[i]); } @@ -649,79 +656,70 @@ void hrtimer_run_queues(void) * Sleep related functions: */ -/** - * schedule_hrtimer - sleep until timeout - * - * @timer: hrtimer variable initialized with the correct clock base - * @mode: timeout value is abs/rel - * - * Make the current task sleep until @timeout is - * elapsed. - * - * You can set the task state as follows - - * - * %TASK_UNINTERRUPTIBLE - at least @timeout is guaranteed to - * pass before the routine returns. The routine will return 0 - * - * %TASK_INTERRUPTIBLE - the routine may return early if a signal is - * delivered to the current task. In this case the remaining time - * will be returned - * - * The current task state is guaranteed to be TASK_RUNNING when this - * routine returns. - */ -static ktime_t __sched -schedule_hrtimer(struct hrtimer *timer, const enum hrtimer_mode mode) -{ - /* fn stays NULL, meaning single-shot wakeup: */ - timer->data = current; +struct sleep_hrtimer { + struct hrtimer timer; + struct task_struct *task; + int expired; +}; - hrtimer_start(timer, timer->expires, mode); +static int nanosleep_wakeup(struct hrtimer *timer) +{ + struct sleep_hrtimer *t = + container_of(timer, struct sleep_hrtimer, timer); - schedule(); - hrtimer_cancel(timer); + t->expired = 1; + wake_up_process(t->task); - /* Return the remaining time: */ - if (timer->state != HRTIMER_EXPIRED) - return ktime_sub(timer->expires, timer->base->get_time()); - else - return (ktime_t) {.tv64 = 0 }; + return HRTIMER_NORESTART; } -static inline ktime_t __sched -schedule_hrtimer_interruptible(struct hrtimer *timer, - const enum hrtimer_mode mode) +static int __sched do_nanosleep(struct sleep_hrtimer *t, enum hrtimer_mode mode) { - set_current_state(TASK_INTERRUPTIBLE); + t->timer.function = nanosleep_wakeup; + t->task = current; + t->expired = 0; + + do { + set_current_state(TASK_INTERRUPTIBLE); + hrtimer_start(&t->timer, t->timer.expires, mode); + + schedule(); + + if (unlikely(!t->expired)) { + hrtimer_cancel(&t->timer); + mode = HRTIMER_ABS; + } + } while (!t->expired && !signal_pending(current)); - return schedule_hrtimer(timer, mode); + return t->expired; } static long __sched nanosleep_restart(struct restart_block *restart) { + struct sleep_hrtimer t; struct timespec __user *rmtp; struct timespec tu; - void *rfn_save = restart->fn; - struct hrtimer timer; - ktime_t rem; + ktime_t time; restart->fn = do_no_restart_syscall; - hrtimer_init(&timer, (clockid_t) restart->arg3, HRTIMER_ABS); - - timer.expires.tv64 = ((u64)restart->arg1 << 32) | (u64) restart->arg0; - - rem = schedule_hrtimer_interruptible(&timer, HRTIMER_ABS); + hrtimer_init(&t.timer, restart->arg3, HRTIMER_ABS); + t.timer.expires.tv64 = ((u64)restart->arg1 << 32) | (u64) restart->arg0; - if (rem.tv64 <= 0) 
+ if (do_nanosleep(&t, HRTIMER_ABS)) return 0; rmtp = (struct timespec __user *) restart->arg2; - tu = ktime_to_timespec(rem); - if (rmtp && copy_to_user(rmtp, &tu, sizeof(tu))) - return -EFAULT; + if (rmtp) { + time = ktime_sub(t.timer.expires, t.timer.base->get_time()); + if (time.tv64 <= 0) + return 0; + tu = ktime_to_timespec(time); + if (copy_to_user(rmtp, &tu, sizeof(tu))) + return -EFAULT; + } - restart->fn = rfn_save; + restart->fn = nanosleep_restart; /* The other values in restart are already filled in */ return -ERESTART_RESTARTBLOCK; @@ -731,33 +729,34 @@ long hrtimer_nanosleep(struct timespec *rqtp, struct timespec __user *rmtp, const enum hrtimer_mode mode, const clockid_t clockid) { struct restart_block *restart; - struct hrtimer timer; + struct sleep_hrtimer t; struct timespec tu; ktime_t rem; - hrtimer_init(&timer, clockid, mode); - - timer.expires = timespec_to_ktime(*rqtp); - - rem = schedule_hrtimer_interruptible(&timer, mode); - if (rem.tv64 <= 0) + hrtimer_init(&t.timer, clockid, mode); + t.timer.expires = timespec_to_ktime(*rqtp); + if (do_nanosleep(&t, mode)) return 0; /* Absolute timers do not update the rmtp value and restart: */ if (mode == HRTIMER_ABS) return -ERESTARTNOHAND; - tu = ktime_to_timespec(rem); - - if (rmtp && copy_to_user(rmtp, &tu, sizeof(tu))) - return -EFAULT; + if (rmtp) { + rem = ktime_sub(t.timer.expires, t.timer.base->get_time()); + if (rem.tv64 <= 0) + return 0; + tu = ktime_to_timespec(rem); + if (copy_to_user(rmtp, &tu, sizeof(tu))) + return -EFAULT; + } restart = &current_thread_info()->restart_block; restart->fn = nanosleep_restart; - restart->arg0 = timer.expires.tv64 & 0xFFFFFFFF; - restart->arg1 = timer.expires.tv64 >> 32; + restart->arg0 = t.timer.expires.tv64 & 0xFFFFFFFF; + restart->arg1 = t.timer.expires.tv64 >> 32; restart->arg2 = (unsigned long) rmtp; - restart->arg3 = (unsigned long) timer.base->index; + restart->arg3 = (unsigned long) t.timer.base->index; return -ERESTART_RESTARTBLOCK; } diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c index 6edfcef291e8..ac766ad573e8 100644 --- a/kernel/irq/manage.c +++ b/kernel/irq/manage.c @@ -271,6 +271,7 @@ void free_irq(unsigned int irq, void *dev_id) struct irqaction **p; unsigned long flags; + WARN_ON(in_interrupt()); if (irq >= NR_IRQS) return; diff --git a/kernel/itimer.c b/kernel/itimer.c index 680e6b70c872..204ed7939e75 100644 --- a/kernel/itimer.c +++ b/kernel/itimer.c @@ -128,16 +128,16 @@ asmlinkage long sys_getitimer(int which, struct itimerval __user *value) /* * The timer is automagically restarted, when interval != 0 */ -int it_real_fn(void *data) +int it_real_fn(struct hrtimer *timer) { - struct task_struct *tsk = (struct task_struct *) data; + struct signal_struct *sig = + container_of(timer, struct signal_struct, real_timer); - send_group_sig_info(SIGALRM, SEND_SIG_PRIV, tsk); - - if (tsk->signal->it_real_incr.tv64 != 0) { - hrtimer_forward(&tsk->signal->real_timer, - tsk->signal->it_real_incr); + send_group_sig_info(SIGALRM, SEND_SIG_PRIV, sig->tsk); + if (sig->it_real_incr.tv64 != 0) { + hrtimer_forward(timer, timer->base->softirq_time, + sig->it_real_incr); return HRTIMER_RESTART; } return HRTIMER_NORESTART; } diff --git a/kernel/kprobes.c b/kernel/kprobes.c index 1fb9f753ef60..1156eb0977d0 100644 --- a/kernel/kprobes.c +++ b/kernel/kprobes.c @@ -323,10 +323,10 @@ struct hlist_head __kprobes *kretprobe_inst_table_head(struct task_struct *tsk) } /* - * This function is called from exit_thread or flush_thread when task tk's - * stack is being recycled so that we can
recycle any function-return probe - * instances associated with this task. These left over instances represent - * probed functions that have been called but will never return. + * This function is called from finish_task_switch when task tk becomes dead, + * so that we can recycle any function-return probe instances associated + * with this task. These left over instances represent probed functions + * that have been called but will never return. */ void __kprobes kprobe_flush_task(struct task_struct *tk) { @@ -336,7 +336,7 @@ void __kprobes kprobe_flush_task(struct task_struct *tk) unsigned long flags = 0; spin_lock_irqsave(&kretprobe_lock, flags); - head = kretprobe_inst_table_head(current); + head = kretprobe_inst_table_head(tk); hlist_for_each_entry_safe(ri, node, tmp, head, hlist) { if (ri->task == tk) recycle_rp_inst(ri); diff --git a/kernel/module.c b/kernel/module.c index ddfe45ac2fd1..bd088a7c1499 100644 --- a/kernel/module.c +++ b/kernel/module.c @@ -64,26 +64,17 @@ static DEFINE_SPINLOCK(modlist_lock); static DEFINE_MUTEX(module_mutex); static LIST_HEAD(modules); -static DEFINE_MUTEX(notify_mutex); -static struct notifier_block * module_notify_list; +static BLOCKING_NOTIFIER_HEAD(module_notify_list); int register_module_notifier(struct notifier_block * nb) { - int err; - mutex_lock(¬ify_mutex); - err = notifier_chain_register(&module_notify_list, nb); - mutex_unlock(¬ify_mutex); - return err; + return blocking_notifier_chain_register(&module_notify_list, nb); } EXPORT_SYMBOL(register_module_notifier); int unregister_module_notifier(struct notifier_block * nb) { - int err; - mutex_lock(¬ify_mutex); - err = notifier_chain_unregister(&module_notify_list, nb); - mutex_unlock(¬ify_mutex); - return err; + return blocking_notifier_chain_unregister(&module_notify_list, nb); } EXPORT_SYMBOL(unregister_module_notifier); @@ -136,7 +127,7 @@ extern const unsigned long __start___kcrctab_gpl_future[]; #ifndef CONFIG_MODVERSIONS #define symversion(base, idx) NULL #else -#define symversion(base, idx) ((base) ? ((base) + (idx)) : NULL) +#define symversion(base, idx) ((base != NULL) ? ((base) + (idx)) : NULL) #endif /* lookup symbol in given range of kernel_symbols */ @@ -1816,9 +1807,8 @@ sys_init_module(void __user *umod, /* Drop lock so they can recurse */ mutex_unlock(&module_mutex); - mutex_lock(¬ify_mutex); - notifier_call_chain(&module_notify_list, MODULE_STATE_COMING, mod); - mutex_unlock(¬ify_mutex); + blocking_notifier_call_chain(&module_notify_list, + MODULE_STATE_COMING, mod); /* Start the module */ if (mod->init != NULL) diff --git a/kernel/panic.c b/kernel/panic.c index acd95adddb93..f895c7c01d5b 100644 --- a/kernel/panic.c +++ b/kernel/panic.c @@ -29,7 +29,7 @@ static DEFINE_SPINLOCK(pause_on_oops_lock); int panic_timeout; EXPORT_SYMBOL(panic_timeout); -struct notifier_block *panic_notifier_list; +ATOMIC_NOTIFIER_HEAD(panic_notifier_list); EXPORT_SYMBOL(panic_notifier_list); @@ -97,7 +97,7 @@ NORET_TYPE void panic(const char * fmt, ...) smp_send_stop(); #endif - notifier_call_chain(&panic_notifier_list, 0, buf); + atomic_notifier_call_chain(&panic_notifier_list, 0, buf); if (!panic_blink) panic_blink = no_blink; diff --git a/kernel/params.c b/kernel/params.c index 9de637a5c8bc..af43ecdc8d9b 100644 --- a/kernel/params.c +++ b/kernel/params.c @@ -31,7 +31,7 @@ #define DEBUGP(fmt, a...) 
#endif -static inline int dash2underscore(char c) +static inline char dash2underscore(char c) { if (c == '-') return '_'; diff --git a/kernel/posix-timers.c b/kernel/posix-timers.c index 9944379360b5..ac6dc8744429 100644 --- a/kernel/posix-timers.c +++ b/kernel/posix-timers.c @@ -145,7 +145,7 @@ static int common_timer_set(struct k_itimer *, int, struct itimerspec *, struct itimerspec *); static int common_timer_del(struct k_itimer *timer); -static int posix_timer_fn(void *data); +static int posix_timer_fn(struct hrtimer *data); static struct k_itimer *lock_timer(timer_t timer_id, unsigned long *flags); @@ -251,15 +251,18 @@ __initcall(init_posix_timers); static void schedule_next_timer(struct k_itimer *timr) { + struct hrtimer *timer = &timr->it.real.timer; + if (timr->it.real.interval.tv64 == 0) return; - timr->it_overrun += hrtimer_forward(&timr->it.real.timer, + timr->it_overrun += hrtimer_forward(timer, timer->base->get_time(), timr->it.real.interval); + timr->it_overrun_last = timr->it_overrun; timr->it_overrun = -1; ++timr->it_requeue_pending; - hrtimer_restart(&timr->it.real.timer); + hrtimer_restart(timer); } /* @@ -331,13 +334,14 @@ EXPORT_SYMBOL_GPL(posix_timer_event); * This code is for CLOCK_REALTIME* and CLOCK_MONOTONIC* timers. */ -static int posix_timer_fn(void *data) +static int posix_timer_fn(struct hrtimer *timer) { - struct k_itimer *timr = data; + struct k_itimer *timr; unsigned long flags; int si_private = 0; int ret = HRTIMER_NORESTART; + timr = container_of(timer, struct k_itimer, it.real.timer); spin_lock_irqsave(&timr->it_lock, flags); if (timr->it.real.interval.tv64 != 0) @@ -351,7 +355,8 @@ static int posix_timer_fn(void *data) */ if (timr->it.real.interval.tv64 != 0) { timr->it_overrun += - hrtimer_forward(&timr->it.real.timer, + hrtimer_forward(timer, + timer->base->softirq_time, timr->it.real.interval); ret = HRTIMER_RESTART; ++timr->it_requeue_pending; @@ -603,38 +608,41 @@ static struct k_itimer * lock_timer(timer_t timer_id, unsigned long *flags) static void common_timer_get(struct k_itimer *timr, struct itimerspec *cur_setting) { - ktime_t remaining; + ktime_t now, remaining, iv; struct hrtimer *timer = &timr->it.real.timer; memset(cur_setting, 0, sizeof(struct itimerspec)); - remaining = hrtimer_get_remaining(timer); - /* Time left ? or timer pending */ - if (remaining.tv64 > 0 || hrtimer_active(timer)) - goto calci; + iv = timr->it.real.interval; + /* interval timer ? */ - if (timr->it.real.interval.tv64 == 0) + if (iv.tv64) + cur_setting->it_interval = ktime_to_timespec(iv); + else if (!hrtimer_active(timer) && + (timr->it_sigev_notify & ~SIGEV_THREAD_ID) != SIGEV_NONE) return; + + now = timer->base->get_time(); + /* - * When a requeue is pending or this is a SIGEV_NONE timer - * move the expiry time forward by intervals, so expiry is > - * now. + * When a requeue is pending or this is a SIGEV_NONE + * timer move the expiry time forward by intervals, so + * expiry is > now. */ - if (timr->it_requeue_pending & REQUEUE_PENDING || - (timr->it_sigev_notify & ~SIGEV_THREAD_ID) == SIGEV_NONE) { - timr->it_overrun += - hrtimer_forward(timer, timr->it.real.interval); - remaining = hrtimer_get_remaining(timer); - } - calci: - /* interval timer ? 
*/ - if (timr->it.real.interval.tv64 != 0) - cur_setting->it_interval = - ktime_to_timespec(timr->it.real.interval); + if (iv.tv64 && (timr->it_requeue_pending & REQUEUE_PENDING || + (timr->it_sigev_notify & ~SIGEV_THREAD_ID) == SIGEV_NONE)) + timr->it_overrun += hrtimer_forward(timer, now, iv); + + remaining = ktime_sub(timer->expires, now); /* Return 0 only, when the timer is expired and not pending */ - if (remaining.tv64 <= 0) - cur_setting->it_value.tv_nsec = 1; - else + if (remaining.tv64 <= 0) { + /* + * A single shot SIGEV_NONE timer must return 0, when + * it is expired ! + */ + if ((timr->it_sigev_notify & ~SIGEV_THREAD_ID) != SIGEV_NONE) + cur_setting->it_value.tv_nsec = 1; + } else cur_setting->it_value = ktime_to_timespec(remaining); } @@ -717,7 +725,6 @@ common_timer_set(struct k_itimer *timr, int flags, mode = flags & TIMER_ABSTIME ? HRTIMER_ABS : HRTIMER_REL; hrtimer_init(&timr->it.real.timer, timr->it_clock, mode); - timr->it.real.timer.data = timr; timr->it.real.timer.function = posix_timer_fn; timer->expires = timespec_to_ktime(new_setting->it_value); diff --git a/kernel/power/swap.c b/kernel/power/swap.c index 9177f3f73a6c..044b8e0c1025 100644 --- a/kernel/power/swap.c +++ b/kernel/power/swap.c @@ -454,10 +454,11 @@ static int load_image(struct swap_map_handle *handle, nr_pages++; } } while (ret > 0); - if (!error) + if (!error) { printk("\b\b\b\bdone\n"); - if (!snapshot_image_loaded(snapshot)) - error = -ENODATA; + if (!snapshot_image_loaded(snapshot)) + error = -ENODATA; + } return error; } diff --git a/kernel/profile.c b/kernel/profile.c index ad81f799a9b4..5a730fdb1a2c 100644 --- a/kernel/profile.c +++ b/kernel/profile.c @@ -87,72 +87,52 @@ void __init profile_init(void) #ifdef CONFIG_PROFILING -static DECLARE_RWSEM(profile_rwsem); -static DEFINE_RWLOCK(handoff_lock); -static struct notifier_block * task_exit_notifier; -static struct notifier_block * task_free_notifier; -static struct notifier_block * munmap_notifier; +static BLOCKING_NOTIFIER_HEAD(task_exit_notifier); +static ATOMIC_NOTIFIER_HEAD(task_free_notifier); +static BLOCKING_NOTIFIER_HEAD(munmap_notifier); void profile_task_exit(struct task_struct * task) { - down_read(&profile_rwsem); - notifier_call_chain(&task_exit_notifier, 0, task); - up_read(&profile_rwsem); + blocking_notifier_call_chain(&task_exit_notifier, 0, task); } int profile_handoff_task(struct task_struct * task) { int ret; - read_lock(&handoff_lock); - ret = notifier_call_chain(&task_free_notifier, 0, task); - read_unlock(&handoff_lock); + ret = atomic_notifier_call_chain(&task_free_notifier, 0, task); return (ret == NOTIFY_OK) ? 
1 : 0; } void profile_munmap(unsigned long addr) { - down_read(&profile_rwsem); - notifier_call_chain(&munmap_notifier, 0, (void *)addr); - up_read(&profile_rwsem); + blocking_notifier_call_chain(&munmap_notifier, 0, (void *)addr); } int task_handoff_register(struct notifier_block * n) { - int err = -EINVAL; - - write_lock(&handoff_lock); - err = notifier_chain_register(&task_free_notifier, n); - write_unlock(&handoff_lock); - return err; + return atomic_notifier_chain_register(&task_free_notifier, n); } int task_handoff_unregister(struct notifier_block * n) { - int err = -EINVAL; - - write_lock(&handoff_lock); - err = notifier_chain_unregister(&task_free_notifier, n); - write_unlock(&handoff_lock); - return err; + return atomic_notifier_chain_unregister(&task_free_notifier, n); } int profile_event_register(enum profile_type type, struct notifier_block * n) { int err = -EINVAL; - down_write(&profile_rwsem); - switch (type) { case PROFILE_TASK_EXIT: - err = notifier_chain_register(&task_exit_notifier, n); + err = blocking_notifier_chain_register( + &task_exit_notifier, n); break; case PROFILE_MUNMAP: - err = notifier_chain_register(&munmap_notifier, n); + err = blocking_notifier_chain_register( + &munmap_notifier, n); break; } - up_write(&profile_rwsem); - return err; } @@ -161,18 +141,17 @@ int profile_event_unregister(enum profile_type type, struct notifier_block * n) { int err = -EINVAL; - down_write(&profile_rwsem); - switch (type) { case PROFILE_TASK_EXIT: - err = notifier_chain_unregister(&task_exit_notifier, n); + err = blocking_notifier_chain_unregister( + &task_exit_notifier, n); break; case PROFILE_MUNMAP: - err = notifier_chain_unregister(&munmap_notifier, n); + err = blocking_notifier_chain_unregister( + &munmap_notifier, n); break; } - up_write(&profile_rwsem); return err; } diff --git a/kernel/rcutorture.c b/kernel/rcutorture.c index b4b362b5baf5..8154e7589d12 100644 --- a/kernel/rcutorture.c +++ b/kernel/rcutorture.c @@ -301,7 +301,7 @@ rcu_torture_printk(char *page) long pipesummary[RCU_TORTURE_PIPE_LEN + 1] = { 0 }; long batchsummary[RCU_TORTURE_PIPE_LEN + 1] = { 0 }; - for_each_cpu(cpu) { + for_each_possible_cpu(cpu) { for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) { pipesummary[i] += per_cpu(rcu_torture_count, cpu)[i]; batchsummary[i] += per_cpu(rcu_torture_batch, cpu)[i]; @@ -535,7 +535,7 @@ rcu_torture_init(void) atomic_set(&n_rcu_torture_error, 0); for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) atomic_set(&rcu_torture_wcount[i], 0); - for_each_cpu(cpu) { + for_each_possible_cpu(cpu) { for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) { per_cpu(rcu_torture_count, cpu)[i] = 0; per_cpu(rcu_torture_batch, cpu)[i] = 0; diff --git a/kernel/sched.c b/kernel/sched.c index 7ffaabd64f89..a9ecac398bb9 100644 --- a/kernel/sched.c +++ b/kernel/sched.c @@ -49,6 +49,7 @@ #include <linux/syscalls.h> #include <linux/times.h> #include <linux/acct.h> +#include <linux/kprobes.h> #include <asm/tlb.h> #include <asm/unistd.h> @@ -144,7 +145,8 @@ (v1) * (v2_max) / (v1_max) #define DELTA(p) \ - (SCALE(TASK_NICE(p), 40, MAX_BONUS) + INTERACTIVE_DELTA) + (SCALE(TASK_NICE(p) + 20, 40, MAX_BONUS) - 20 * MAX_BONUS / 40 + \ + INTERACTIVE_DELTA) #define TASK_INTERACTIVE(p) \ ((p)->prio <= (p)->static_prio - DELTA(p)) @@ -1546,8 +1548,14 @@ static inline void finish_task_switch(runqueue_t *rq, task_t *prev) finish_lock_switch(rq, prev); if (mm) mmdrop(mm); - if (unlikely(prev_task_flags & PF_DEAD)) + if (unlikely(prev_task_flags & PF_DEAD)) { + /* + * Remove function-return probe instances associated 
with this + * task and put them back on the free list. + */ + kprobe_flush_task(prev); put_task_struct(prev); + } } /** @@ -1617,7 +1625,7 @@ unsigned long nr_uninterruptible(void) { unsigned long i, sum = 0; - for_each_cpu(i) + for_each_possible_cpu(i) sum += cpu_rq(i)->nr_uninterruptible; /* @@ -1634,7 +1642,7 @@ unsigned long long nr_context_switches(void) { unsigned long long i, sum = 0; - for_each_cpu(i) + for_each_possible_cpu(i) sum += cpu_rq(i)->nr_switches; return sum; @@ -1644,7 +1652,7 @@ unsigned long nr_iowait(void) { unsigned long i, sum = 0; - for_each_cpu(i) + for_each_possible_cpu(i) sum += atomic_read(&cpu_rq(i)->nr_iowait); return sum; @@ -2871,13 +2879,11 @@ asmlinkage void __sched schedule(void) * schedule() atomically, we ignore that path for now. * Otherwise, whine if we are scheduling when we should not be. */ - if (likely(!current->exit_state)) { - if (unlikely(in_atomic())) { - printk(KERN_ERR "BUG: scheduling while atomic: " - "%s/0x%08x/%d\n", - current->comm, preempt_count(), current->pid); - dump_stack(); - } + if (unlikely(in_atomic() && !current->exit_state)) { + printk(KERN_ERR "BUG: scheduling while atomic: " + "%s/0x%08x/%d\n", + current->comm, preempt_count(), current->pid); + dump_stack(); } profile_hit(SCHED_PROFILING, __builtin_return_address(0)); @@ -5568,11 +5574,31 @@ static int cpu_to_cpu_group(int cpu) } #endif +#ifdef CONFIG_SCHED_MC +static DEFINE_PER_CPU(struct sched_domain, core_domains); +static struct sched_group sched_group_core[NR_CPUS]; +#endif + +#if defined(CONFIG_SCHED_MC) && defined(CONFIG_SCHED_SMT) +static int cpu_to_core_group(int cpu) +{ + return first_cpu(cpu_sibling_map[cpu]); +} +#elif defined(CONFIG_SCHED_MC) +static int cpu_to_core_group(int cpu) +{ + return cpu; +} +#endif + static DEFINE_PER_CPU(struct sched_domain, phys_domains); static struct sched_group sched_group_phys[NR_CPUS]; static int cpu_to_phys_group(int cpu) { -#ifdef CONFIG_SCHED_SMT +#if defined(CONFIG_SCHED_MC) + cpumask_t mask = cpu_coregroup_map(cpu); + return first_cpu(mask); +#elif defined(CONFIG_SCHED_SMT) return first_cpu(cpu_sibling_map[cpu]); #else return cpu; @@ -5595,6 +5621,32 @@ static int cpu_to_allnodes_group(int cpu) { return cpu_to_node(cpu); } +static void init_numa_sched_groups_power(struct sched_group *group_head) +{ + struct sched_group *sg = group_head; + int j; + + if (!sg) + return; +next_sg: + for_each_cpu_mask(j, sg->cpumask) { + struct sched_domain *sd; + + sd = &per_cpu(phys_domains, j); + if (j != first_cpu(sd->groups->cpumask)) { + /* + * Only add "power" once for each + * physical package. 
+ */ + continue; + } + + sg->cpu_power += sd->groups->cpu_power; + } + sg = sg->next; + if (sg != group_head) + goto next_sg; +} #endif /* @@ -5670,6 +5722,17 @@ void build_sched_domains(const cpumask_t *cpu_map) sd->parent = p; sd->groups = &sched_group_phys[group]; +#ifdef CONFIG_SCHED_MC + p = sd; + sd = &per_cpu(core_domains, i); + group = cpu_to_core_group(i); + *sd = SD_MC_INIT; + sd->span = cpu_coregroup_map(i); + cpus_and(sd->span, sd->span, *cpu_map); + sd->parent = p; + sd->groups = &sched_group_core[group]; +#endif + #ifdef CONFIG_SCHED_SMT p = sd; sd = &per_cpu(cpu_domains, i); @@ -5695,6 +5758,19 @@ void build_sched_domains(const cpumask_t *cpu_map) } #endif +#ifdef CONFIG_SCHED_MC + /* Set up multi-core groups */ + for_each_cpu_mask(i, *cpu_map) { + cpumask_t this_core_map = cpu_coregroup_map(i); + cpus_and(this_core_map, this_core_map, *cpu_map); + if (i != first_cpu(this_core_map)) + continue; + init_sched_build_groups(sched_group_core, this_core_map, + &cpu_to_core_group); + } +#endif + + /* Set up physical groups */ for (i = 0; i < MAX_NUMNODES; i++) { cpumask_t nodemask = node_to_cpumask(i); @@ -5791,51 +5867,38 @@ void build_sched_domains(const cpumask_t *cpu_map) power = SCHED_LOAD_SCALE; sd->groups->cpu_power = power; #endif +#ifdef CONFIG_SCHED_MC + sd = &per_cpu(core_domains, i); + power = SCHED_LOAD_SCALE + (cpus_weight(sd->groups->cpumask)-1) + * SCHED_LOAD_SCALE / 10; + sd->groups->cpu_power = power; + + sd = &per_cpu(phys_domains, i); + /* + * This has to be < 2 * SCHED_LOAD_SCALE + * Lets keep it SCHED_LOAD_SCALE, so that + * while calculating NUMA group's cpu_power + * we can simply do + * numa_group->cpu_power += phys_group->cpu_power; + * + * See "only add power once for each physical pkg" + * comment below + */ + sd->groups->cpu_power = SCHED_LOAD_SCALE; +#else sd = &per_cpu(phys_domains, i); power = SCHED_LOAD_SCALE + SCHED_LOAD_SCALE * (cpus_weight(sd->groups->cpumask)-1) / 10; sd->groups->cpu_power = power; - -#ifdef CONFIG_NUMA - sd = &per_cpu(allnodes_domains, i); - if (sd->groups) { - power = SCHED_LOAD_SCALE + SCHED_LOAD_SCALE * - (cpus_weight(sd->groups->cpumask)-1) / 10; - sd->groups->cpu_power = power; - } #endif } #ifdef CONFIG_NUMA - for (i = 0; i < MAX_NUMNODES; i++) { - struct sched_group *sg = sched_group_nodes[i]; - int j; - - if (sg == NULL) - continue; -next_sg: - for_each_cpu_mask(j, sg->cpumask) { - struct sched_domain *sd; - int power; + for (i = 0; i < MAX_NUMNODES; i++) + init_numa_sched_groups_power(sched_group_nodes[i]); - sd = &per_cpu(phys_domains, j); - if (j != first_cpu(sd->groups->cpumask)) { - /* - * Only add "power" once for each - * physical package. 
- */ - continue; - } - power = SCHED_LOAD_SCALE + SCHED_LOAD_SCALE * - (cpus_weight(sd->groups->cpumask)-1) / 10; - - sg->cpu_power += power; - } - sg = sg->next; - if (sg != sched_group_nodes[i]) - goto next_sg; - } + init_numa_sched_groups_power(sched_group_allnodes); #endif /* Attach the domains */ @@ -5843,6 +5906,8 @@ next_sg: struct sched_domain *sd; #ifdef CONFIG_SCHED_SMT sd = &per_cpu(cpu_domains, i); +#elif defined(CONFIG_SCHED_MC) + sd = &per_cpu(core_domains, i); #else sd = &per_cpu(phys_domains, i); #endif @@ -6015,7 +6080,7 @@ void __init sched_init(void) runqueue_t *rq; int i, j, k; - for_each_cpu(i) { + for_each_possible_cpu(i) { prio_array_t *array; rq = cpu_rq(i); diff --git a/kernel/softlockup.c b/kernel/softlockup.c index d9b3d5847ed8..ced91e1ff564 100644 --- a/kernel/softlockup.c +++ b/kernel/softlockup.c @@ -152,5 +152,5 @@ __init void spawn_softlockup_task(void) cpu_callback(&cpu_nfb, CPU_ONLINE, cpu); register_cpu_notifier(&cpu_nfb); - notifier_chain_register(&panic_notifier_list, &panic_block); + atomic_notifier_chain_register(&panic_notifier_list, &panic_block); } diff --git a/kernel/sys.c b/kernel/sys.c index 38bc73ede2ba..c93d37f71aef 100644 --- a/kernel/sys.c +++ b/kernel/sys.c @@ -95,99 +95,304 @@ int cad_pid = 1; * and the like. */ -static struct notifier_block *reboot_notifier_list; -static DEFINE_RWLOCK(notifier_lock); +static BLOCKING_NOTIFIER_HEAD(reboot_notifier_list); + +/* + * Notifier chain core routines. The exported routines below + * are layered on top of these, with appropriate locking added. + */ + +static int notifier_chain_register(struct notifier_block **nl, + struct notifier_block *n) +{ + while ((*nl) != NULL) { + if (n->priority > (*nl)->priority) + break; + nl = &((*nl)->next); + } + n->next = *nl; + rcu_assign_pointer(*nl, n); + return 0; +} + +static int notifier_chain_unregister(struct notifier_block **nl, + struct notifier_block *n) +{ + while ((*nl) != NULL) { + if ((*nl) == n) { + rcu_assign_pointer(*nl, n->next); + return 0; + } + nl = &((*nl)->next); + } + return -ENOENT; +} + +static int __kprobes notifier_call_chain(struct notifier_block **nl, + unsigned long val, void *v) +{ + int ret = NOTIFY_DONE; + struct notifier_block *nb; + + nb = rcu_dereference(*nl); + while (nb) { + ret = nb->notifier_call(nb, val, v); + if ((ret & NOTIFY_STOP_MASK) == NOTIFY_STOP_MASK) + break; + nb = rcu_dereference(nb->next); + } + return ret; +} + +/* + * Atomic notifier chain routines. Registration and unregistration + * use a mutex, and call_chain is synchronized by RCU (no locks). + */ /** - * notifier_chain_register - Add notifier to a notifier chain - * @list: Pointer to root list pointer + * atomic_notifier_chain_register - Add notifier to an atomic notifier chain + * @nh: Pointer to head of the atomic notifier chain * @n: New entry in notifier chain * - * Adds a notifier to a notifier chain. + * Adds a notifier to an atomic notifier chain. * * Currently always returns zero. 
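For readers following this kernel/sys.c hunk, a minimal sketch of how a subsystem is expected to use the new atomic variant; the chain, handler and event value below are illustrative and not part of this patch. The callback is invoked under rcu_read_lock() by atomic_notifier_call_chain(), so it must not sleep.

#include <linux/init.h>
#include <linux/notifier.h>

static ATOMIC_NOTIFIER_HEAD(my_chain);

static int my_event(struct notifier_block *nb, unsigned long val, void *data)
{
        /* called with the RCU read lock held; must not block */
        return NOTIFY_DONE;
}

static struct notifier_block my_nb = {
        .notifier_call = my_event,
};

static int __init my_init(void)
{
        atomic_notifier_chain_register(&my_chain, &my_nb);
        atomic_notifier_call_chain(&my_chain, 0, NULL);
        return 0;
}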
*/ + +int atomic_notifier_chain_register(struct atomic_notifier_head *nh, + struct notifier_block *n) +{ + unsigned long flags; + int ret; + + spin_lock_irqsave(&nh->lock, flags); + ret = notifier_chain_register(&nh->head, n); + spin_unlock_irqrestore(&nh->lock, flags); + return ret; +} + +EXPORT_SYMBOL_GPL(atomic_notifier_chain_register); + +/** + * atomic_notifier_chain_unregister - Remove notifier from an atomic notifier chain + * @nh: Pointer to head of the atomic notifier chain + * @n: Entry to remove from notifier chain + * + * Removes a notifier from an atomic notifier chain. + * + * Returns zero on success or %-ENOENT on failure. + */ +int atomic_notifier_chain_unregister(struct atomic_notifier_head *nh, + struct notifier_block *n) +{ + unsigned long flags; + int ret; + + spin_lock_irqsave(&nh->lock, flags); + ret = notifier_chain_unregister(&nh->head, n); + spin_unlock_irqrestore(&nh->lock, flags); + synchronize_rcu(); + return ret; +} + +EXPORT_SYMBOL_GPL(atomic_notifier_chain_unregister); + +/** + * atomic_notifier_call_chain - Call functions in an atomic notifier chain + * @nh: Pointer to head of the atomic notifier chain + * @val: Value passed unmodified to notifier function + * @v: Pointer passed unmodified to notifier function + * + * Calls each function in a notifier chain in turn. The functions + * run in an atomic context, so they must not block. + * This routine uses RCU to synchronize with changes to the chain. + * + * If the return value of the notifier can be and'ed + * with %NOTIFY_STOP_MASK then atomic_notifier_call_chain + * will return immediately, with the return value of + * the notifier function which halted execution. + * Otherwise the return value is the return value + * of the last notifier function called. + */ -int notifier_chain_register(struct notifier_block **list, struct notifier_block *n) +int atomic_notifier_call_chain(struct atomic_notifier_head *nh, + unsigned long val, void *v) { - write_lock(¬ifier_lock); - while(*list) - { - if(n->priority > (*list)->priority) - break; - list= &((*list)->next); - } - n->next = *list; - *list=n; - write_unlock(¬ifier_lock); - return 0; + int ret; + + rcu_read_lock(); + ret = notifier_call_chain(&nh->head, val, v); + rcu_read_unlock(); + return ret; } -EXPORT_SYMBOL(notifier_chain_register); +EXPORT_SYMBOL_GPL(atomic_notifier_call_chain); + +/* + * Blocking notifier chain routines. All access to the chain is + * synchronized by an rwsem. + */ /** - * notifier_chain_unregister - Remove notifier from a notifier chain - * @nl: Pointer to root list pointer + * blocking_notifier_chain_register - Add notifier to a blocking notifier chain + * @nh: Pointer to head of the blocking notifier chain * @n: New entry in notifier chain * - * Removes a notifier from a notifier chain. + * Adds a notifier to a blocking notifier chain. + * Must be called in process context. * - * Returns zero on success, or %-ENOENT on failure. + * Currently always returns zero. */ -int notifier_chain_unregister(struct notifier_block **nl, struct notifier_block *n) +int blocking_notifier_chain_register(struct blocking_notifier_head *nh, + struct notifier_block *n) { - write_lock(¬ifier_lock); - while((*nl)!=NULL) - { - if((*nl)==n) - { - *nl=n->next; - write_unlock(¬ifier_lock); - return 0; - } - nl=&((*nl)->next); - } - write_unlock(¬ifier_lock); - return -ENOENT; + int ret; + + /* + * This code gets used during boot-up, when task switching is + * not yet working and interrupts must remain disabled. At + * such times we must not call down_write(). 
+ */ + if (unlikely(system_state == SYSTEM_BOOTING)) + return notifier_chain_register(&nh->head, n); + + down_write(&nh->rwsem); + ret = notifier_chain_register(&nh->head, n); + up_write(&nh->rwsem); + return ret; } -EXPORT_SYMBOL(notifier_chain_unregister); +EXPORT_SYMBOL_GPL(blocking_notifier_chain_register); /** - * notifier_call_chain - Call functions in a notifier chain - * @n: Pointer to root pointer of notifier chain + * blocking_notifier_chain_unregister - Remove notifier from a blocking notifier chain + * @nh: Pointer to head of the blocking notifier chain + * @n: Entry to remove from notifier chain + * + * Removes a notifier from a blocking notifier chain. + * Must be called from process context. + * + * Returns zero on success or %-ENOENT on failure. + */ +int blocking_notifier_chain_unregister(struct blocking_notifier_head *nh, + struct notifier_block *n) +{ + int ret; + + /* + * This code gets used during boot-up, when task switching is + * not yet working and interrupts must remain disabled. At + * such times we must not call down_write(). + */ + if (unlikely(system_state == SYSTEM_BOOTING)) + return notifier_chain_unregister(&nh->head, n); + + down_write(&nh->rwsem); + ret = notifier_chain_unregister(&nh->head, n); + up_write(&nh->rwsem); + return ret; +} + +EXPORT_SYMBOL_GPL(blocking_notifier_chain_unregister); + +/** + * blocking_notifier_call_chain - Call functions in a blocking notifier chain + * @nh: Pointer to head of the blocking notifier chain * @val: Value passed unmodified to notifier function * @v: Pointer passed unmodified to notifier function * - * Calls each function in a notifier chain in turn. + * Calls each function in a notifier chain in turn. The functions + * run in a process context, so they are allowed to block. * - * If the return value of the notifier can be and'd - * with %NOTIFY_STOP_MASK, then notifier_call_chain + * If the return value of the notifier can be and'ed + * with %NOTIFY_STOP_MASK then blocking_notifier_call_chain * will return immediately, with the return value of * the notifier function which halted execution. - * Otherwise, the return value is the return value + * Otherwise the return value is the return value * of the last notifier function called. */ -int __kprobes notifier_call_chain(struct notifier_block **n, unsigned long val, void *v) +int blocking_notifier_call_chain(struct blocking_notifier_head *nh, + unsigned long val, void *v) { - int ret=NOTIFY_DONE; - struct notifier_block *nb = *n; + int ret; - while(nb) - { - ret=nb->notifier_call(nb,val,v); - if(ret&NOTIFY_STOP_MASK) - { - return ret; - } - nb=nb->next; - } + down_read(&nh->rwsem); + ret = notifier_call_chain(&nh->head, val, v); + up_read(&nh->rwsem); return ret; } -EXPORT_SYMBOL(notifier_call_chain); +EXPORT_SYMBOL_GPL(blocking_notifier_call_chain); + +/* + * Raw notifier chain routines. There is no protection; + * the caller must provide it. Use at your own risk! + */ + +/** + * raw_notifier_chain_register - Add notifier to a raw notifier chain + * @nh: Pointer to head of the raw notifier chain + * @n: New entry in notifier chain + * + * Adds a notifier to a raw notifier chain. + * All locking must be provided by the caller. + * + * Currently always returns zero. 
+ */ + +int raw_notifier_chain_register(struct raw_notifier_head *nh, + struct notifier_block *n) +{ + return notifier_chain_register(&nh->head, n); +} + +EXPORT_SYMBOL_GPL(raw_notifier_chain_register); + +/** + * raw_notifier_chain_unregister - Remove notifier from a raw notifier chain + * @nh: Pointer to head of the raw notifier chain + * @n: Entry to remove from notifier chain + * + * Removes a notifier from a raw notifier chain. + * All locking must be provided by the caller. + * + * Returns zero on success or %-ENOENT on failure. + */ +int raw_notifier_chain_unregister(struct raw_notifier_head *nh, + struct notifier_block *n) +{ + return notifier_chain_unregister(&nh->head, n); +} + +EXPORT_SYMBOL_GPL(raw_notifier_chain_unregister); + +/** + * raw_notifier_call_chain - Call functions in a raw notifier chain + * @nh: Pointer to head of the raw notifier chain + * @val: Value passed unmodified to notifier function + * @v: Pointer passed unmodified to notifier function + * + * Calls each function in a notifier chain in turn. The functions + * run in an undefined context. + * All locking must be provided by the caller. + * + * If the return value of the notifier can be and'ed + * with %NOTIFY_STOP_MASK then raw_notifier_call_chain + * will return immediately, with the return value of + * the notifier function which halted execution. + * Otherwise the return value is the return value + * of the last notifier function called. + */ + +int raw_notifier_call_chain(struct raw_notifier_head *nh, + unsigned long val, void *v) +{ + return notifier_call_chain(&nh->head, val, v); +} + +EXPORT_SYMBOL_GPL(raw_notifier_call_chain); /** * register_reboot_notifier - Register function to be called at reboot time @@ -196,13 +401,13 @@ EXPORT_SYMBOL(notifier_call_chain); * Registers a function with the list of functions * to be called at reboot time. * - * Currently always returns zero, as notifier_chain_register + * Currently always returns zero, as blocking_notifier_chain_register * always returns zero. 
*/ int register_reboot_notifier(struct notifier_block * nb) { - return notifier_chain_register(&reboot_notifier_list, nb); + return blocking_notifier_chain_register(&reboot_notifier_list, nb); } EXPORT_SYMBOL(register_reboot_notifier); @@ -219,7 +424,7 @@ EXPORT_SYMBOL(register_reboot_notifier); int unregister_reboot_notifier(struct notifier_block * nb) { - return notifier_chain_unregister(&reboot_notifier_list, nb); + return blocking_notifier_chain_unregister(&reboot_notifier_list, nb); } EXPORT_SYMBOL(unregister_reboot_notifier); @@ -380,7 +585,7 @@ EXPORT_SYMBOL_GPL(emergency_restart); void kernel_restart_prepare(char *cmd) { - notifier_call_chain(&reboot_notifier_list, SYS_RESTART, cmd); + blocking_notifier_call_chain(&reboot_notifier_list, SYS_RESTART, cmd); system_state = SYSTEM_RESTART; device_shutdown(); } @@ -430,7 +635,7 @@ EXPORT_SYMBOL_GPL(kernel_kexec); void kernel_shutdown_prepare(enum system_states state) { - notifier_call_chain(&reboot_notifier_list, + blocking_notifier_call_chain(&reboot_notifier_list, (state == SYSTEM_HALT)?SYS_HALT:SYS_POWER_OFF, NULL); system_state = state; device_shutdown(); diff --git a/kernel/sys_ni.c b/kernel/sys_ni.c index 1067090db6b1..d82864c4a617 100644 --- a/kernel/sys_ni.c +++ b/kernel/sys_ni.c @@ -42,6 +42,10 @@ cond_syscall(sys_recvmsg); cond_syscall(sys_socketcall); cond_syscall(sys_futex); cond_syscall(compat_sys_futex); +cond_syscall(sys_set_robust_list); +cond_syscall(compat_sys_set_robust_list); +cond_syscall(sys_get_robust_list); +cond_syscall(compat_sys_get_robust_list); cond_syscall(sys_epoll_create); cond_syscall(sys_epoll_ctl); cond_syscall(sys_epoll_wait); diff --git a/kernel/time.c b/kernel/time.c index e00a97b77241..ff8e7019c4c4 100644 --- a/kernel/time.c +++ b/kernel/time.c @@ -610,7 +610,7 @@ void set_normalized_timespec(struct timespec *ts, time_t sec, long nsec) * * Returns the timespec representation of the nsec parameter. */ -struct timespec ns_to_timespec(const nsec_t nsec) +struct timespec ns_to_timespec(const s64 nsec) { struct timespec ts; @@ -630,7 +630,7 @@ struct timespec ns_to_timespec(const nsec_t nsec) * * Returns the timeval representation of the nsec parameter. */ -struct timeval ns_to_timeval(const nsec_t nsec) +struct timeval ns_to_timeval(const s64 nsec) { struct timespec ts = ns_to_timespec(nsec); struct timeval tv; diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug index 7e70ab13e191..6e8a60f67c7a 100644 --- a/lib/Kconfig.debug +++ b/lib/Kconfig.debug @@ -172,7 +172,7 @@ config DEBUG_IOREMAP config DEBUG_FS bool "Debug Filesystem" - depends on DEBUG_KERNEL && SYSFS + depends on SYSFS help debugfs is a virtual file system that kernel developers use to put debugging files into. 
Enable this option to be able to read and diff --git a/lib/Makefile b/lib/Makefile index f827e3c24ec0..b830c9a15541 100644 --- a/lib/Makefile +++ b/lib/Makefile @@ -23,6 +23,7 @@ lib-$(CONFIG_RWSEM_GENERIC_SPINLOCK) += rwsem-spinlock.o lib-$(CONFIG_RWSEM_XCHGADD_ALGORITHM) += rwsem.o lib-$(CONFIG_SEMAPHORE_SLEEPERS) += semaphore-sleepers.o lib-$(CONFIG_GENERIC_FIND_NEXT_BIT) += find_next_bit.o +lib-$(CONFIG_GENERIC_HWEIGHT) += hweight.o obj-$(CONFIG_LOCK_KERNEL) += kernel_lock.o obj-$(CONFIG_DEBUG_PREEMPT) += smp_processor_id.o diff --git a/lib/bitmap.c b/lib/bitmap.c index 8acab0e176ef..ed2ae3b0cd06 100644 --- a/lib/bitmap.c +++ b/lib/bitmap.c @@ -253,33 +253,18 @@ int __bitmap_subset(const unsigned long *bitmap1, } EXPORT_SYMBOL(__bitmap_subset); -#if BITS_PER_LONG == 32 int __bitmap_weight(const unsigned long *bitmap, int bits) { int k, w = 0, lim = bits/BITS_PER_LONG; for (k = 0; k < lim; k++) - w += hweight32(bitmap[k]); + w += hweight_long(bitmap[k]); if (bits % BITS_PER_LONG) - w += hweight32(bitmap[k] & BITMAP_LAST_WORD_MASK(bits)); + w += hweight_long(bitmap[k] & BITMAP_LAST_WORD_MASK(bits)); return w; } -#else -int __bitmap_weight(const unsigned long *bitmap, int bits) -{ - int k, w = 0, lim = bits/BITS_PER_LONG; - - for (k = 0; k < lim; k++) - w += hweight64(bitmap[k]); - - if (bits % BITS_PER_LONG) - w += hweight64(bitmap[k] & BITMAP_LAST_WORD_MASK(bits)); - - return w; -} -#endif EXPORT_SYMBOL(__bitmap_weight); /* diff --git a/lib/find_next_bit.c b/lib/find_next_bit.c index c05b4b19cf6c..bda0d71a2514 100644 --- a/lib/find_next_bit.c +++ b/lib/find_next_bit.c @@ -11,48 +11,171 @@ #include <linux/bitops.h> #include <linux/module.h> +#include <asm/types.h> +#include <asm/byteorder.h> -int find_next_bit(const unsigned long *addr, int size, int offset) +#define BITOP_WORD(nr) ((nr) / BITS_PER_LONG) + +/** + * find_next_bit - find the next set bit in a memory region + * @addr: The address to base the search on + * @offset: The bitnumber to start searching at + * @size: The maximum size to search + */ +unsigned long find_next_bit(const unsigned long *addr, unsigned long size, + unsigned long offset) { - const unsigned long *base; - const int NBITS = sizeof(*addr) * 8; + const unsigned long *p = addr + BITOP_WORD(offset); + unsigned long result = offset & ~(BITS_PER_LONG-1); unsigned long tmp; - base = addr; + if (offset >= size) + return size; + size -= result; + offset %= BITS_PER_LONG; if (offset) { - int suboffset; + tmp = *(p++); + tmp &= (~0UL << offset); + if (size < BITS_PER_LONG) + goto found_first; + if (tmp) + goto found_middle; + size -= BITS_PER_LONG; + result += BITS_PER_LONG; + } + while (size & ~(BITS_PER_LONG-1)) { + if ((tmp = *(p++))) + goto found_middle; + result += BITS_PER_LONG; + size -= BITS_PER_LONG; + } + if (!size) + return result; + tmp = *p; - addr += offset / NBITS; +found_first: + tmp &= (~0UL >> (BITS_PER_LONG - size)); + if (tmp == 0UL) /* Are any bits set? */ + return result + size; /* Nope. */ +found_middle: + return result + __ffs(tmp); +} - suboffset = offset % NBITS; - if (suboffset) { - tmp = *addr; - tmp >>= suboffset; - if (tmp) - goto finish; - } +EXPORT_SYMBOL(find_next_bit); - addr++; +/* + * This implementation of find_{first,next}_zero_bit was stolen from + * Linus' asm-alpha/bitops.h. 
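As a usage note on the rewritten helper above: find_next_bit() now takes unsigned long arguments and returns either the index of the first set bit at or after @offset, or @size when no further bit is set, so a caller typically walks a bitmap as in the sketch below (the function and bitmap names are illustrative only).

#include <linux/kernel.h>
#include <linux/bitops.h>

static void print_set_bits(const unsigned long *map, unsigned long nbits)
{
        unsigned long bit;

        /* visit every set bit, in increasing order */
        for (bit = find_next_bit(map, nbits, 0);
             bit < nbits;
             bit = find_next_bit(map, nbits, bit + 1))
                printk("bit %lu is set\n", bit);
}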
+ */ +unsigned long find_next_zero_bit(const unsigned long *addr, unsigned long size, + unsigned long offset) +{ + const unsigned long *p = addr + BITOP_WORD(offset); + unsigned long result = offset & ~(BITS_PER_LONG-1); + unsigned long tmp; + + if (offset >= size) + return size; + size -= result; + offset %= BITS_PER_LONG; + if (offset) { + tmp = *(p++); + tmp |= ~0UL >> (BITS_PER_LONG - offset); + if (size < BITS_PER_LONG) + goto found_first; + if (~tmp) + goto found_middle; + size -= BITS_PER_LONG; + result += BITS_PER_LONG; + } + while (size & ~(BITS_PER_LONG-1)) { + if (~(tmp = *(p++))) + goto found_middle; + result += BITS_PER_LONG; + size -= BITS_PER_LONG; } + if (!size) + return result; + tmp = *p; + +found_first: + tmp |= ~0UL << size; + if (tmp == ~0UL) /* Are any bits zero? */ + return result + size; /* Nope. */ +found_middle: + return result + ffz(tmp); +} + +EXPORT_SYMBOL(find_next_zero_bit); - while ((tmp = *addr) == 0) - addr++; +#ifdef __BIG_ENDIAN - offset = (addr - base) * NBITS; +/* include/linux/byteorder does not support "unsigned long" type */ +static inline unsigned long ext2_swabp(const unsigned long * x) +{ +#if BITS_PER_LONG == 64 + return (unsigned long) __swab64p((u64 *) x); +#elif BITS_PER_LONG == 32 + return (unsigned long) __swab32p((u32 *) x); +#else +#error BITS_PER_LONG not defined +#endif +} + +/* include/linux/byteorder doesn't support "unsigned long" type */ +static inline unsigned long ext2_swab(const unsigned long y) +{ +#if BITS_PER_LONG == 64 + return (unsigned long) __swab64((u64) y); +#elif BITS_PER_LONG == 32 + return (unsigned long) __swab32((u32) y); +#else +#error BITS_PER_LONG not defined +#endif +} - finish: - /* count the remaining bits without using __ffs() since that takes a 32-bit arg */ - while (!(tmp & 0xff)) { - offset += 8; - tmp >>= 8; +unsigned long generic_find_next_zero_le_bit(const unsigned long *addr, unsigned + long size, unsigned long offset) +{ + const unsigned long *p = addr + BITOP_WORD(offset); + unsigned long result = offset & ~(BITS_PER_LONG - 1); + unsigned long tmp; + + if (offset >= size) + return size; + size -= result; + offset &= (BITS_PER_LONG - 1UL); + if (offset) { + tmp = ext2_swabp(p++); + tmp |= (~0UL >> (BITS_PER_LONG - offset)); + if (size < BITS_PER_LONG) + goto found_first; + if (~tmp) + goto found_middle; + size -= BITS_PER_LONG; + result += BITS_PER_LONG; } - while (!(tmp & 1)) { - offset++; - tmp >>= 1; + while (size & ~(BITS_PER_LONG - 1)) { + if (~(tmp = *(p++))) + goto found_middle_swap; + result += BITS_PER_LONG; + size -= BITS_PER_LONG; } + if (!size) + return result; + tmp = ext2_swabp(p); +found_first: + tmp |= ~0UL << size; + if (tmp == ~0UL) /* Are any bits zero? */ + return result + size; /* Nope. Skip ffz */ +found_middle: + return result + ffz(tmp); - return offset; +found_middle_swap: + return result + ffz(ext2_swab(tmp)); } -EXPORT_SYMBOL(find_next_bit); +EXPORT_SYMBOL(generic_find_next_zero_le_bit); + +#endif /* __BIG_ENDIAN */ diff --git a/lib/hweight.c b/lib/hweight.c new file mode 100644 index 000000000000..438257671708 --- /dev/null +++ b/lib/hweight.c @@ -0,0 +1,53 @@ +#include <linux/module.h> +#include <asm/types.h> + +/** + * hweightN - returns the hamming weight of a N-bit word + * @x: the word to weigh + * + * The Hamming Weight of a number is the total number of bits set in it. 
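A few worked values for the helpers defined below, as a sanity check on the definition just given (the selftest wrapper itself is illustrative, not part of this patch):

#include <linux/kernel.h>
#include <linux/bitops.h>

static void hweight_selftest(void)
{
        BUG_ON(hweight8(0xF0) != 4);            /* 1111 0000 */
        BUG_ON(hweight16(0x00FF) != 8);
        BUG_ON(hweight32(0x80000001) != 2);
        BUG_ON(hweight64(0xFFFFFFFFULL) != 32);
}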
+ */ + +unsigned int hweight32(unsigned int w) +{ + unsigned int res = w - ((w >> 1) & 0x55555555); + res = (res & 0x33333333) + ((res >> 2) & 0x33333333); + res = (res + (res >> 4)) & 0x0F0F0F0F; + res = res + (res >> 8); + return (res + (res >> 16)) & 0x000000FF; +} +EXPORT_SYMBOL(hweight32); + +unsigned int hweight16(unsigned int w) +{ + unsigned int res = w - ((w >> 1) & 0x5555); + res = (res & 0x3333) + ((res >> 2) & 0x3333); + res = (res + (res >> 4)) & 0x0F0F; + return (res + (res >> 8)) & 0x00FF; +} +EXPORT_SYMBOL(hweight16); + +unsigned int hweight8(unsigned int w) +{ + unsigned int res = w - ((w >> 1) & 0x55); + res = (res & 0x33) + ((res >> 2) & 0x33); + return (res + (res >> 4)) & 0x0F; +} +EXPORT_SYMBOL(hweight8); + +unsigned long hweight64(__u64 w) +{ +#if BITS_PER_LONG == 32 + return hweight32((unsigned int)(w >> 32)) + hweight32((unsigned int)w); +#elif BITS_PER_LONG == 64 + __u64 res = w - ((w >> 1) & 0x5555555555555555ul); + res = (res & 0x3333333333333333ul) + ((res >> 2) & 0x3333333333333333ul); + res = (res + (res >> 4)) & 0x0F0F0F0F0F0F0F0Ful; + res = res + (res >> 8); + res = res + (res >> 16); + return (res + (res >> 32)) & 0x00000000000000FFul; +#else +#error BITS_PER_LONG not defined +#endif +} +EXPORT_SYMBOL(hweight64); diff --git a/mm/Makefile b/mm/Makefile index f10c753dce6d..0b8f73f2ed16 100644 --- a/mm/Makefile +++ b/mm/Makefile @@ -10,7 +10,7 @@ mmu-$(CONFIG_MMU) := fremap.o highmem.o madvise.o memory.o mincore.o \ obj-y := bootmem.o filemap.o mempool.o oom_kill.o fadvise.o \ page_alloc.o page-writeback.o pdflush.o \ readahead.o swap.o truncate.o vmscan.o \ - prio_tree.o util.o $(mmu-y) + prio_tree.o util.o mmzone.o $(mmu-y) obj-$(CONFIG_SWAP) += page_io.o swap_state.o swapfile.o thrash.o obj-$(CONFIG_HUGETLBFS) += hugetlb.o diff --git a/mm/bootmem.c b/mm/bootmem.c index b55bd39fc5dd..d3e3bd2ffcea 100644 --- a/mm/bootmem.c +++ b/mm/bootmem.c @@ -33,6 +33,7 @@ EXPORT_SYMBOL(max_pfn); /* This is exported so * dma_get_required_mask(), which uses * it, can be an inline function */ +static LIST_HEAD(bdata_list); #ifdef CONFIG_CRASH_DUMP /* * If we have booted due to a crash, max_pfn will be a very low value. We need @@ -52,6 +53,27 @@ unsigned long __init bootmem_bootmap_pages (unsigned long pages) return mapsize; } +/* + * link bdata in order + */ +static void link_bootmem(bootmem_data_t *bdata) +{ + bootmem_data_t *ent; + if (list_empty(&bdata_list)) { + list_add(&bdata->list, &bdata_list); + return; + } + /* insert in order */ + list_for_each_entry(ent, &bdata_list, list) { + if (bdata->node_boot_start < ent->node_boot_start) { + list_add_tail(&bdata->list, &ent->list); + return; + } + } + list_add_tail(&bdata->list, &bdata_list); + return; +} + /* * Called once to set up the allocator itself. 
@@ -62,13 +84,11 @@ static unsigned long __init init_bootmem_core (pg_data_t *pgdat, bootmem_data_t *bdata = pgdat->bdata; unsigned long mapsize = ((end - start)+7)/8; - pgdat->pgdat_next = pgdat_list; - pgdat_list = pgdat; - mapsize = ALIGN(mapsize, sizeof(long)); bdata->node_bootmem_map = phys_to_virt(mapstart << PAGE_SHIFT); bdata->node_boot_start = (start << PAGE_SHIFT); bdata->node_low_pfn = end; + link_bootmem(bdata); /* * Initially all pages are reserved - setup_arch() has to @@ -383,12 +403,11 @@ unsigned long __init free_all_bootmem (void) void * __init __alloc_bootmem(unsigned long size, unsigned long align, unsigned long goal) { - pg_data_t *pgdat = pgdat_list; + bootmem_data_t *bdata; void *ptr; - for_each_pgdat(pgdat) - if ((ptr = __alloc_bootmem_core(pgdat->bdata, size, - align, goal, 0))) + list_for_each_entry(bdata, &bdata_list, list) + if ((ptr = __alloc_bootmem_core(bdata, size, align, goal, 0))) return(ptr); /* @@ -416,11 +435,11 @@ void * __init __alloc_bootmem_node(pg_data_t *pgdat, unsigned long size, unsigne void * __init __alloc_bootmem_low(unsigned long size, unsigned long align, unsigned long goal) { - pg_data_t *pgdat = pgdat_list; + bootmem_data_t *bdata; void *ptr; - for_each_pgdat(pgdat) - if ((ptr = __alloc_bootmem_core(pgdat->bdata, size, + list_for_each_entry(bdata, &bdata_list, list) + if ((ptr = __alloc_bootmem_core(bdata, size, align, goal, LOW32LIMIT))) return(ptr); diff --git a/mm/highmem.c b/mm/highmem.c index d0ea1eec6a9a..55885f64af40 100644 --- a/mm/highmem.c +++ b/mm/highmem.c @@ -31,14 +31,9 @@ static mempool_t *page_pool, *isa_page_pool; -static void *page_pool_alloc_isa(gfp_t gfp_mask, void *data) +static void *mempool_alloc_pages_isa(gfp_t gfp_mask, void *data) { - return alloc_page(gfp_mask | GFP_DMA); -} - -static void page_pool_free(void *page, void *data) -{ - __free_page(page); + return mempool_alloc_pages(gfp_mask | GFP_DMA, data); } /* @@ -51,11 +46,6 @@ static void page_pool_free(void *page, void *data) */ #ifdef CONFIG_HIGHMEM -static void *page_pool_alloc(gfp_t gfp_mask, void *data) -{ - return alloc_page(gfp_mask); -} - static int pkmap_count[LAST_PKMAP]; static unsigned int last_pkmap_nr; static __cacheline_aligned_in_smp DEFINE_SPINLOCK(kmap_lock); @@ -229,7 +219,7 @@ static __init int init_emergency_pool(void) if (!i.totalhigh) return 0; - page_pool = mempool_create(POOL_SIZE, page_pool_alloc, page_pool_free, NULL); + page_pool = mempool_create_page_pool(POOL_SIZE, 0); if (!page_pool) BUG(); printk("highmem bounce pool size: %d pages\n", POOL_SIZE); @@ -272,7 +262,8 @@ int init_emergency_isa_pool(void) if (isa_page_pool) return 0; - isa_page_pool = mempool_create(ISA_POOL_SIZE, page_pool_alloc_isa, page_pool_free, NULL); + isa_page_pool = mempool_create(ISA_POOL_SIZE, mempool_alloc_pages_isa, + mempool_free_pages, (void *) 0); if (!isa_page_pool) BUG(); @@ -337,7 +328,7 @@ static void bounce_end_io(struct bio *bio, mempool_t *pool, int err) bio_put(bio); } -static int bounce_end_io_write(struct bio *bio, unsigned int bytes_done,int err) +static int bounce_end_io_write(struct bio *bio, unsigned int bytes_done, int err) { if (bio->bi_size) return 1; @@ -384,7 +375,7 @@ static int bounce_end_io_read_isa(struct bio *bio, unsigned int bytes_done, int } static void __blk_queue_bounce(request_queue_t *q, struct bio **bio_orig, - mempool_t *pool) + mempool_t *pool) { struct page *page; struct bio *bio = NULL; diff --git a/mm/memory.c b/mm/memory.c index e347e106ca3a..8d8f52569f32 100644 --- a/mm/memory.c +++ b/mm/memory.c @@ -1071,6 
+1071,8 @@ int get_user_pages(struct task_struct *tsk, struct mm_struct *mm, } if (pages) { pages[i] = page; + + flush_anon_page(page, start); flush_dcache_page(page); } if (vmas) @@ -2352,10 +2354,8 @@ int make_pages_present(unsigned long addr, unsigned long end) if (!vma) return -1; write = (vma->vm_flags & VM_WRITE) != 0; - if (addr >= end) - BUG(); - if (end > vma->vm_end) - BUG(); + BUG_ON(addr >= end); + BUG_ON(end > vma->vm_end); len = (end+PAGE_SIZE-1)/PAGE_SIZE-addr/PAGE_SIZE; ret = get_user_pages(current, current->mm, addr, len, write, 0, NULL, NULL); diff --git a/mm/mempolicy.c b/mm/mempolicy.c index 4f71cfd29c6f..dec8249e972d 100644 --- a/mm/mempolicy.c +++ b/mm/mempolicy.c @@ -912,7 +912,7 @@ asmlinkage long sys_migrate_pages(pid_t pid, unsigned long maxnode, /* * Check if this process has the right to modify the specified * process. The right exists if the process has administrative - * capabilities, superuser priviledges or the same + * capabilities, superuser privileges or the same * userid as the target process. */ if ((current->euid != task->suid) && (current->euid != task->uid) && diff --git a/mm/mempool.c b/mm/mempool.c index f71893ed3543..fe6e05289cc5 100644 --- a/mm/mempool.c +++ b/mm/mempool.c @@ -183,8 +183,8 @@ EXPORT_SYMBOL(mempool_resize); */ void mempool_destroy(mempool_t *pool) { - if (pool->curr_nr != pool->min_nr) - BUG(); /* There were outstanding elements */ + /* Check for outstanding elements */ + BUG_ON(pool->curr_nr != pool->min_nr); free_pool(pool); } EXPORT_SYMBOL(mempool_destroy); @@ -289,3 +289,45 @@ void mempool_free_slab(void *element, void *pool_data) kmem_cache_free(mem, element); } EXPORT_SYMBOL(mempool_free_slab); + +/* + * A commonly used alloc and free fn that kmalloc/kfrees the amount of memory + * specfied by pool_data + */ +void *mempool_kmalloc(gfp_t gfp_mask, void *pool_data) +{ + size_t size = (size_t)(long)pool_data; + return kmalloc(size, gfp_mask); +} +EXPORT_SYMBOL(mempool_kmalloc); + +void *mempool_kzalloc(gfp_t gfp_mask, void *pool_data) +{ + size_t size = (size_t) pool_data; + return kzalloc(size, gfp_mask); +} +EXPORT_SYMBOL(mempool_kzalloc); + +void mempool_kfree(void *element, void *pool_data) +{ + kfree(element); +} +EXPORT_SYMBOL(mempool_kfree); + +/* + * A simple mempool-backed page allocator that allocates pages + * of the order specified by pool_data. + */ +void *mempool_alloc_pages(gfp_t gfp_mask, void *pool_data) +{ + int order = (int)(long)pool_data; + return alloc_pages(gfp_mask, order); +} +EXPORT_SYMBOL(mempool_alloc_pages); + +void mempool_free_pages(void *element, void *pool_data) +{ + int order = (int)(long)pool_data; + __free_pages(element, order); +} +EXPORT_SYMBOL(mempool_free_pages); diff --git a/mm/mmzone.c b/mm/mmzone.c new file mode 100644 index 000000000000..b022370e612e --- /dev/null +++ b/mm/mmzone.c @@ -0,0 +1,50 @@ +/* + * linux/mm/mmzone.c + * + * management codes for pgdats and zones. 
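The walkers added in this new file replace the old pgdat_list/pgdat_next chain; an open-coded traversal, mirroring the frag_start() change further down, would look like the sketch below (the counting function itself is illustrative).

#include <linux/mmzone.h>

static unsigned long count_online_pgdats(void)
{
        struct pglist_data *pgdat;
        unsigned long n = 0;

        /* walk every online node, in node id order */
        for (pgdat = first_online_pgdat();
             pgdat;
             pgdat = next_online_pgdat(pgdat))
                n++;
        return n;
}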
+ */ + + +#include <linux/config.h> +#include <linux/stddef.h> +#include <linux/mmzone.h> +#include <linux/module.h> + +struct pglist_data *first_online_pgdat(void) +{ + return NODE_DATA(first_online_node); +} + +EXPORT_SYMBOL(first_online_pgdat); + +struct pglist_data *next_online_pgdat(struct pglist_data *pgdat) +{ + int nid = next_online_node(pgdat->node_id); + + if (nid == MAX_NUMNODES) + return NULL; + return NODE_DATA(nid); +} +EXPORT_SYMBOL(next_online_pgdat); + + +/* + * next_zone - helper magic for for_each_zone() + */ +struct zone *next_zone(struct zone *zone) +{ + pg_data_t *pgdat = zone->zone_pgdat; + + if (zone < pgdat->node_zones + MAX_NR_ZONES - 1) + zone++; + else { + pgdat = next_online_pgdat(pgdat); + if (pgdat) + zone = pgdat->node_zones; + else + zone = NULL; + } + return zone; +} +EXPORT_SYMBOL(next_zone); + diff --git a/mm/page_alloc.c b/mm/page_alloc.c index 338a02bb004d..dc523a1f270d 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c @@ -49,7 +49,6 @@ nodemask_t node_online_map __read_mostly = { { [0] = 1UL } }; EXPORT_SYMBOL(node_online_map); nodemask_t node_possible_map __read_mostly = NODE_MASK_ALL; EXPORT_SYMBOL(node_possible_map); -struct pglist_data *pgdat_list __read_mostly; unsigned long totalram_pages __read_mostly; unsigned long totalhigh_pages __read_mostly; long nr_swap_pages; @@ -1201,7 +1200,7 @@ unsigned int nr_free_highpages (void) pg_data_t *pgdat; unsigned int pages = 0; - for_each_pgdat(pgdat) + for_each_online_pgdat(pgdat) pages += pgdat->node_zones[ZONE_HIGHMEM].free_pages; return pages; @@ -1343,7 +1342,7 @@ void get_zone_counts(unsigned long *active, *active = 0; *inactive = 0; *free = 0; - for_each_pgdat(pgdat) { + for_each_online_pgdat(pgdat) { unsigned long l, m, n; __get_zone_counts(&l, &m, &n, pgdat); *active += l; @@ -2042,7 +2041,6 @@ static __meminit void init_currently_empty_zone(struct zone *zone, zone_wait_table_init(zone, size); pgdat->nr_zones = zone_idx(zone) + 1; - zone->zone_mem_map = pfn_to_page(zone_start_pfn); zone->zone_start_pfn = zone_start_pfn; memmap_init(size, pgdat->node_id, zone_idx(zone), zone_start_pfn); @@ -2170,8 +2168,9 @@ static void *frag_start(struct seq_file *m, loff_t *pos) { pg_data_t *pgdat; loff_t node = *pos; - - for (pgdat = pgdat_list; pgdat && node; pgdat = pgdat->pgdat_next) + for (pgdat = first_online_pgdat(); + pgdat && node; + pgdat = next_online_pgdat(pgdat)) --node; return pgdat; @@ -2182,7 +2181,7 @@ static void *frag_next(struct seq_file *m, void *arg, loff_t *pos) pg_data_t *pgdat = (pg_data_t *)arg; (*pos)++; - return pgdat->pgdat_next; + return next_online_pgdat(pgdat); } static void frag_stop(struct seq_file *m, void *arg) @@ -2483,7 +2482,7 @@ static void setup_per_zone_lowmem_reserve(void) struct pglist_data *pgdat; int j, idx; - for_each_pgdat(pgdat) { + for_each_online_pgdat(pgdat) { for (j = 0; j < MAX_NR_ZONES; j++) { struct zone *zone = pgdat->node_zones + j; unsigned long present_pages = zone->present_pages; @@ -2745,3 +2744,44 @@ void *__init alloc_large_system_hash(const char *tablename, return table; } + +#ifdef CONFIG_OUT_OF_LINE_PFN_TO_PAGE +/* + * pfn <-> page translation. out-of-line version. 
+ * (see asm-generic/memory_model.h) + */ +#if defined(CONFIG_FLATMEM) +struct page *pfn_to_page(unsigned long pfn) +{ + return mem_map + (pfn - ARCH_PFN_OFFSET); +} +unsigned long page_to_pfn(struct page *page) +{ + return (page - mem_map) + ARCH_PFN_OFFSET; +} +#elif defined(CONFIG_DISCONTIGMEM) +struct page *pfn_to_page(unsigned long pfn) +{ + int nid = arch_pfn_to_nid(pfn); + return NODE_DATA(nid)->node_mem_map + arch_local_page_offset(pfn,nid); +} +unsigned long page_to_pfn(struct page *page) +{ + struct pglist_data *pgdat = NODE_DATA(page_to_nid(page)); + return (page - pgdat->node_mem_map) + pgdat->node_start_pfn; +} +#elif defined(CONFIG_SPARSEMEM) +struct page *pfn_to_page(unsigned long pfn) +{ + return __section_mem_map_addr(__pfn_to_section(pfn)) + pfn; +} + +unsigned long page_to_pfn(struct page *page) +{ + long section_id = page_to_section(page); + return page - __section_mem_map_addr(__nr_to_section(section_id)); +} +#endif /* CONFIG_FLATMEM/DISCONTIGMME/SPARSEMEM */ +EXPORT_SYMBOL(pfn_to_page); +EXPORT_SYMBOL(page_to_pfn); +#endif /* CONFIG_OUT_OF_LINE_PFN_TO_PAGE */ diff --git a/mm/slab.c b/mm/slab.c index 681837499d7d..4cbf8bb13557 100644 --- a/mm/slab.c +++ b/mm/slab.c @@ -3311,7 +3311,7 @@ void *__alloc_percpu(size_t size) * and we have no way of figuring out how to fix the array * that we have allocated then.... */ - for_each_cpu(i) { + for_each_possible_cpu(i) { int node = cpu_to_node(i); if (node_online(node)) @@ -3398,7 +3398,7 @@ void free_percpu(const void *objp) /* * We allocate for all cpus so we cannot use for online cpu here. */ - for_each_cpu(i) + for_each_possible_cpu(i) kfree(p->ptrs[i]); kfree(p); } diff --git a/mm/swap.c b/mm/swap.c index 91b7e2026f69..88895c249bc9 100644 --- a/mm/swap.c +++ b/mm/swap.c @@ -512,7 +512,7 @@ long percpu_counter_sum(struct percpu_counter *fbc) spin_lock(&fbc->lock); ret = fbc->count; - for_each_cpu(cpu) { + for_each_possible_cpu(cpu) { long *pcount = per_cpu_ptr(fbc->counters, cpu); ret += *pcount; } diff --git a/mm/vmscan.c b/mm/vmscan.c index 78865c849f8f..acdf001d6941 100644 --- a/mm/vmscan.c +++ b/mm/vmscan.c @@ -1305,7 +1305,7 @@ unsigned long shrink_all_memory(unsigned long nr_pages) current->reclaim_state = &reclaim_state; repeat: - for_each_pgdat(pgdat) { + for_each_online_pgdat(pgdat) { unsigned long freed; freed = balance_pgdat(pgdat, nr_to_free, 0); @@ -1335,7 +1335,7 @@ static int __devinit cpu_callback(struct notifier_block *nfb, cpumask_t mask; if (action == CPU_ONLINE) { - for_each_pgdat(pgdat) { + for_each_online_pgdat(pgdat) { mask = node_to_cpumask(pgdat->node_id); if (any_online_cpu(mask) != NR_CPUS) /* One of our CPUs online: restore mask */ @@ -1351,7 +1351,7 @@ static int __init kswapd_init(void) pg_data_t *pgdat; swap_setup(); - for_each_pgdat(pgdat) { + for_each_online_pgdat(pgdat) { pid_t pid; pid = kernel_thread(kswapd, pgdat, CLONE_KERNEL); diff --git a/net/bluetooth/hci_core.c b/net/bluetooth/hci_core.c index 9106354c781e..a49a6975092d 100644 --- a/net/bluetooth/hci_core.c +++ b/net/bluetooth/hci_core.c @@ -73,23 +73,23 @@ DEFINE_RWLOCK(hci_cb_list_lock); struct hci_proto *hci_proto[HCI_MAX_PROTO]; /* HCI notifiers list */ -static struct notifier_block *hci_notifier; +static ATOMIC_NOTIFIER_HEAD(hci_notifier); /* ---- HCI notifications ---- */ int hci_register_notifier(struct notifier_block *nb) { - return notifier_chain_register(&hci_notifier, nb); + return atomic_notifier_chain_register(&hci_notifier, nb); } int hci_unregister_notifier(struct notifier_block *nb) { - return 
notifier_chain_unregister(&hci_notifier, nb); + return atomic_notifier_chain_unregister(&hci_notifier, nb); } static void hci_notify(struct hci_dev *hdev, int event) { - notifier_call_chain(&hci_notifier, event, hdev); + atomic_notifier_call_chain(&hci_notifier, event, hdev); } /* ---- HCI requests ---- */ diff --git a/net/core/dev.c b/net/core/dev.c index 8e1dc3051222..a3ab11f34153 100644 --- a/net/core/dev.c +++ b/net/core/dev.c @@ -193,7 +193,7 @@ static inline struct hlist_head *dev_index_hash(int ifindex) * Our notifier list */ -static struct notifier_block *netdev_chain; +static BLOCKING_NOTIFIER_HEAD(netdev_chain); /* * Device drivers call our routines to queue packets here. We empty the @@ -736,7 +736,8 @@ int dev_change_name(struct net_device *dev, char *newname) if (!err) { hlist_del(&dev->name_hlist); hlist_add_head(&dev->name_hlist, dev_name_hash(dev->name)); - notifier_call_chain(&netdev_chain, NETDEV_CHANGENAME, dev); + blocking_notifier_call_chain(&netdev_chain, + NETDEV_CHANGENAME, dev); } return err; @@ -750,7 +751,7 @@ int dev_change_name(struct net_device *dev, char *newname) */ void netdev_features_change(struct net_device *dev) { - notifier_call_chain(&netdev_chain, NETDEV_FEAT_CHANGE, dev); + blocking_notifier_call_chain(&netdev_chain, NETDEV_FEAT_CHANGE, dev); } EXPORT_SYMBOL(netdev_features_change); @@ -765,7 +766,8 @@ EXPORT_SYMBOL(netdev_features_change); void netdev_state_change(struct net_device *dev) { if (dev->flags & IFF_UP) { - notifier_call_chain(&netdev_chain, NETDEV_CHANGE, dev); + blocking_notifier_call_chain(&netdev_chain, + NETDEV_CHANGE, dev); rtmsg_ifinfo(RTM_NEWLINK, dev, 0); } } @@ -862,7 +864,7 @@ int dev_open(struct net_device *dev) /* * ... and announce new interface. */ - notifier_call_chain(&netdev_chain, NETDEV_UP, dev); + blocking_notifier_call_chain(&netdev_chain, NETDEV_UP, dev); } return ret; } @@ -885,7 +887,7 @@ int dev_close(struct net_device *dev) * Tell people we are going down, so that they can * prepare to death, when device is still operating. */ - notifier_call_chain(&netdev_chain, NETDEV_GOING_DOWN, dev); + blocking_notifier_call_chain(&netdev_chain, NETDEV_GOING_DOWN, dev); dev_deactivate(dev); @@ -922,7 +924,7 @@ int dev_close(struct net_device *dev) /* * Tell people we are down */ - notifier_call_chain(&netdev_chain, NETDEV_DOWN, dev); + blocking_notifier_call_chain(&netdev_chain, NETDEV_DOWN, dev); return 0; } @@ -953,7 +955,7 @@ int register_netdevice_notifier(struct notifier_block *nb) int err; rtnl_lock(); - err = notifier_chain_register(&netdev_chain, nb); + err = blocking_notifier_chain_register(&netdev_chain, nb); if (!err) { for (dev = dev_base; dev; dev = dev->next) { nb->notifier_call(nb, NETDEV_REGISTER, dev); @@ -981,7 +983,7 @@ int unregister_netdevice_notifier(struct notifier_block *nb) int err; rtnl_lock(); - err = notifier_chain_unregister(&netdev_chain, nb); + err = blocking_notifier_chain_unregister(&netdev_chain, nb); rtnl_unlock(); return err; } @@ -992,12 +994,12 @@ int unregister_netdevice_notifier(struct notifier_block *nb) * @v: pointer passed unmodified to notifier function * * Call all network notifier blocks. Parameters and return value - * are as for notifier_call_chain(). + * are as for blocking_notifier_call_chain(). 
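For context, this conversion is invisible to subscribers of the chain: a driver still registers the same way, only its callback is now run from process context (the chain is a blocking one) and may sleep. A minimal sketch, with illustrative handler and module names:

#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/notifier.h>

static int my_netdev_event(struct notifier_block *nb,
                           unsigned long event, void *ptr)
{
        struct net_device *dev = ptr;

        if (event == NETDEV_UP)
                printk("%s is up\n", dev->name);
        return NOTIFY_DONE;
}

static struct notifier_block my_netdev_nb = {
        .notifier_call = my_netdev_event,
};

static int __init my_watch_init(void)
{
        return register_netdevice_notifier(&my_netdev_nb);
}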
*/ int call_netdevice_notifiers(unsigned long val, void *v) { - return notifier_call_chain(&netdev_chain, val, v); + return blocking_notifier_call_chain(&netdev_chain, val, v); } /* When > 0 there are consumers of rx skb time stamps */ @@ -2242,7 +2244,8 @@ int dev_change_flags(struct net_device *dev, unsigned flags) if (dev->flags & IFF_UP && ((old_flags ^ dev->flags) &~ (IFF_UP | IFF_PROMISC | IFF_ALLMULTI | IFF_VOLATILE))) - notifier_call_chain(&netdev_chain, NETDEV_CHANGE, dev); + blocking_notifier_call_chain(&netdev_chain, + NETDEV_CHANGE, dev); if ((flags ^ dev->gflags) & IFF_PROMISC) { int inc = (flags & IFF_PROMISC) ? +1 : -1; @@ -2286,8 +2289,8 @@ int dev_set_mtu(struct net_device *dev, int new_mtu) else dev->mtu = new_mtu; if (!err && dev->flags & IFF_UP) - notifier_call_chain(&netdev_chain, - NETDEV_CHANGEMTU, dev); + blocking_notifier_call_chain(&netdev_chain, + NETDEV_CHANGEMTU, dev); return err; } @@ -2303,7 +2306,8 @@ int dev_set_mac_address(struct net_device *dev, struct sockaddr *sa) return -ENODEV; err = dev->set_mac_address(dev, sa); if (!err) - notifier_call_chain(&netdev_chain, NETDEV_CHANGEADDR, dev); + blocking_notifier_call_chain(&netdev_chain, + NETDEV_CHANGEADDR, dev); return err; } @@ -2359,7 +2363,7 @@ static int dev_ifsioc(struct ifreq *ifr, unsigned int cmd) return -EINVAL; memcpy(dev->broadcast, ifr->ifr_hwaddr.sa_data, min(sizeof ifr->ifr_hwaddr.sa_data, (size_t) dev->addr_len)); - notifier_call_chain(&netdev_chain, + blocking_notifier_call_chain(&netdev_chain, NETDEV_CHANGEADDR, dev); return 0; @@ -2813,7 +2817,7 @@ int register_netdevice(struct net_device *dev) write_unlock_bh(&dev_base_lock); /* Notify protocols, that a new device appeared. */ - notifier_call_chain(&netdev_chain, NETDEV_REGISTER, dev); + blocking_notifier_call_chain(&netdev_chain, NETDEV_REGISTER, dev); /* Finish registration after unlock */ net_set_todo(dev); @@ -2892,7 +2896,7 @@ static void netdev_wait_allrefs(struct net_device *dev) rtnl_lock(); /* Rebroadcast unregister notification */ - notifier_call_chain(&netdev_chain, + blocking_notifier_call_chain(&netdev_chain, NETDEV_UNREGISTER, dev); if (test_bit(__LINK_STATE_LINKWATCH_PENDING, @@ -3148,7 +3152,7 @@ int unregister_netdevice(struct net_device *dev) /* Notify protocols, that we are about to destroy this device. They should clean all the things. 
*/ - notifier_call_chain(&netdev_chain, NETDEV_UNREGISTER, dev); + blocking_notifier_call_chain(&netdev_chain, NETDEV_UNREGISTER, dev); /* * Flush the multicast chain diff --git a/net/core/request_sock.c b/net/core/request_sock.c index 98f0fc923f91..1e44eda1fda9 100644 --- a/net/core/request_sock.c +++ b/net/core/request_sock.c @@ -51,7 +51,7 @@ int reqsk_queue_alloc(struct request_sock_queue *queue, get_random_bytes(&lopt->hash_rnd, sizeof(lopt->hash_rnd)); rwlock_init(&queue->syn_wait_lock); - queue->rskq_accept_head = queue->rskq_accept_head = NULL; + queue->rskq_accept_head = NULL; lopt->nr_table_entries = nr_table_entries; write_lock_bh(&queue->syn_wait_lock); diff --git a/net/decnet/dn_dev.c b/net/decnet/dn_dev.c index cc7b9d9255ef..d2ae9893ca17 100644 --- a/net/decnet/dn_dev.c +++ b/net/decnet/dn_dev.c @@ -68,7 +68,7 @@ __le16 decnet_address = 0; static DEFINE_RWLOCK(dndev_lock); static struct net_device *decnet_default_device; -static struct notifier_block *dnaddr_chain; +static BLOCKING_NOTIFIER_HEAD(dnaddr_chain); static struct dn_dev *dn_dev_create(struct net_device *dev, int *err); static void dn_dev_delete(struct net_device *dev); @@ -446,7 +446,7 @@ static void dn_dev_del_ifa(struct dn_dev *dn_db, struct dn_ifaddr **ifap, int de } rtmsg_ifa(RTM_DELADDR, ifa1); - notifier_call_chain(&dnaddr_chain, NETDEV_DOWN, ifa1); + blocking_notifier_call_chain(&dnaddr_chain, NETDEV_DOWN, ifa1); if (destroy) { dn_dev_free_ifa(ifa1); @@ -481,7 +481,7 @@ static int dn_dev_insert_ifa(struct dn_dev *dn_db, struct dn_ifaddr *ifa) dn_db->ifa_list = ifa; rtmsg_ifa(RTM_NEWADDR, ifa); - notifier_call_chain(&dnaddr_chain, NETDEV_UP, ifa); + blocking_notifier_call_chain(&dnaddr_chain, NETDEV_UP, ifa); return 0; } @@ -1285,12 +1285,12 @@ void dn_dev_devices_on(void) int register_dnaddr_notifier(struct notifier_block *nb) { - return notifier_chain_register(&dnaddr_chain, nb); + return blocking_notifier_chain_register(&dnaddr_chain, nb); } int unregister_dnaddr_notifier(struct notifier_block *nb) { - return notifier_chain_unregister(&dnaddr_chain, nb); + return blocking_notifier_chain_unregister(&dnaddr_chain, nb); } #ifdef CONFIG_PROC_FS diff --git a/net/ipv4/devinet.c b/net/ipv4/devinet.c index 44fdf1413e2c..81c2f7885292 100644 --- a/net/ipv4/devinet.c +++ b/net/ipv4/devinet.c @@ -81,7 +81,7 @@ static struct ipv4_devconf ipv4_devconf_dflt = { static void rtmsg_ifa(int event, struct in_ifaddr *); -static struct notifier_block *inetaddr_chain; +static BLOCKING_NOTIFIER_HEAD(inetaddr_chain); static void inet_del_ifa(struct in_device *in_dev, struct in_ifaddr **ifap, int destroy); #ifdef CONFIG_SYSCTL @@ -267,7 +267,8 @@ static void inet_del_ifa(struct in_device *in_dev, struct in_ifaddr **ifap, *ifap1 = ifa->ifa_next; rtmsg_ifa(RTM_DELADDR, ifa); - notifier_call_chain(&inetaddr_chain, NETDEV_DOWN, ifa); + blocking_notifier_call_chain(&inetaddr_chain, + NETDEV_DOWN, ifa); inet_free_ifa(ifa); } else { promote = ifa; @@ -291,7 +292,7 @@ static void inet_del_ifa(struct in_device *in_dev, struct in_ifaddr **ifap, So that, this order is correct. 
*/ rtmsg_ifa(RTM_DELADDR, ifa1); - notifier_call_chain(&inetaddr_chain, NETDEV_DOWN, ifa1); + blocking_notifier_call_chain(&inetaddr_chain, NETDEV_DOWN, ifa1); if (promote) { @@ -303,7 +304,8 @@ static void inet_del_ifa(struct in_device *in_dev, struct in_ifaddr **ifap, promote->ifa_flags &= ~IFA_F_SECONDARY; rtmsg_ifa(RTM_NEWADDR, promote); - notifier_call_chain(&inetaddr_chain, NETDEV_UP, promote); + blocking_notifier_call_chain(&inetaddr_chain, + NETDEV_UP, promote); for (ifa = promote->ifa_next; ifa; ifa = ifa->ifa_next) { if (ifa1->ifa_mask != ifa->ifa_mask || !inet_ifa_match(ifa1->ifa_address, ifa)) @@ -366,7 +368,7 @@ static int inet_insert_ifa(struct in_ifaddr *ifa) Notifier will trigger FIB update, so that listeners of netlink will know about new ifaddr */ rtmsg_ifa(RTM_NEWADDR, ifa); - notifier_call_chain(&inetaddr_chain, NETDEV_UP, ifa); + blocking_notifier_call_chain(&inetaddr_chain, NETDEV_UP, ifa); return 0; } @@ -938,12 +940,12 @@ u32 inet_confirm_addr(const struct net_device *dev, u32 dst, u32 local, int scop int register_inetaddr_notifier(struct notifier_block *nb) { - return notifier_chain_register(&inetaddr_chain, nb); + return blocking_notifier_chain_register(&inetaddr_chain, nb); } int unregister_inetaddr_notifier(struct notifier_block *nb) { - return notifier_chain_unregister(&inetaddr_chain, nb); + return blocking_notifier_chain_unregister(&inetaddr_chain, nb); } /* Rename ifa_labels for a device name change. Make some effort to preserve existing diff --git a/net/ipv4/ipvs/ip_vs_ctl.c b/net/ipv4/ipvs/ip_vs_ctl.c index 7f0288b25fa1..f28ec6882162 100644 --- a/net/ipv4/ipvs/ip_vs_ctl.c +++ b/net/ipv4/ipvs/ip_vs_ctl.c @@ -34,6 +34,7 @@ #include <linux/netfilter.h> #include <linux/netfilter_ipv4.h> +#include <linux/mutex.h> #include <net/ip.h> #include <net/route.h> @@ -44,7 +45,7 @@ #include <net/ip_vs.h> /* semaphore for IPVS sockopts. And, [gs]etsockopt may sleep. 
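The semaphore-to-mutex conversions in this and the following hunks all follow the same mechanical pattern; roughly (the names below are illustrative, not taken from this patch):

#include <linux/errno.h>
#include <linux/mutex.h>

static DEFINE_MUTEX(my_mutex);                     /* was: static DECLARE_MUTEX(my_sem); */

static int my_sockopt_handler(void)
{
        if (mutex_lock_interruptible(&my_mutex))   /* was: down_interruptible(&my_sem) */
                return -ERESTARTSYS;
        /* ... work that may sleep ... */
        mutex_unlock(&my_mutex);                   /* was: up(&my_sem) */
        return 0;
}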
*/
-static DECLARE_MUTEX(__ip_vs_mutex);
+static DEFINE_MUTEX(__ip_vs_mutex);
/* lock for service table */
static DEFINE_RWLOCK(__ip_vs_svc_lock);
@@ -1950,7 +1951,7 @@ do_ip_vs_set_ctl(struct sock *sk, int cmd, void __user *user, unsigned int len)
/* increase the module use count */
ip_vs_use_count_inc();
- if (down_interruptible(&__ip_vs_mutex)) {
+ if (mutex_lock_interruptible(&__ip_vs_mutex)) {
ret = -ERESTARTSYS;
goto out_dec;
}
@@ -2041,7 +2042,7 @@ do_ip_vs_set_ctl(struct sock *sk, int cmd, void __user *user, unsigned int len)
ip_vs_service_put(svc);
out_unlock:
- up(&__ip_vs_mutex);
+ mutex_unlock(&__ip_vs_mutex);
out_dec:
/* decrease the module use count */
ip_vs_use_count_dec();
@@ -2211,7 +2212,7 @@ do_ip_vs_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
if (copy_from_user(arg, user, get_arglen[GET_CMDID(cmd)]) != 0)
return -EFAULT;
- if (down_interruptible(&__ip_vs_mutex))
+ if (mutex_lock_interruptible(&__ip_vs_mutex))
return -ERESTARTSYS;
switch (cmd) {
@@ -2330,7 +2331,7 @@ do_ip_vs_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
}
out:
- up(&__ip_vs_mutex);
+ mutex_unlock(&__ip_vs_mutex);
return ret;
}
diff --git a/net/ipv4/netfilter/ip_conntrack_core.c b/net/ipv4/netfilter/ip_conntrack_core.c
index 9e34034729a6..ceaabc18202b 100644
--- a/net/ipv4/netfilter/ip_conntrack_core.c
+++ b/net/ipv4/netfilter/ip_conntrack_core.c
@@ -80,8 +80,8 @@ static int ip_conntrack_vmalloc;
static unsigned int ip_conntrack_next_id;
static unsigned int ip_conntrack_expect_next_id;
#ifdef CONFIG_IP_NF_CONNTRACK_EVENTS
-struct notifier_block *ip_conntrack_chain;
-struct notifier_block *ip_conntrack_expect_chain;
+ATOMIC_NOTIFIER_HEAD(ip_conntrack_chain);
+ATOMIC_NOTIFIER_HEAD(ip_conntrack_expect_chain);
DEFINE_PER_CPU(struct ip_conntrack_ecache, ip_conntrack_ecache);
@@ -92,7 +92,7 @@ __ip_ct_deliver_cached_events(struct ip_conntrack_ecache *ecache)
{
DEBUGP("ecache: delivering events for %p\n", ecache->ct);
if (is_confirmed(ecache->ct) && !is_dying(ecache->ct) && ecache->events)
- notifier_call_chain(&ip_conntrack_chain, ecache->events,
+ atomic_notifier_call_chain(&ip_conntrack_chain, ecache->events,
ecache->ct);
ecache->events = 0;
ip_conntrack_put(ecache->ct);
diff --git a/net/ipv4/netfilter/ipt_hashlimit.c b/net/ipv4/netfilter/ipt_hashlimit.c
index dc1521c5aa81..ba5e23505e88 100644
--- a/net/ipv4/netfilter/ipt_hashlimit.c
+++ b/net/ipv4/netfilter/ipt_hashlimit.c
@@ -40,6 +40,7 @@
/* FIXME: this is just for IP_NF_ASSERRT */
#include <linux/netfilter_ipv4/ip_conntrack.h>
+#include <linux/mutex.h>
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Harald Welte <laforge@netfilter.org>");
@@ -92,7 +93,7 @@ struct ipt_hashlimit_htable {
};
static DEFINE_SPINLOCK(hashlimit_lock); /* protects htables list */
-static DECLARE_MUTEX(hlimit_mutex); /* additional checkentry protection */
+static DEFINE_MUTEX(hlimit_mutex); /* additional checkentry protection */
static HLIST_HEAD(hashlimit_htables);
static kmem_cache_t *hashlimit_cachep __read_mostly;
@@ -542,13 +543,13 @@ hashlimit_checkentry(const char *tablename,
* call vmalloc, and that can sleep. And we cannot just re-search
* the list of htable's in htable_create(), since then we would
* create duplicate proc files. -HW */
- down(&hlimit_mutex);
+ mutex_lock(&hlimit_mutex);
r->hinfo = htable_find_get(r->name);
if (!r->hinfo && (htable_create(r) != 0)) {
- up(&hlimit_mutex);
+ mutex_unlock(&hlimit_mutex);
return 0;
}
- up(&hlimit_mutex);
+ mutex_unlock(&hlimit_mutex);
/* Ugly hack: For SMP, we only want to use one set */
r->u.master = r;
diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
index 01c62a0d3742..445006ee4522 100644
--- a/net/ipv6/addrconf.c
+++ b/net/ipv6/addrconf.c
@@ -143,7 +143,7 @@ static void inet6_prefix_notify(int event, struct inet6_dev *idev,
struct prefix_info *pinfo);
static int ipv6_chk_same_addr(const struct in6_addr *addr,
struct net_device *dev);
-static struct notifier_block *inet6addr_chain;
+static ATOMIC_NOTIFIER_HEAD(inet6addr_chain);
struct ipv6_devconf ipv6_devconf = {
.forwarding = 0,
@@ -593,7 +593,7 @@ out2:
read_unlock_bh(&addrconf_lock);
if (likely(err == 0))
- notifier_call_chain(&inet6addr_chain, NETDEV_UP, ifa);
+ atomic_notifier_call_chain(&inet6addr_chain, NETDEV_UP, ifa);
else {
kfree(ifa);
ifa = ERR_PTR(err);
@@ -688,7 +688,7 @@ static void ipv6_del_addr(struct inet6_ifaddr *ifp)
ipv6_ifa_notify(RTM_DELADDR, ifp);
- notifier_call_chain(&inet6addr_chain,NETDEV_DOWN,ifp);
+ atomic_notifier_call_chain(&inet6addr_chain, NETDEV_DOWN, ifp);
addrconf_del_timer(ifp);
@@ -3767,12 +3767,12 @@ static void addrconf_sysctl_unregister(struct ipv6_devconf *p)
int register_inet6addr_notifier(struct notifier_block *nb)
{
- return notifier_chain_register(&inet6addr_chain, nb);
+ return atomic_notifier_chain_register(&inet6addr_chain, nb);
}
int unregister_inet6addr_notifier(struct notifier_block *nb)
{
- return notifier_chain_unregister(&inet6addr_chain,nb);
+ return atomic_notifier_chain_unregister(&inet6addr_chain,nb);
}
/*
diff --git a/net/ipv6/ipcomp6.c b/net/ipv6/ipcomp6.c
index 028b636687ec..d4cfec3f414e 100644
--- a/net/ipv6/ipcomp6.c
+++ b/net/ipv6/ipcomp6.c
@@ -228,6 +228,9 @@ static struct xfrm_state *ipcomp6_tunnel_create(struct xfrm_state *x)
t->id.proto = IPPROTO_IPV6;
t->id.spi = xfrm6_tunnel_alloc_spi((xfrm_address_t *)&x->props.saddr);
+ if (!t->id.spi)
+ goto error;
+
memcpy(t->id.daddr.a6, x->id.daddr.a6, sizeof(struct in6_addr));
memcpy(&t->sel, &x->sel, sizeof(t->sel));
t->props.family = AF_INET6;
@@ -243,7 +246,9 @@ out:
return t;
error:
+ t->km.state = XFRM_STATE_DEAD;
xfrm_state_put(t);
+ t = NULL;
goto out;
}
diff --git a/net/irda/af_irda.c b/net/irda/af_irda.c
index 759445648667..627b11342233 100644
--- a/net/irda/af_irda.c
+++ b/net/irda/af_irda.c
@@ -1302,7 +1302,7 @@ static int irda_sendmsg(struct kiocb *iocb, struct socket *sock,
if (sk->sk_state != TCP_ESTABLISHED)
return -ENOTCONN;
- /* Check that we don't send out to big frames */
+ /* Check that we don't send out too big frames */
if (len > self->max_data_size) {
IRDA_DEBUG(2, "%s(), Chopping frame from %zd to %d bytes!\n",
__FUNCTION__, len, self->max_data_size);
@@ -1546,7 +1546,7 @@ static int irda_sendmsg_dgram(struct kiocb *iocb, struct socket *sock,
IRDA_ASSERT(self != NULL, return -1;);
/*
- * Check that we don't send out to big frames. This is an unreliable
+ * Check that we don't send out too big frames. This is an unreliable
* service, so we have no fragmentation and no coalescence
*/
if (len > self->max_data_size) {
@@ -1642,7 +1642,7 @@ static int irda_sendmsg_ultra(struct kiocb *iocb, struct socket *sock,
}
/*
- * Check that we don't send out to big frames. This is an unreliable
+ * Check that we don't send out too big frames. This is an unreliable
* service, so we have no fragmentation and no coalescence
*/
if (len > self->max_data_size) {
diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c
index 0ae281d9bfc3..56389c83557c 100644
--- a/net/netfilter/nf_conntrack_core.c
+++ b/net/netfilter/nf_conntrack_core.c
@@ -90,8 +90,8 @@ static int nf_conntrack_vmalloc;
static unsigned int nf_conntrack_next_id;
static unsigned int nf_conntrack_expect_next_id;
#ifdef CONFIG_NF_CONNTRACK_EVENTS
-struct notifier_block *nf_conntrack_chain;
-struct notifier_block *nf_conntrack_expect_chain;
+ATOMIC_NOTIFIER_HEAD(nf_conntrack_chain);
+ATOMIC_NOTIFIER_HEAD(nf_conntrack_expect_chain);
DEFINE_PER_CPU(struct nf_conntrack_ecache, nf_conntrack_ecache);
@@ -103,7 +103,7 @@ __nf_ct_deliver_cached_events(struct nf_conntrack_ecache *ecache)
DEBUGP("ecache: delivering events for %p\n", ecache->ct);
if (nf_ct_is_confirmed(ecache->ct) && !nf_ct_is_dying(ecache->ct) && ecache->events)
- notifier_call_chain(&nf_conntrack_chain, ecache->events,
+ atomic_notifier_call_chain(&nf_conntrack_chain, ecache->events,
ecache->ct);
ecache->events = 0;
diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c
index d00a9034cb5f..2a233ffcf618 100644
--- a/net/netlink/af_netlink.c
+++ b/net/netlink/af_netlink.c
@@ -123,7 +123,7 @@ static void netlink_destroy_callback(struct netlink_callback *cb);
static DEFINE_RWLOCK(nl_table_lock);
static atomic_t nl_table_users = ATOMIC_INIT(0);
-static struct notifier_block *netlink_chain;
+static ATOMIC_NOTIFIER_HEAD(netlink_chain);
static u32 netlink_group_mask(u32 group)
{
@@ -469,7 +469,8 @@ static int netlink_release(struct socket *sock)
.protocol = sk->sk_protocol,
.pid = nlk->pid,
};
- notifier_call_chain(&netlink_chain, NETLINK_URELEASE, &n);
+ atomic_notifier_call_chain(&netlink_chain,
+ NETLINK_URELEASE, &n);
}
if (nlk->module)
@@ -1695,12 +1696,12 @@ static struct file_operations netlink_seq_fops = {
int netlink_register_notifier(struct notifier_block *nb)
{
- return notifier_chain_register(&netlink_chain, nb);
+ return atomic_notifier_chain_register(&netlink_chain, nb);
}
int netlink_unregister_notifier(struct notifier_block *nb)
{
- return notifier_chain_unregister(&netlink_chain, nb);
+ return atomic_notifier_chain_unregister(&netlink_chain, nb);
}
static const struct proto_ops netlink_ops = {
diff --git a/net/netlink/genetlink.c b/net/netlink/genetlink.c
index 43e72419c868..f329b72578f5 100644
--- a/net/netlink/genetlink.c
+++ b/net/netlink/genetlink.c
@@ -13,26 +13,27 @@
#include <linux/socket.h>
#include <linux/string.h>
#include <linux/skbuff.h>
+#include <linux/mutex.h>
#include <net/sock.h>
#include <net/genetlink.h>
struct sock *genl_sock = NULL;
-static DECLARE_MUTEX(genl_sem); /* serialization of message processing */
+static DEFINE_MUTEX(genl_mutex); /* serialization of message processing */
static void genl_lock(void)
{
- down(&genl_sem);
+ mutex_lock(&genl_mutex);
}
static int genl_trylock(void)
{
- return down_trylock(&genl_sem);
+ return !mutex_trylock(&genl_mutex);
}
static void genl_unlock(void)
{
- up(&genl_sem);
+ mutex_unlock(&genl_mutex);
if (genl_sock && genl_sock->sk_receive_queue.qlen)
genl_sock->sk_data_ready(genl_sock, 0);
diff --git a/net/nonet.c b/net/nonet.c
index 1230f0ae832e..92e76640c7cd 100644
--- a/net/nonet.c
+++ b/net/nonet.c
@@ -19,7 +19,7 @@ static int sock_no_open(struct inode *irrelevant, struct file *dontcare)
return -ENXIO;
}
-struct file_operations bad_sock_fops = {
+const struct file_operations bad_sock_fops = {
.owner =
THIS_MODULE, .open = sock_no_open, }; diff --git a/net/socket.c b/net/socket.c index 5211ba270375..fcd77eac0ccf 100644 --- a/net/socket.c +++ b/net/socket.c @@ -539,7 +539,7 @@ static int sock_no_open(struct inode *irrelevant, struct file *dontcare) return -ENXIO; } -struct file_operations bad_sock_fops = { +const struct file_operations bad_sock_fops = { .owner = THIS_MODULE, .open = sock_no_open, }; diff --git a/net/sunrpc/auth_gss/svcauth_gss.c b/net/sunrpc/auth_gss/svcauth_gss.c index 23632d84d8d7..4d7eb9e704da 100644 --- a/net/sunrpc/auth_gss/svcauth_gss.c +++ b/net/sunrpc/auth_gss/svcauth_gss.c @@ -78,7 +78,8 @@ struct rsi { static struct cache_head *rsi_table[RSI_HASHMAX]; static struct cache_detail rsi_cache; -static struct rsi *rsi_lookup(struct rsi *item, int set); +static struct rsi *rsi_update(struct rsi *new, struct rsi *old); +static struct rsi *rsi_lookup(struct rsi *item); static void rsi_free(struct rsi *rsii) { @@ -88,13 +89,11 @@ static void rsi_free(struct rsi *rsii) kfree(rsii->out_token.data); } -static void rsi_put(struct cache_head *item, struct cache_detail *cd) +static void rsi_put(struct kref *ref) { - struct rsi *rsii = container_of(item, struct rsi, h); - if (cache_put(item, cd)) { - rsi_free(rsii); - kfree(rsii); - } + struct rsi *rsii = container_of(ref, struct rsi, h.ref); + rsi_free(rsii); + kfree(rsii); } static inline int rsi_hash(struct rsi *item) @@ -103,8 +102,10 @@ static inline int rsi_hash(struct rsi *item) ^ hash_mem(item->in_token.data, item->in_token.len, RSI_HASHBITS); } -static inline int rsi_match(struct rsi *item, struct rsi *tmp) +static int rsi_match(struct cache_head *a, struct cache_head *b) { + struct rsi *item = container_of(a, struct rsi, h); + struct rsi *tmp = container_of(b, struct rsi, h); return netobj_equal(&item->in_handle, &tmp->in_handle) && netobj_equal(&item->in_token, &tmp->in_token); } @@ -125,8 +126,11 @@ static inline int dup_netobj(struct xdr_netobj *dst, struct xdr_netobj *src) return dup_to_netobj(dst, src->data, src->len); } -static inline void rsi_init(struct rsi *new, struct rsi *item) +static void rsi_init(struct cache_head *cnew, struct cache_head *citem) { + struct rsi *new = container_of(cnew, struct rsi, h); + struct rsi *item = container_of(citem, struct rsi, h); + new->out_handle.data = NULL; new->out_handle.len = 0; new->out_token.data = NULL; @@ -141,8 +145,11 @@ static inline void rsi_init(struct rsi *new, struct rsi *item) item->in_token.data = NULL; } -static inline void rsi_update(struct rsi *new, struct rsi *item) +static void update_rsi(struct cache_head *cnew, struct cache_head *citem) { + struct rsi *new = container_of(cnew, struct rsi, h); + struct rsi *item = container_of(citem, struct rsi, h); + BUG_ON(new->out_handle.data || new->out_token.data); new->out_handle.len = item->out_handle.len; item->out_handle.len = 0; @@ -157,6 +164,15 @@ static inline void rsi_update(struct rsi *new, struct rsi *item) new->minor_status = item->minor_status; } +static struct cache_head *rsi_alloc(void) +{ + struct rsi *rsii = kmalloc(sizeof(*rsii), GFP_KERNEL); + if (rsii) + return &rsii->h; + else + return NULL; +} + static void rsi_request(struct cache_detail *cd, struct cache_head *h, char **bpp, int *blen) @@ -198,6 +214,10 @@ static int rsi_parse(struct cache_detail *cd, if (dup_to_netobj(&rsii.in_token, buf, len)) goto out; + rsip = rsi_lookup(&rsii); + if (!rsip) + goto out; + rsii.h.flags = 0; /* expiry */ expiry = get_expiry(&mesg); @@ -240,12 +260,14 @@ static int rsi_parse(struct cache_detail *cd, goto 
out; } rsii.h.expiry_time = expiry; - rsip = rsi_lookup(&rsii, 1); + rsip = rsi_update(&rsii, rsip); status = 0; out: rsi_free(&rsii); if (rsip) - rsi_put(&rsip->h, &rsi_cache); + cache_put(&rsip->h, &rsi_cache); + else + status = -ENOMEM; return status; } @@ -257,9 +279,37 @@ static struct cache_detail rsi_cache = { .cache_put = rsi_put, .cache_request = rsi_request, .cache_parse = rsi_parse, + .match = rsi_match, + .init = rsi_init, + .update = update_rsi, + .alloc = rsi_alloc, }; -static DefineSimpleCacheLookup(rsi, 0) +static struct rsi *rsi_lookup(struct rsi *item) +{ + struct cache_head *ch; + int hash = rsi_hash(item); + + ch = sunrpc_cache_lookup(&rsi_cache, &item->h, hash); + if (ch) + return container_of(ch, struct rsi, h); + else + return NULL; +} + +static struct rsi *rsi_update(struct rsi *new, struct rsi *old) +{ + struct cache_head *ch; + int hash = rsi_hash(new); + + ch = sunrpc_cache_update(&rsi_cache, &new->h, + &old->h, hash); + if (ch) + return container_of(ch, struct rsi, h); + else + return NULL; +} + /* * The rpcsec_context cache is used to store a context that is @@ -293,7 +343,8 @@ struct rsc { static struct cache_head *rsc_table[RSC_HASHMAX]; static struct cache_detail rsc_cache; -static struct rsc *rsc_lookup(struct rsc *item, int set); +static struct rsc *rsc_update(struct rsc *new, struct rsc *old); +static struct rsc *rsc_lookup(struct rsc *item); static void rsc_free(struct rsc *rsci) { @@ -304,14 +355,12 @@ static void rsc_free(struct rsc *rsci) put_group_info(rsci->cred.cr_group_info); } -static void rsc_put(struct cache_head *item, struct cache_detail *cd) +static void rsc_put(struct kref *ref) { - struct rsc *rsci = container_of(item, struct rsc, h); + struct rsc *rsci = container_of(ref, struct rsc, h.ref); - if (cache_put(item, cd)) { - rsc_free(rsci); - kfree(rsci); - } + rsc_free(rsci); + kfree(rsci); } static inline int @@ -320,15 +369,21 @@ rsc_hash(struct rsc *rsci) return hash_mem(rsci->handle.data, rsci->handle.len, RSC_HASHBITS); } -static inline int -rsc_match(struct rsc *new, struct rsc *tmp) +static int +rsc_match(struct cache_head *a, struct cache_head *b) { + struct rsc *new = container_of(a, struct rsc, h); + struct rsc *tmp = container_of(b, struct rsc, h); + return netobj_equal(&new->handle, &tmp->handle); } -static inline void -rsc_init(struct rsc *new, struct rsc *tmp) +static void +rsc_init(struct cache_head *cnew, struct cache_head *ctmp) { + struct rsc *new = container_of(cnew, struct rsc, h); + struct rsc *tmp = container_of(ctmp, struct rsc, h); + new->handle.len = tmp->handle.len; tmp->handle.len = 0; new->handle.data = tmp->handle.data; @@ -337,9 +392,12 @@ rsc_init(struct rsc *new, struct rsc *tmp) new->cred.cr_group_info = NULL; } -static inline void -rsc_update(struct rsc *new, struct rsc *tmp) +static void +update_rsc(struct cache_head *cnew, struct cache_head *ctmp) { + struct rsc *new = container_of(cnew, struct rsc, h); + struct rsc *tmp = container_of(ctmp, struct rsc, h); + new->mechctx = tmp->mechctx; tmp->mechctx = NULL; memset(&new->seqdata, 0, sizeof(new->seqdata)); @@ -348,6 +406,16 @@ rsc_update(struct rsc *new, struct rsc *tmp) tmp->cred.cr_group_info = NULL; } +static struct cache_head * +rsc_alloc(void) +{ + struct rsc *rsci = kmalloc(sizeof(*rsci), GFP_KERNEL); + if (rsci) + return &rsci->h; + else + return NULL; +} + static int rsc_parse(struct cache_detail *cd, char *mesg, int mlen) { @@ -373,6 +441,10 @@ static int rsc_parse(struct cache_detail *cd, if (expiry == 0) goto out; + rscp = rsc_lookup(&rsci); + 
if (!rscp) + goto out; + /* uid, or NEGATIVE */ rv = get_int(&mesg, &rsci.cred.cr_uid); if (rv == -EINVAL) @@ -428,12 +500,14 @@ static int rsc_parse(struct cache_detail *cd, gss_mech_put(gm); } rsci.h.expiry_time = expiry; - rscp = rsc_lookup(&rsci, 1); + rscp = rsc_update(&rsci, rscp); status = 0; out: rsc_free(&rsci); if (rscp) - rsc_put(&rscp->h, &rsc_cache); + cache_put(&rscp->h, &rsc_cache); + else + status = -ENOMEM; return status; } @@ -444,9 +518,37 @@ static struct cache_detail rsc_cache = { .name = "auth.rpcsec.context", .cache_put = rsc_put, .cache_parse = rsc_parse, + .match = rsc_match, + .init = rsc_init, + .update = update_rsc, + .alloc = rsc_alloc, }; -static DefineSimpleCacheLookup(rsc, 0); +static struct rsc *rsc_lookup(struct rsc *item) +{ + struct cache_head *ch; + int hash = rsc_hash(item); + + ch = sunrpc_cache_lookup(&rsc_cache, &item->h, hash); + if (ch) + return container_of(ch, struct rsc, h); + else + return NULL; +} + +static struct rsc *rsc_update(struct rsc *new, struct rsc *old) +{ + struct cache_head *ch; + int hash = rsc_hash(new); + + ch = sunrpc_cache_update(&rsc_cache, &new->h, + &old->h, hash); + if (ch) + return container_of(ch, struct rsc, h); + else + return NULL; +} + static struct rsc * gss_svc_searchbyctx(struct xdr_netobj *handle) @@ -457,7 +559,7 @@ gss_svc_searchbyctx(struct xdr_netobj *handle) memset(&rsci, 0, sizeof(rsci)); if (dup_to_netobj(&rsci.handle, handle->data, handle->len)) return NULL; - found = rsc_lookup(&rsci, 0); + found = rsc_lookup(&rsci); rsc_free(&rsci); if (!found) return NULL; @@ -645,6 +747,8 @@ find_gss_auth_domain(struct gss_ctx *ctx, u32 svc) return auth_domain_find(name); } +static struct auth_ops svcauthops_gss; + int svcauth_gss_register_pseudoflavor(u32 pseudoflavor, char * name) { @@ -655,20 +759,18 @@ svcauth_gss_register_pseudoflavor(u32 pseudoflavor, char * name) new = kmalloc(sizeof(*new), GFP_KERNEL); if (!new) goto out; - cache_init(&new->h.h); + kref_init(&new->h.ref); new->h.name = kmalloc(strlen(name) + 1, GFP_KERNEL); if (!new->h.name) goto out_free_dom; strcpy(new->h.name, name); - new->h.flavour = RPC_AUTH_GSS; + new->h.flavour = &svcauthops_gss; new->pseudoflavor = pseudoflavor; - new->h.h.expiry_time = NEVER; - test = auth_domain_lookup(&new->h, 1); - if (test == &new->h) { - BUG_ON(atomic_dec_and_test(&new->h.h.refcnt)); - } else { /* XXX Duplicate registration? */ + test = auth_domain_lookup(name, &new->h); + if (test != &new->h) { /* XXX Duplicate registration? */ auth_domain_put(&new->h); + /* dangling ref-count... 
*/ goto out; } return 0; @@ -895,7 +997,7 @@ svcauth_gss_accept(struct svc_rqst *rqstp, u32 *authp) goto drop; } - rsip = rsi_lookup(&rsikey, 0); + rsip = rsi_lookup(&rsikey); rsi_free(&rsikey); if (!rsip) { goto drop; @@ -970,7 +1072,7 @@ drop: ret = SVC_DROP; out: if (rsci) - rsc_put(&rsci->h, &rsc_cache); + cache_put(&rsci->h, &rsc_cache); return ret; } @@ -1062,7 +1164,7 @@ out_err: put_group_info(rqstp->rq_cred.cr_group_info); rqstp->rq_cred.cr_group_info = NULL; if (gsd->rsci) - rsc_put(&gsd->rsci->h, &rsc_cache); + cache_put(&gsd->rsci->h, &rsc_cache); gsd->rsci = NULL; return stat; diff --git a/net/sunrpc/cache.c b/net/sunrpc/cache.c index 0acccfeeb284..3ac4193a78ed 100644 --- a/net/sunrpc/cache.c +++ b/net/sunrpc/cache.c @@ -37,16 +37,138 @@ static void cache_defer_req(struct cache_req *req, struct cache_head *item); static void cache_revisit_request(struct cache_head *item); -void cache_init(struct cache_head *h) +static void cache_init(struct cache_head *h) { time_t now = get_seconds(); h->next = NULL; h->flags = 0; - atomic_set(&h->refcnt, 1); + kref_init(&h->ref); h->expiry_time = now + CACHE_NEW_EXPIRY; h->last_refresh = now; } +struct cache_head *sunrpc_cache_lookup(struct cache_detail *detail, + struct cache_head *key, int hash) +{ + struct cache_head **head, **hp; + struct cache_head *new = NULL; + + head = &detail->hash_table[hash]; + + read_lock(&detail->hash_lock); + + for (hp=head; *hp != NULL ; hp = &(*hp)->next) { + struct cache_head *tmp = *hp; + if (detail->match(tmp, key)) { + cache_get(tmp); + read_unlock(&detail->hash_lock); + return tmp; + } + } + read_unlock(&detail->hash_lock); + /* Didn't find anything, insert an empty entry */ + + new = detail->alloc(); + if (!new) + return NULL; + cache_init(new); + + write_lock(&detail->hash_lock); + + /* check if entry appeared while we slept */ + for (hp=head; *hp != NULL ; hp = &(*hp)->next) { + struct cache_head *tmp = *hp; + if (detail->match(tmp, key)) { + cache_get(tmp); + write_unlock(&detail->hash_lock); + cache_put(new, detail); + return tmp; + } + } + detail->init(new, key); + new->next = *head; + *head = new; + detail->entries++; + cache_get(new); + write_unlock(&detail->hash_lock); + + return new; +} +EXPORT_SYMBOL(sunrpc_cache_lookup); + + +static void queue_loose(struct cache_detail *detail, struct cache_head *ch); + +static int cache_fresh_locked(struct cache_head *head, time_t expiry) +{ + head->expiry_time = expiry; + head->last_refresh = get_seconds(); + return !test_and_set_bit(CACHE_VALID, &head->flags); +} + +static void cache_fresh_unlocked(struct cache_head *head, + struct cache_detail *detail, int new) +{ + if (new) + cache_revisit_request(head); + if (test_and_clear_bit(CACHE_PENDING, &head->flags)) { + cache_revisit_request(head); + queue_loose(detail, head); + } +} + +struct cache_head *sunrpc_cache_update(struct cache_detail *detail, + struct cache_head *new, struct cache_head *old, int hash) +{ + /* The 'old' entry is to be replaced by 'new'. 
+ * If 'old' is not VALID, we update it directly, + * otherwise we need to replace it + */ + struct cache_head **head; + struct cache_head *tmp; + int is_new; + + if (!test_bit(CACHE_VALID, &old->flags)) { + write_lock(&detail->hash_lock); + if (!test_bit(CACHE_VALID, &old->flags)) { + if (test_bit(CACHE_NEGATIVE, &new->flags)) + set_bit(CACHE_NEGATIVE, &old->flags); + else + detail->update(old, new); + is_new = cache_fresh_locked(old, new->expiry_time); + write_unlock(&detail->hash_lock); + cache_fresh_unlocked(old, detail, is_new); + return old; + } + write_unlock(&detail->hash_lock); + } + /* We need to insert a new entry */ + tmp = detail->alloc(); + if (!tmp) { + cache_put(old, detail); + return NULL; + } + cache_init(tmp); + detail->init(tmp, old); + head = &detail->hash_table[hash]; + + write_lock(&detail->hash_lock); + if (test_bit(CACHE_NEGATIVE, &new->flags)) + set_bit(CACHE_NEGATIVE, &tmp->flags); + else + detail->update(tmp, new); + tmp->next = *head; + *head = tmp; + cache_get(tmp); + is_new = cache_fresh_locked(tmp, new->expiry_time); + cache_fresh_locked(old, 0); + write_unlock(&detail->hash_lock); + cache_fresh_unlocked(tmp, detail, is_new); + cache_fresh_unlocked(old, detail, 0); + cache_put(old, detail); + return tmp; +} +EXPORT_SYMBOL(sunrpc_cache_update); static int cache_make_upcall(struct cache_detail *detail, struct cache_head *h); /* @@ -94,7 +216,8 @@ int cache_check(struct cache_detail *detail, clear_bit(CACHE_PENDING, &h->flags); if (rv == -EAGAIN) { set_bit(CACHE_NEGATIVE, &h->flags); - cache_fresh(detail, h, get_seconds()+CACHE_NEW_EXPIRY); + cache_fresh_unlocked(h, detail, + cache_fresh_locked(h, get_seconds()+CACHE_NEW_EXPIRY)); rv = -ENOENT; } break; @@ -110,25 +233,11 @@ int cache_check(struct cache_detail *detail, if (rv == -EAGAIN) cache_defer_req(rqstp, h); - if (rv && h) - detail->cache_put(h, detail); + if (rv) + cache_put(h, detail); return rv; } -static void queue_loose(struct cache_detail *detail, struct cache_head *ch); - -void cache_fresh(struct cache_detail *detail, - struct cache_head *head, time_t expiry) -{ - - head->expiry_time = expiry; - head->last_refresh = get_seconds(); - if (!test_and_set_bit(CACHE_VALID, &head->flags)) - cache_revisit_request(head); - if (test_and_clear_bit(CACHE_PENDING, &head->flags)) - queue_loose(detail, head); -} - /* * caches need to be periodically cleaned. * For this we maintain a list of cache_detail and @@ -322,7 +431,7 @@ static int cache_clean(void) if (test_and_clear_bit(CACHE_PENDING, &ch->flags)) queue_loose(current_detail, ch); - if (atomic_read(&ch->refcnt) == 1) + if (atomic_read(&ch->ref.refcount) == 1) break; } if (ch) { @@ -337,7 +446,7 @@ static int cache_clean(void) current_index ++; spin_unlock(&cache_list_lock); if (ch) - d->cache_put(ch, d); + cache_put(ch, d); } else spin_unlock(&cache_list_lock); @@ -453,7 +562,7 @@ static void cache_defer_req(struct cache_req *req, struct cache_head *item) /* there was one too many */ dreq->revisit(dreq, 1); } - if (test_bit(CACHE_VALID, &item->flags)) { + if (!test_bit(CACHE_PENDING, &item->flags)) { /* must have just been validated... 
*/ cache_revisit_request(item); } @@ -614,7 +723,7 @@ cache_read(struct file *filp, char __user *buf, size_t count, loff_t *ppos) !test_bit(CACHE_PENDING, &rq->item->flags)) { list_del(&rq->q.list); spin_unlock(&queue_lock); - cd->cache_put(rq->item, cd); + cache_put(rq->item, cd); kfree(rq->buf); kfree(rq); } else @@ -794,10 +903,10 @@ static void queue_loose(struct cache_detail *detail, struct cache_head *ch) if (cr->item != ch) continue; if (cr->readers != 0) - break; + continue; list_del(&cr->q.list); spin_unlock(&queue_lock); - detail->cache_put(cr->item, detail); + cache_put(cr->item, detail); kfree(cr->buf); kfree(cr); return; @@ -1082,8 +1191,8 @@ static int c_show(struct seq_file *m, void *p) return cd->cache_show(m, cd, NULL); ifdebug(CACHE) - seq_printf(m, "# expiry=%ld refcnt=%d\n", - cp->expiry_time, atomic_read(&cp->refcnt)); + seq_printf(m, "# expiry=%ld refcnt=%d flags=%lx\n", + cp->expiry_time, atomic_read(&cp->ref.refcount), cp->flags); cache_get(cp); if (cache_check(cd, cp, NULL)) /* cache_check does a cache_put on failure */ diff --git a/net/sunrpc/rpc_pipe.c b/net/sunrpc/rpc_pipe.c index aa4158be9900..cc673dd8433f 100644 --- a/net/sunrpc/rpc_pipe.c +++ b/net/sunrpc/rpc_pipe.c @@ -395,7 +395,7 @@ enum { */ struct rpc_filelist { char *name; - struct file_operations *i_fop; + const struct file_operations *i_fop; int mode; }; diff --git a/net/sunrpc/sched.c b/net/sunrpc/sched.c index b9969b91a9f7..5c3eee768504 100644 --- a/net/sunrpc/sched.c +++ b/net/sunrpc/sched.c @@ -1167,16 +1167,12 @@ rpc_init_mempool(void) NULL, NULL); if (!rpc_buffer_slabp) goto err_nomem; - rpc_task_mempool = mempool_create(RPC_TASK_POOLSIZE, - mempool_alloc_slab, - mempool_free_slab, - rpc_task_slabp); + rpc_task_mempool = mempool_create_slab_pool(RPC_TASK_POOLSIZE, + rpc_task_slabp); if (!rpc_task_mempool) goto err_nomem; - rpc_buffer_mempool = mempool_create(RPC_BUFFER_POOLSIZE, - mempool_alloc_slab, - mempool_free_slab, - rpc_buffer_slabp); + rpc_buffer_mempool = mempool_create_slab_pool(RPC_BUFFER_POOLSIZE, + rpc_buffer_slabp); if (!rpc_buffer_mempool) goto err_nomem; return 0; diff --git a/net/sunrpc/stats.c b/net/sunrpc/stats.c index 790941e8af4d..dea529666d69 100644 --- a/net/sunrpc/stats.c +++ b/net/sunrpc/stats.c @@ -225,7 +225,7 @@ EXPORT_SYMBOL(rpc_print_iostats); * Register/unregister RPC proc files */ static inline struct proc_dir_entry * -do_register(const char *name, void *data, struct file_operations *fops) +do_register(const char *name, void *data, const struct file_operations *fops) { struct proc_dir_entry *ent; @@ -253,7 +253,7 @@ rpc_proc_unregister(const char *name) } struct proc_dir_entry * -svc_proc_register(struct svc_stat *statp, struct file_operations *fops) +svc_proc_register(struct svc_stat *statp, const struct file_operations *fops) { return do_register(statp->program->pg_name, statp, fops); } diff --git a/net/sunrpc/sunrpc_syms.c b/net/sunrpc/sunrpc_syms.c index 9f7373203592..769114f0f886 100644 --- a/net/sunrpc/sunrpc_syms.c +++ b/net/sunrpc/sunrpc_syms.c @@ -105,8 +105,6 @@ EXPORT_SYMBOL(auth_unix_lookup); EXPORT_SYMBOL(cache_check); EXPORT_SYMBOL(cache_flush); EXPORT_SYMBOL(cache_purge); -EXPORT_SYMBOL(cache_fresh); -EXPORT_SYMBOL(cache_init); EXPORT_SYMBOL(cache_register); EXPORT_SYMBOL(cache_unregister); EXPORT_SYMBOL(qword_add); @@ -142,6 +140,7 @@ EXPORT_SYMBOL(nlm_debug); extern int register_rpc_pipefs(void); extern void unregister_rpc_pipefs(void); +extern struct cache_detail ip_map_cache; static int __init init_sunrpc(void) @@ -158,7 +157,6 @@ 
init_sunrpc(void)
#ifdef CONFIG_PROC_FS
rpc_proc_init();
#endif
- cache_register(&auth_domain_cache);
cache_register(&ip_map_cache);
out:
return err;
}
@@ -169,8 +167,6 @@ cleanup_sunrpc(void)
{
unregister_rpc_pipefs();
rpc_destroy_mempool();
- if (cache_unregister(&auth_domain_cache))
- printk(KERN_ERR "sunrpc: failed to unregister auth_domain cache\n");
if (cache_unregister(&ip_map_cache))
printk(KERN_ERR "sunrpc: failed to unregister ip_map cache\n");
#ifdef RPC_DEBUG
diff --git a/net/sunrpc/svcauth.c b/net/sunrpc/svcauth.c
index dda4f0c63511..5b28c6176806 100644
--- a/net/sunrpc/svcauth.c
+++ b/net/sunrpc/svcauth.c
@@ -106,112 +106,56 @@ svc_auth_unregister(rpc_authflavor_t flavor)
EXPORT_SYMBOL(svc_auth_unregister);
/**************************************************
- * cache for domain name to auth_domain
- * Entries are only added by flavours which will normally
- * have a structure that 'inherits' from auth_domain.
- * e.g. when an IP -> domainname is given to auth_unix,
- * and the domain name doesn't exist, it will create a
- * auth_unix_domain and add it to this hash table.
- * If it finds the name does exist, but isn't AUTH_UNIX,
- * it will complain.
+ * 'auth_domains' are stored in a hash table indexed by name.
+ * When the last reference to an 'auth_domain' is dropped,
+ * the object is unhashed and freed.
+ * If auth_domain_lookup fails to find an entry, it will return
+ * it's second argument 'new'. If this is non-null, it will
+ * have been atomically linked into the table.
*/
-/*
- * Auth auth_domain cache is somewhat different to other caches,
- * largely because the entries are possibly of different types:
- * each auth flavour has it's own type.
- * One consequence of this that DefineCacheLookup cannot
- * allocate a new structure as it cannot know the size.
- * Notice that the "INIT" code fragment is quite different
- * from other caches. When auth_domain_lookup might be
- * creating a new domain, the new domain is passed in
- * complete and it is used as-is rather than being copied into
- * another structure.
- */ #define DN_HASHBITS 6 #define DN_HASHMAX (1<<DN_HASHBITS) #define DN_HASHMASK (DN_HASHMAX-1) -static struct cache_head *auth_domain_table[DN_HASHMAX]; - -static void auth_domain_drop(struct cache_head *item, struct cache_detail *cd) -{ - struct auth_domain *dom = container_of(item, struct auth_domain, h); - if (cache_put(item,cd)) - authtab[dom->flavour]->domain_release(dom); -} - - -struct cache_detail auth_domain_cache = { - .owner = THIS_MODULE, - .hash_size = DN_HASHMAX, - .hash_table = auth_domain_table, - .name = "auth.domain", - .cache_put = auth_domain_drop, -}; +static struct hlist_head auth_domain_table[DN_HASHMAX]; +static spinlock_t auth_domain_lock = SPIN_LOCK_UNLOCKED; void auth_domain_put(struct auth_domain *dom) { - auth_domain_drop(&dom->h, &auth_domain_cache); -} - -static inline int auth_domain_hash(struct auth_domain *item) -{ - return hash_str(item->name, DN_HASHBITS); -} -static inline int auth_domain_match(struct auth_domain *tmp, struct auth_domain *item) -{ - return strcmp(tmp->name, item->name) == 0; + if (atomic_dec_and_lock(&dom->ref.refcount, &auth_domain_lock)) { + hlist_del(&dom->hash); + dom->flavour->domain_release(dom); + } } struct auth_domain * -auth_domain_lookup(struct auth_domain *item, int set) +auth_domain_lookup(char *name, struct auth_domain *new) { - struct auth_domain *tmp = NULL; - struct cache_head **hp, **head; - head = &auth_domain_cache.hash_table[auth_domain_hash(item)]; - - if (set) - write_lock(&auth_domain_cache.hash_lock); - else - read_lock(&auth_domain_cache.hash_lock); - for (hp=head; *hp != NULL; hp = &tmp->h.next) { - tmp = container_of(*hp, struct auth_domain, h); - if (!auth_domain_match(tmp, item)) - continue; - if (!set) { - cache_get(&tmp->h); - goto out_noset; + struct auth_domain *hp; + struct hlist_head *head; + struct hlist_node *np; + + head = &auth_domain_table[hash_str(name, DN_HASHBITS)]; + + spin_lock(&auth_domain_lock); + + hlist_for_each_entry(hp, np, head, hash) { + if (strcmp(hp->name, name)==0) { + kref_get(&hp->ref); + spin_unlock(&auth_domain_lock); + return hp; } - *hp = tmp->h.next; - tmp->h.next = NULL; - auth_domain_drop(&tmp->h, &auth_domain_cache); - goto out_set; } - /* Didn't find anything */ - if (!set) - goto out_nada; - auth_domain_cache.entries++; -out_set: - item->h.next = *head; - *head = &item->h; - cache_get(&item->h); - write_unlock(&auth_domain_cache.hash_lock); - cache_fresh(&auth_domain_cache, &item->h, item->h.expiry_time); - cache_get(&item->h); - return item; -out_nada: - tmp = NULL; -out_noset: - read_unlock(&auth_domain_cache.hash_lock); - return tmp; + if (new) { + hlist_add_head(&new->hash, head); + kref_get(&new->ref); + } + spin_unlock(&auth_domain_lock); + return new; } struct auth_domain *auth_domain_find(char *name) { - struct auth_domain *rv, ad; - - ad.name = name; - rv = auth_domain_lookup(&ad, 0); - return rv; + return auth_domain_lookup(name, NULL); } diff --git a/net/sunrpc/svcauth_unix.c b/net/sunrpc/svcauth_unix.c index 3e6c694bbad1..7e5707e2d6b6 100644 --- a/net/sunrpc/svcauth_unix.c +++ b/net/sunrpc/svcauth_unix.c @@ -27,41 +27,35 @@ struct unix_domain { /* other stuff later */ }; +extern struct auth_ops svcauth_unix; + struct auth_domain *unix_domain_find(char *name) { - struct auth_domain *rv, ud; - struct unix_domain *new; - - ud.name = name; - - rv = auth_domain_lookup(&ud, 0); - - foundit: - if (rv && rv->flavour != RPC_AUTH_UNIX) { - auth_domain_put(rv); - return NULL; - } - if (rv) - return rv; - - new = kmalloc(sizeof(*new), GFP_KERNEL); - if (new == NULL) 
- return NULL; - cache_init(&new->h.h); - new->h.name = kstrdup(name, GFP_KERNEL); - new->h.flavour = RPC_AUTH_UNIX; - new->addr_changes = 0; - new->h.h.expiry_time = NEVER; - - rv = auth_domain_lookup(&new->h, 2); - if (rv == &new->h) { - if (atomic_dec_and_test(&new->h.h.refcnt)) BUG(); - } else { - auth_domain_put(&new->h); - goto foundit; + struct auth_domain *rv; + struct unix_domain *new = NULL; + + rv = auth_domain_lookup(name, NULL); + while(1) { + if (rv) { + if (new && rv != &new->h) + auth_domain_put(&new->h); + + if (rv->flavour != &svcauth_unix) { + auth_domain_put(rv); + return NULL; + } + return rv; + } + + new = kmalloc(sizeof(*new), GFP_KERNEL); + if (new == NULL) + return NULL; + kref_init(&new->h.ref); + new->h.name = kstrdup(name, GFP_KERNEL); + new->h.flavour = &svcauth_unix; + new->addr_changes = 0; + rv = auth_domain_lookup(name, &new->h); } - - return rv; } static void svcauth_unix_domain_release(struct auth_domain *dom) @@ -90,15 +84,15 @@ struct ip_map { }; static struct cache_head *ip_table[IP_HASHMAX]; -static void ip_map_put(struct cache_head *item, struct cache_detail *cd) +static void ip_map_put(struct kref *kref) { + struct cache_head *item = container_of(kref, struct cache_head, ref); struct ip_map *im = container_of(item, struct ip_map,h); - if (cache_put(item, cd)) { - if (test_bit(CACHE_VALID, &item->flags) && - !test_bit(CACHE_NEGATIVE, &item->flags)) - auth_domain_put(&im->m_client->h); - kfree(im); - } + + if (test_bit(CACHE_VALID, &item->flags) && + !test_bit(CACHE_NEGATIVE, &item->flags)) + auth_domain_put(&im->m_client->h); + kfree(im); } #if IP_HASHBITS == 8 @@ -112,28 +106,38 @@ static inline int hash_ip(unsigned long ip) return (hash ^ (hash>>8)) & 0xff; } #endif - -static inline int ip_map_hash(struct ip_map *item) -{ - return hash_str(item->m_class, IP_HASHBITS) ^ - hash_ip((unsigned long)item->m_addr.s_addr); -} -static inline int ip_map_match(struct ip_map *item, struct ip_map *tmp) +static int ip_map_match(struct cache_head *corig, struct cache_head *cnew) { - return strcmp(tmp->m_class, item->m_class) == 0 - && tmp->m_addr.s_addr == item->m_addr.s_addr; + struct ip_map *orig = container_of(corig, struct ip_map, h); + struct ip_map *new = container_of(cnew, struct ip_map, h); + return strcmp(orig->m_class, new->m_class) == 0 + && orig->m_addr.s_addr == new->m_addr.s_addr; } -static inline void ip_map_init(struct ip_map *new, struct ip_map *item) +static void ip_map_init(struct cache_head *cnew, struct cache_head *citem) { + struct ip_map *new = container_of(cnew, struct ip_map, h); + struct ip_map *item = container_of(citem, struct ip_map, h); + strcpy(new->m_class, item->m_class); new->m_addr.s_addr = item->m_addr.s_addr; } -static inline void ip_map_update(struct ip_map *new, struct ip_map *item) +static void update(struct cache_head *cnew, struct cache_head *citem) { - cache_get(&item->m_client->h.h); + struct ip_map *new = container_of(cnew, struct ip_map, h); + struct ip_map *item = container_of(citem, struct ip_map, h); + + kref_get(&item->m_client->h.ref); new->m_client = item->m_client; new->m_add_change = item->m_add_change; } +static struct cache_head *ip_map_alloc(void) +{ + struct ip_map *i = kmalloc(sizeof(*i), GFP_KERNEL); + if (i) + return &i->h; + else + return NULL; +} static void ip_map_request(struct cache_detail *cd, struct cache_head *h, @@ -154,7 +158,8 @@ static void ip_map_request(struct cache_detail *cd, (*bpp)[-1] = '\n'; } -static struct ip_map *ip_map_lookup(struct ip_map *, int); +static struct ip_map 
*ip_map_lookup(char *class, struct in_addr addr); +static int ip_map_update(struct ip_map *ipm, struct unix_domain *udom, time_t expiry); static int ip_map_parse(struct cache_detail *cd, char *mesg, int mlen) @@ -166,7 +171,11 @@ static int ip_map_parse(struct cache_detail *cd, int len; int b1,b2,b3,b4; char c; - struct ip_map ipm, *ipmp; + char class[8]; + struct in_addr addr; + int err; + + struct ip_map *ipmp; struct auth_domain *dom; time_t expiry; @@ -175,7 +184,7 @@ static int ip_map_parse(struct cache_detail *cd, mesg[mlen-1] = 0; /* class */ - len = qword_get(&mesg, ipm.m_class, sizeof(ipm.m_class)); + len = qword_get(&mesg, class, sizeof(class)); if (len <= 0) return -EINVAL; /* ip address */ @@ -200,25 +209,22 @@ static int ip_map_parse(struct cache_detail *cd, } else dom = NULL; - ipm.m_addr.s_addr = + addr.s_addr = htonl((((((b1<<8)|b2)<<8)|b3)<<8)|b4); - ipm.h.flags = 0; - if (dom) { - ipm.m_client = container_of(dom, struct unix_domain, h); - ipm.m_add_change = ipm.m_client->addr_changes; + + ipmp = ip_map_lookup(class,addr); + if (ipmp) { + err = ip_map_update(ipmp, + container_of(dom, struct unix_domain, h), + expiry); } else - set_bit(CACHE_NEGATIVE, &ipm.h.flags); - ipm.h.expiry_time = expiry; + err = -ENOMEM; - ipmp = ip_map_lookup(&ipm, 1); - if (ipmp) - ip_map_put(&ipmp->h, &ip_map_cache); if (dom) auth_domain_put(dom); - if (!ipmp) - return -ENOMEM; + cache_flush(); - return 0; + return err; } static int ip_map_show(struct seq_file *m, @@ -262,32 +268,70 @@ struct cache_detail ip_map_cache = { .cache_request = ip_map_request, .cache_parse = ip_map_parse, .cache_show = ip_map_show, + .match = ip_map_match, + .init = ip_map_init, + .update = update, + .alloc = ip_map_alloc, }; -static DefineSimpleCacheLookup(ip_map, 0) +static struct ip_map *ip_map_lookup(char *class, struct in_addr addr) +{ + struct ip_map ip; + struct cache_head *ch; + + strcpy(ip.m_class, class); + ip.m_addr = addr; + ch = sunrpc_cache_lookup(&ip_map_cache, &ip.h, + hash_str(class, IP_HASHBITS) ^ + hash_ip((unsigned long)addr.s_addr)); + + if (ch) + return container_of(ch, struct ip_map, h); + else + return NULL; +} +static int ip_map_update(struct ip_map *ipm, struct unix_domain *udom, time_t expiry) +{ + struct ip_map ip; + struct cache_head *ch; + + ip.m_client = udom; + ip.h.flags = 0; + if (!udom) + set_bit(CACHE_NEGATIVE, &ip.h.flags); + else { + ip.m_add_change = udom->addr_changes; + /* if this is from the legacy set_client system call, + * we need m_add_change to be one higher + */ + if (expiry == NEVER) + ip.m_add_change++; + } + ip.h.expiry_time = expiry; + ch = sunrpc_cache_update(&ip_map_cache, + &ip.h, &ipm->h, + hash_str(ipm->m_class, IP_HASHBITS) ^ + hash_ip((unsigned long)ipm->m_addr.s_addr)); + if (!ch) + return -ENOMEM; + cache_put(ch, &ip_map_cache); + return 0; +} int auth_unix_add_addr(struct in_addr addr, struct auth_domain *dom) { struct unix_domain *udom; - struct ip_map ip, *ipmp; + struct ip_map *ipmp; - if (dom->flavour != RPC_AUTH_UNIX) + if (dom->flavour != &svcauth_unix) return -EINVAL; udom = container_of(dom, struct unix_domain, h); - strcpy(ip.m_class, "nfsd"); - ip.m_addr = addr; - ip.m_client = udom; - ip.m_add_change = udom->addr_changes+1; - ip.h.flags = 0; - ip.h.expiry_time = NEVER; - - ipmp = ip_map_lookup(&ip, 1); + ipmp = ip_map_lookup("nfsd", addr); - if (ipmp) { - ip_map_put(&ipmp->h, &ip_map_cache); - return 0; - } else + if (ipmp) + return ip_map_update(ipmp, udom, NEVER); + else return -ENOMEM; } @@ -295,7 +339,7 @@ int auth_unix_forget_old(struct 
auth_domain *dom) { struct unix_domain *udom; - if (dom->flavour != RPC_AUTH_UNIX) + if (dom->flavour != &svcauth_unix) return -EINVAL; udom = container_of(dom, struct unix_domain, h); udom->addr_changes++; @@ -310,7 +354,7 @@ struct auth_domain *auth_unix_lookup(struct in_addr addr) strcpy(key.m_class, "nfsd"); key.m_addr = addr; - ipm = ip_map_lookup(&key, 0); + ipm = ip_map_lookup("nfsd", addr); if (!ipm) return NULL; @@ -323,31 +367,28 @@ struct auth_domain *auth_unix_lookup(struct in_addr addr) rv = NULL; } else { rv = &ipm->m_client->h; - cache_get(&rv->h); + kref_get(&rv->ref); } - ip_map_put(&ipm->h, &ip_map_cache); + cache_put(&ipm->h, &ip_map_cache); return rv; } void svcauth_unix_purge(void) { cache_purge(&ip_map_cache); - cache_purge(&auth_domain_cache); } static int svcauth_unix_set_client(struct svc_rqst *rqstp) { - struct ip_map key, *ipm; + struct ip_map *ipm; rqstp->rq_client = NULL; if (rqstp->rq_proc == 0) return SVC_OK; - strcpy(key.m_class, rqstp->rq_server->sv_program->pg_class); - key.m_addr = rqstp->rq_addr.sin_addr; - - ipm = ip_map_lookup(&key, 0); + ipm = ip_map_lookup(rqstp->rq_server->sv_program->pg_class, + rqstp->rq_addr.sin_addr); if (ipm == NULL) return SVC_DENIED; @@ -361,8 +402,8 @@ svcauth_unix_set_client(struct svc_rqst *rqstp) return SVC_DENIED; case 0: rqstp->rq_client = &ipm->m_client->h; - cache_get(&rqstp->rq_client->h); - ip_map_put(&ipm->h, &ip_map_cache); + kref_get(&rqstp->rq_client->ref); + cache_put(&ipm->h, &ip_map_cache); break; } return SVC_OK; diff --git a/sound/core/init.c b/sound/core/init.c index ad68761abba1..5bb8a8b23d51 100644 --- a/sound/core/init.c +++ b/sound/core/init.c @@ -223,7 +223,8 @@ int snd_card_disconnect(struct snd_card *card) struct snd_monitor_file *mfile; struct file *file; struct snd_shutdown_f_ops *s_f_ops; - struct file_operations *f_ops, *old_f_ops; + struct file_operations *f_ops; + const struct file_operations *old_f_ops; int err; spin_lock(&card->files_lock); diff --git a/sound/core/rawmidi.c b/sound/core/rawmidi.c index 6b7a36774298..87b47c9564f7 100644 --- a/sound/core/rawmidi.c +++ b/sound/core/rawmidi.c @@ -631,7 +631,8 @@ int snd_rawmidi_output_params(struct snd_rawmidi_substream *substream, return -EINVAL; } if (params->buffer_size != runtime->buffer_size) { - if ((newbuf = (char *) kmalloc(params->buffer_size, GFP_KERNEL)) == NULL) + newbuf = kmalloc(params->buffer_size, GFP_KERNEL); + if (!newbuf) return -ENOMEM; kfree(runtime->buffer); runtime->buffer = newbuf; @@ -657,7 +658,8 @@ int snd_rawmidi_input_params(struct snd_rawmidi_substream *substream, return -EINVAL; } if (params->buffer_size != runtime->buffer_size) { - if ((newbuf = (char *) kmalloc(params->buffer_size, GFP_KERNEL)) == NULL) + newbuf = kmalloc(params->buffer_size, GFP_KERNEL); + if (!newbuf) return -ENOMEM; kfree(runtime->buffer); runtime->buffer = newbuf; diff --git a/sound/core/sound.c b/sound/core/sound.c index 4d28e5212611..108e430b5036 100644 --- a/sound/core/sound.c +++ b/sound/core/sound.c @@ -137,7 +137,7 @@ static int snd_open(struct inode *inode, struct file *file) { unsigned int minor = iminor(inode); struct snd_minor *mptr = NULL; - struct file_operations *old_fops; + const struct file_operations *old_fops; int err = 0; if (minor >= ARRAY_SIZE(snd_minors)) @@ -240,7 +240,7 @@ static int snd_kernel_minor(int type, struct snd_card *card, int dev) * Retrurns zero if successful, or a negative error code on failure. 
*/ int snd_register_device(int type, struct snd_card *card, int dev, - struct file_operations *f_ops, void *private_data, + const struct file_operations *f_ops, void *private_data, const char *name) { int minor; diff --git a/sound/core/sound_oss.c b/sound/core/sound_oss.c index 4023d3b406de..9055c6de9587 100644 --- a/sound/core/sound_oss.c +++ b/sound/core/sound_oss.c @@ -95,7 +95,7 @@ static int snd_oss_kernel_minor(int type, struct snd_card *card, int dev) } int snd_register_oss_device(int type, struct snd_card *card, int dev, - struct file_operations *f_ops, void *private_data, + const struct file_operations *f_ops, void *private_data, const char *name) { int minor = snd_oss_kernel_minor(type, card, dev); diff --git a/sound/drivers/mpu401/mpu401.c b/sound/drivers/mpu401/mpu401.c index 9d10d79e27af..9ea3059a7064 100644 --- a/sound/drivers/mpu401/mpu401.c +++ b/sound/drivers/mpu401/mpu401.c @@ -59,7 +59,8 @@ module_param_array(irq, int, NULL, 0444); MODULE_PARM_DESC(irq, "IRQ # for MPU-401 device."); static struct platform_device *platform_devices[SNDRV_CARDS]; -static int pnp_registered = 0; +static int pnp_registered; +static unsigned int snd_mpu401_devices; static int snd_mpu401_create(int dev, struct snd_card **rcard) { @@ -197,6 +198,7 @@ static int __devinit snd_mpu401_pnp_probe(struct pnp_dev *pnp_dev, } snd_card_set_dev(card, &pnp_dev->dev); pnp_set_drvdata(pnp_dev, card); + snd_mpu401_devices++; ++dev; return 0; } @@ -234,12 +236,11 @@ static void __init_or_module snd_mpu401_unregister_all(void) static int __init alsa_card_mpu401_init(void) { - int i, err, devices; + int i, err; if ((err = platform_driver_register(&snd_mpu401_driver)) < 0) return err; - devices = 0; for (i = 0; i < SNDRV_CARDS; i++) { struct platform_device *device; if (! 
enable[i]) @@ -255,14 +256,13 @@ static int __init alsa_card_mpu401_init(void) goto errout; } platform_devices[i] = device; - devices++; + snd_mpu401_devices++; } - if ((err = pnp_register_driver(&snd_mpu401_pnp_driver)) >= 0) { + err = pnp_register_driver(&snd_mpu401_pnp_driver); + if (!err) pnp_registered = 1; - devices += err; - } - if (!devices) { + if (!snd_mpu401_devices) { #ifdef MODULE printk(KERN_ERR "MPU-401 device not found or device busy\n"); #endif diff --git a/sound/isa/ad1816a/ad1816a.c b/sound/isa/ad1816a/ad1816a.c index 7051f7798ed7..31f299aed281 100644 --- a/sound/isa/ad1816a/ad1816a.c +++ b/sound/isa/ad1816a/ad1816a.c @@ -262,6 +262,8 @@ static int __devinit snd_card_ad1816a_probe(int dev, struct pnp_card_link *pcard return 0; } +static unsigned int __devinitdata ad1816a_devices; + static int __devinit snd_ad1816a_pnp_detect(struct pnp_card_link *card, const struct pnp_card_device_id *id) { @@ -275,6 +277,7 @@ static int __devinit snd_ad1816a_pnp_detect(struct pnp_card_link *card, if (res < 0) return res; dev++; + ad1816a_devices++; return 0; } return -ENODEV; @@ -297,10 +300,13 @@ static struct pnp_card_driver ad1816a_pnpc_driver = { static int __init alsa_card_ad1816a_init(void) { - int cards; + int err; + + err = pnp_register_card_driver(&ad1816a_pnpc_driver); + if (err) + return err; - cards = pnp_register_card_driver(&ad1816a_pnpc_driver); - if (cards <= 0) { + if (!ad1816a_devices) { pnp_unregister_card_driver(&ad1816a_pnpc_driver); #ifdef MODULE printk(KERN_ERR "no AD1816A based soundcards found.\n"); diff --git a/sound/isa/als100.c b/sound/isa/als100.c index 9b77c17b3f66..a52bd8a14c9b 100644 --- a/sound/isa/als100.c +++ b/sound/isa/als100.c @@ -199,7 +199,7 @@ static int __devinit snd_card_als100_pnp(int dev, struct snd_card_als100 *acard, return 0; } -static int __init snd_card_als100_probe(int dev, +static int __devinit snd_card_als100_probe(int dev, struct pnp_card_link *pcard, const struct pnp_card_device_id *pid) { @@ -281,6 +281,8 @@ static int __init snd_card_als100_probe(int dev, return 0; } +static unsigned int __devinitdata als100_devices; + static int __devinit snd_als100_pnp_detect(struct pnp_card_link *card, const struct pnp_card_device_id *id) { @@ -294,6 +296,7 @@ static int __devinit snd_als100_pnp_detect(struct pnp_card_link *card, if (res < 0) return res; dev++; + als100_devices++; return 0; } return -ENODEV; @@ -345,10 +348,13 @@ static struct pnp_card_driver als100_pnpc_driver = { static int __init alsa_card_als100_init(void) { - int cards; + int err; + + err = pnp_register_card_driver(&als100_pnpc_driver); + if (err) + return err; - cards = pnp_register_card_driver(&als100_pnpc_driver); - if (cards <= 0) { + if (!als100_devices) { pnp_unregister_card_driver(&als100_pnpc_driver); #ifdef MODULE snd_printk(KERN_ERR "no ALS100 based soundcards found\n"); diff --git a/sound/isa/azt2320.c b/sound/isa/azt2320.c index a530691bf4f7..15e59283aac6 100644 --- a/sound/isa/azt2320.c +++ b/sound/isa/azt2320.c @@ -310,6 +310,8 @@ static int __devinit snd_card_azt2320_probe(int dev, return 0; } +static unsigned int __devinitdata azt2320_devices; + static int __devinit snd_azt2320_pnp_detect(struct pnp_card_link *card, const struct pnp_card_device_id *id) { @@ -323,6 +325,7 @@ static int __devinit snd_azt2320_pnp_detect(struct pnp_card_link *card, if (res < 0) return res; dev++; + azt2320_devices++; return 0; } return -ENODEV; @@ -372,10 +375,13 @@ static struct pnp_card_driver azt2320_pnpc_driver = { static int __init alsa_card_azt2320_init(void) { - int 
cards; + int err; + + err = pnp_register_card_driver(&azt2320_pnpc_driver); + if (err) + return err; - cards = pnp_register_card_driver(&azt2320_pnpc_driver); - if (cards <= 0) { + if (!azt2320_devices) { pnp_unregister_card_driver(&azt2320_pnpc_driver); #ifdef MODULE snd_printk(KERN_ERR "no AZT2320 based soundcards found\n"); diff --git a/sound/isa/cmi8330.c b/sound/isa/cmi8330.c index fd9bb2575de8..fa63048a8b9d 100644 --- a/sound/isa/cmi8330.c +++ b/sound/isa/cmi8330.c @@ -175,7 +175,7 @@ MODULE_DEVICE_TABLE(pnp_card, snd_cmi8330_pnpids); #endif -static struct ad1848_mix_elem snd_cmi8330_controls[] __initdata = { +static struct ad1848_mix_elem snd_cmi8330_controls[] __devinitdata = { AD1848_DOUBLE("Master Playback Volume", 0, CMI8330_MASTVOL, CMI8330_MASTVOL, 4, 0, 15, 0), AD1848_SINGLE("Loud Playback Switch", 0, CMI8330_MUTEMUX, 6, 1, 1), AD1848_DOUBLE("PCM Playback Switch", 0, AD1848_LEFT_OUTPUT, AD1848_RIGHT_OUTPUT, 7, 7, 1, 1), @@ -204,7 +204,7 @@ AD1848_SINGLE(SNDRV_CTL_NAME_IEC958("Input ",PLAYBACK,SWITCH), 0, CMI8330_MUTEMU }; #ifdef ENABLE_SB_MIXER -static struct sbmix_elem cmi8330_sb_mixers[] __initdata = { +static struct sbmix_elem cmi8330_sb_mixers[] __devinitdata = { SB_DOUBLE("SB Master Playback Volume", SB_DSP4_MASTER_DEV, (SB_DSP4_MASTER_DEV + 1), 3, 3, 31), SB_DOUBLE("Tone Control - Bass", SB_DSP4_BASS_DEV, (SB_DSP4_BASS_DEV + 1), 4, 4, 15), SB_DOUBLE("Tone Control - Treble", SB_DSP4_TREBLE_DEV, (SB_DSP4_TREBLE_DEV + 1), 4, 4, 15), @@ -222,7 +222,7 @@ SB_DOUBLE("SB Playback Volume", SB_DSP4_OGAIN_DEV, (SB_DSP4_OGAIN_DEV + 1), 6, 6 SB_SINGLE("SB Mic Auto Gain", SB_DSP4_MIC_AGC, 0, 1), }; -static unsigned char cmi8330_sb_init_values[][2] __initdata = { +static unsigned char cmi8330_sb_init_values[][2] __devinitdata = { { SB_DSP4_MASTER_DEV + 0, 0 }, { SB_DSP4_MASTER_DEV + 1, 0 }, { SB_DSP4_PCM_DEV + 0, 0 }, @@ -545,7 +545,7 @@ static int __devinit snd_cmi8330_probe(struct snd_card *card, int dev) return snd_card_register(card); } -static int __init snd_cmi8330_nonpnp_probe(struct platform_device *pdev) +static int __devinit snd_cmi8330_nonpnp_probe(struct platform_device *pdev) { struct snd_card *card; int err; @@ -607,6 +607,8 @@ static struct platform_driver snd_cmi8330_driver = { #ifdef CONFIG_PNP +static unsigned int __devinitdata cmi8330_pnp_devices; + static int __devinit snd_cmi8330_pnp_detect(struct pnp_card_link *pcard, const struct pnp_card_device_id *pid) { @@ -636,6 +638,7 @@ static int __devinit snd_cmi8330_pnp_detect(struct pnp_card_link *pcard, } pnp_set_card_drvdata(pcard, card); dev++; + cmi8330_pnp_devices++; return 0; } @@ -706,9 +709,9 @@ static int __init alsa_card_cmi8330_init(void) #ifdef CONFIG_PNP err = pnp_register_card_driver(&cmi8330_pnpc_driver); - if (err >= 0) { + if (!err) { pnp_registered = 1; - cards += err; + cards += cmi8330_pnp_devices; } #endif diff --git a/sound/isa/cs423x/cs4236.c b/sound/isa/cs423x/cs4236.c index 4060918e0327..382bb17ef49f 100644 --- a/sound/isa/cs423x/cs4236.c +++ b/sound/isa/cs423x/cs4236.c @@ -133,6 +133,7 @@ static int pnpc_registered; static int pnp_registered; #endif #endif /* CONFIG_PNP */ +static unsigned int snd_cs423x_devices; struct snd_card_cs4236 { struct snd_cs4231 *chip; @@ -564,7 +565,7 @@ static int __init snd_cs423x_nonpnp_probe(struct platform_device *pdev) snd_card_free(card); return err; } - + platform_set_drvdata(pdev, card); return 0; } @@ -650,6 +651,7 @@ static int __devinit snd_cs4232_pnpbios_detect(struct pnp_dev *pdev, } pnp_set_drvdata(pdev, card); dev++; + snd_cs423x_devices++; return 0; 
} @@ -713,6 +715,7 @@ static int __devinit snd_cs423x_pnpc_detect(struct pnp_card_link *pcard, } pnp_set_card_drvdata(pcard, card); dev++; + snd_cs423x_devices++; return 0; } @@ -721,7 +724,7 @@ static void __devexit snd_cs423x_pnpc_remove(struct pnp_card_link * pcard) snd_card_free(pnp_get_card_drvdata(pcard)); pnp_set_card_drvdata(pcard, NULL); } - + #ifdef CONFIG_PM static int snd_cs423x_pnpc_suspend(struct pnp_card_link *pcard, pm_message_t state) { @@ -766,7 +769,7 @@ static void __init_or_module snd_cs423x_unregister_all(void) static int __init alsa_card_cs423x_init(void) { - int i, err, cards = 0; + int i, err; if ((err = platform_driver_register(&cs423x_nonpnp_driver)) < 0) return err; @@ -782,24 +785,20 @@ static int __init alsa_card_cs423x_init(void) goto errout; } platform_devices[i] = device; - cards++; + snd_cs423x_devices++; } #ifdef CONFIG_PNP #ifdef CS4232 - i = pnp_register_driver(&cs4232_pnp_driver); - if (i >= 0) { + err = pnp_register_driver(&cs4232_pnp_driver); + if (!err) pnp_registered = 1; - cards += i; - } #endif - i = pnp_register_card_driver(&cs423x_pnpc_driver); - if (i >= 0) { + err = pnp_register_card_driver(&cs423x_pnpc_driver); + if (!err) pnpc_registered = 1; - cards += i; - } #endif /* CONFIG_PNP */ - if (!cards) { + if (!snd_cs423x_devices) { #ifdef MODULE printk(KERN_ERR IDENT " soundcard not found or device busy\n"); #endif diff --git a/sound/isa/dt019x.c b/sound/isa/dt019x.c index 50e7bc5ef561..0acb4e5da47f 100644 --- a/sound/isa/dt019x.c +++ b/sound/isa/dt019x.c @@ -272,6 +272,8 @@ static int __devinit snd_card_dt019x_probe(int dev, struct pnp_card_link *pcard, return 0; } +static unsigned int __devinitdata dt019x_devices; + static int __devinit snd_dt019x_pnp_probe(struct pnp_card_link *card, const struct pnp_card_device_id *pid) { @@ -285,6 +287,7 @@ static int __devinit snd_dt019x_pnp_probe(struct pnp_card_link *card, if (res < 0) return res; dev++; + dt019x_devices++; return 0; } return -ENODEV; @@ -336,10 +339,13 @@ static struct pnp_card_driver dt019x_pnpc_driver = { static int __init alsa_card_dt019x_init(void) { - int cards = 0; + int err; + + err = pnp_register_card_driver(&dt019x_pnpc_driver); + if (err) + return err; - cards = pnp_register_card_driver(&dt019x_pnpc_driver); - if (cards <= 0) { + if (!dt019x_devices) { pnp_unregister_card_driver(&dt019x_pnpc_driver); #ifdef MODULE snd_printk(KERN_ERR "no DT-019X / ALS-007 based soundcards found\n"); diff --git a/sound/isa/es18xx.c b/sound/isa/es18xx.c index 721955d26194..9fbc185b4cc2 100644 --- a/sound/isa/es18xx.c +++ b/sound/isa/es18xx.c @@ -2204,7 +2204,7 @@ static int __devinit snd_audiodrive_probe(struct snd_card *card, int dev) return snd_card_register(card); } -static int __init snd_es18xx_nonpnp_probe1(int dev, struct platform_device *devptr) +static int __devinit snd_es18xx_nonpnp_probe1(int dev, struct platform_device *devptr) { struct snd_card *card; int err; @@ -2221,7 +2221,7 @@ static int __init snd_es18xx_nonpnp_probe1(int dev, struct platform_device *devp return 0; } -static int __init snd_es18xx_nonpnp_probe(struct platform_device *pdev) +static int __devinit snd_es18xx_nonpnp_probe(struct platform_device *pdev) { int dev = pdev->id; int err; @@ -2297,6 +2297,8 @@ static struct platform_driver snd_es18xx_nonpnp_driver = { #ifdef CONFIG_PNP +static unsigned int __devinitdata es18xx_pnp_devices; + static int __devinit snd_audiodrive_pnp_detect(struct pnp_card_link *pcard, const struct pnp_card_device_id *pid) { @@ -2327,6 +2329,7 @@ static int __devinit 
snd_audiodrive_pnp_detect(struct pnp_card_link *pcard, pnp_set_card_drvdata(pcard, card); dev++; + es18xx_pnp_devices++; return 0; } @@ -2397,10 +2400,10 @@ static int __init alsa_card_es18xx_init(void) } #ifdef CONFIG_PNP - i = pnp_register_card_driver(&es18xx_pnpc_driver); - if (i >= 0) { + err = pnp_register_card_driver(&es18xx_pnpc_driver); + if (!err) { pnp_registered = 1; - cards += i; + cards += es18xx_pnp_devices; } #endif diff --git a/sound/isa/gus/interwave.c b/sound/isa/gus/interwave.c index 2cacd0fa6871..de71b7a99c83 100644 --- a/sound/isa/gus/interwave.c +++ b/sound/isa/gus/interwave.c @@ -791,7 +791,7 @@ static int __devinit snd_interwave_probe(struct snd_card *card, int dev) return 0; } -static int __init snd_interwave_nonpnp_probe1(int dev, struct platform_device *devptr) +static int __devinit snd_interwave_nonpnp_probe1(int dev, struct platform_device *devptr) { struct snd_card *card; int err; @@ -809,7 +809,7 @@ static int __init snd_interwave_nonpnp_probe1(int dev, struct platform_device *d return 0; } -static int __init snd_interwave_nonpnp_probe(struct platform_device *pdev) +static int __devinit snd_interwave_nonpnp_probe(struct platform_device *pdev) { int dev = pdev->id; int err; @@ -867,6 +867,7 @@ static struct platform_driver snd_interwave_driver = { }; #ifdef CONFIG_PNP +static unsigned int __devinitdata interwave_pnp_devices; static int __devinit snd_interwave_pnp_detect(struct pnp_card_link *pcard, const struct pnp_card_device_id *pid) @@ -897,6 +898,7 @@ static int __devinit snd_interwave_pnp_detect(struct pnp_card_link *pcard, } pnp_set_card_drvdata(pcard, card); dev++; + interwave_pnp_devices++; return 0; } @@ -954,10 +956,10 @@ static int __init alsa_card_interwave_init(void) } /* ISA PnP cards */ - i = pnp_register_card_driver(&interwave_pnpc_driver); - if (i >= 0) { + err = pnp_register_card_driver(&interwave_pnpc_driver); + if (!err) { pnp_registered = 1; - cards += i; + cards += interwave_pnp_devices;; } if (!cards) { diff --git a/sound/isa/opl3sa2.c b/sound/isa/opl3sa2.c index 56fcd8a946a4..c906e205d7d5 100644 --- a/sound/isa/opl3sa2.c +++ b/sound/isa/opl3sa2.c @@ -95,6 +95,7 @@ static struct platform_device *platform_devices[SNDRV_CARDS]; static int pnp_registered; static int pnpc_registered; #endif +static unsigned int snd_opl3sa2_devices; /* control ports */ #define OPL3SA2_PM_CTRL 0x01 @@ -760,6 +761,7 @@ static int __devinit snd_opl3sa2_pnp_detect(struct pnp_dev *pdev, } pnp_set_drvdata(pdev, card); dev++; + snd_opl3sa2_devices++; return 0; } @@ -826,6 +828,7 @@ static int __devinit snd_opl3sa2_pnp_cdetect(struct pnp_card_link *pcard, } pnp_set_card_drvdata(pcard, card); dev++; + snd_opl3sa2_devices++; return 0; } @@ -944,7 +947,7 @@ static void __init_or_module snd_opl3sa2_unregister_all(void) static int __init alsa_card_opl3sa2_init(void) { - int i, err, cards = 0; + int i, err; if ((err = platform_driver_register(&snd_opl3sa2_nonpnp_driver)) < 0) return err; @@ -964,23 +967,19 @@ static int __init alsa_card_opl3sa2_init(void) goto errout; } platform_devices[i] = device; - cards++; + snd_opl3sa2_devices++; } #ifdef CONFIG_PNP err = pnp_register_driver(&opl3sa2_pnp_driver); - if (err >= 0) { + if (!err) pnp_registered = 1; - cards += err; - } err = pnp_register_card_driver(&opl3sa2_pnpc_driver); - if (err >= 0) { + if (!err) pnpc_registered = 1; - cards += err; - } #endif - if (!cards) { + if (!snd_opl3sa2_devices) { #ifdef MODULE snd_printk(KERN_ERR "Yamaha OPL3-SA soundcard not found or device busy\n"); #endif diff --git a/sound/isa/sb/es968.c 
b/sound/isa/sb/es968.c index 9da80bfa3027..d4d65b84265a 100644 --- a/sound/isa/sb/es968.c +++ b/sound/isa/sb/es968.c @@ -124,7 +124,7 @@ static int __devinit snd_card_es968_pnp(int dev, struct snd_card_es968 *acard, return 0; } -static int __init snd_card_es968_probe(int dev, +static int __devinit snd_card_es968_probe(int dev, struct pnp_card_link *pcard, const struct pnp_card_device_id *pid) { @@ -182,6 +182,8 @@ static int __init snd_card_es968_probe(int dev, return 0; } +static unsigned int __devinitdata es968_devices; + static int __devinit snd_es968_pnp_detect(struct pnp_card_link *card, const struct pnp_card_device_id *id) { @@ -195,6 +197,7 @@ static int __devinit snd_es968_pnp_detect(struct pnp_card_link *card, if (res < 0) return res; dev++; + es968_devices++; return 0; } return -ENODEV; @@ -246,8 +249,11 @@ static struct pnp_card_driver es968_pnpc_driver = { static int __init alsa_card_es968_init(void) { - int cards = pnp_register_card_driver(&es968_pnpc_driver); - if (cards <= 0) { + int err = pnp_register_card_driver(&es968_pnpc_driver); + if (err) + return err; + + if (!es968_devices) { pnp_unregister_card_driver(&es968_pnpc_driver); #ifdef MODULE snd_printk(KERN_ERR "no ES968 based soundcards found\n"); diff --git a/sound/isa/sb/sb16.c b/sound/isa/sb/sb16.c index 5737ab76160c..21ea65925a9e 100644 --- a/sound/isa/sb/sb16.c +++ b/sound/isa/sb/sb16.c @@ -369,7 +369,7 @@ static struct snd_card *snd_sb16_card_new(int dev) return card; } -static int __init snd_sb16_probe(struct snd_card *card, int dev) +static int __devinit snd_sb16_probe(struct snd_card *card, int dev) { int xirq, xdma8, xdma16; struct snd_sb *chip; @@ -518,7 +518,7 @@ static int snd_sb16_resume(struct snd_card *card) } #endif -static int __init snd_sb16_nonpnp_probe1(int dev, struct platform_device *devptr) +static int __devinit snd_sb16_nonpnp_probe1(int dev, struct platform_device *devptr) { struct snd_card_sb16 *acard; struct snd_card *card; @@ -548,7 +548,7 @@ static int __init snd_sb16_nonpnp_probe1(int dev, struct platform_device *devptr } -static int __init snd_sb16_nonpnp_probe(struct platform_device *pdev) +static int __devinit snd_sb16_nonpnp_probe(struct platform_device *pdev) { int dev = pdev->id; int err; @@ -629,6 +629,7 @@ static struct platform_driver snd_sb16_nonpnp_driver = { #ifdef CONFIG_PNP +static unsigned int __devinitdata sb16_pnp_devices; static int __devinit snd_sb16_pnp_detect(struct pnp_card_link *pcard, const struct pnp_card_device_id *pid) @@ -651,6 +652,7 @@ static int __devinit snd_sb16_pnp_detect(struct pnp_card_link *pcard, } pnp_set_card_drvdata(pcard, card); dev++; + sb16_pnp_devices++; return 0; } @@ -727,10 +729,10 @@ static int __init alsa_card_sb16_init(void) } #ifdef CONFIG_PNP /* PnP cards at last */ - i = pnp_register_card_driver(&sb16_pnpc_driver); - if (i >= 0) { + err = pnp_register_card_driver(&sb16_pnpc_driver); + if (!err) { pnp_registered = 1; - cards += i; + cards += sb16_pnp_devices; } #endif diff --git a/sound/isa/sscape.c b/sound/isa/sscape.c index 29bba8cc3ef3..48e5552d3444 100644 --- a/sound/isa/sscape.c +++ b/sound/isa/sscape.c @@ -1255,7 +1255,7 @@ static int __devinit create_sscape(int dev, struct snd_card **rcardp) } -static int __init snd_sscape_probe(struct platform_device *pdev) +static int __devinit snd_sscape_probe(struct platform_device *pdev) { int dev = pdev->id; struct snd_card *card; @@ -1469,7 +1469,7 @@ static int __init sscape_init(void) if (ret < 0) return ret; #ifdef CONFIG_PNP - if (pnp_register_card_driver(&sscape_pnpc_driver) >= 0) + if 
(pnp_register_card_driver(&sscape_pnpc_driver) == 0) pnp_registered = 1; #endif return 0; diff --git a/sound/isa/wavefront/wavefront.c b/sound/isa/wavefront/wavefront.c index c0115bf9065e..2f13cd5d4dcb 100644 --- a/sound/isa/wavefront/wavefront.c +++ b/sound/isa/wavefront/wavefront.c @@ -589,7 +589,7 @@ snd_wavefront_probe (struct snd_card *card, int dev) return snd_card_register(card); } -static int __init snd_wavefront_nonpnp_probe(struct platform_device *pdev) +static int __devinit snd_wavefront_nonpnp_probe(struct platform_device *pdev) { int dev = pdev->id; struct snd_card *card; @@ -637,6 +637,7 @@ static struct platform_driver snd_wavefront_driver = { #ifdef CONFIG_PNP +static unsigned int __devinitdata wavefront_pnp_devices; static int __devinit snd_wavefront_pnp_detect(struct pnp_card_link *pcard, const struct pnp_card_device_id *pid) @@ -670,6 +671,7 @@ static int __devinit snd_wavefront_pnp_detect(struct pnp_card_link *pcard, pnp_set_card_drvdata(pcard, card); dev++; + wavefront_pnp_devices++; return 0; } @@ -729,10 +731,10 @@ static int __init alsa_card_wavefront_init(void) } #ifdef CONFIG_PNP - i = pnp_register_card_driver(&wavefront_pnpc_driver); - if (i >= 0) { + err = pnp_register_card_driver(&wavefront_pnpc_driver); + if (!err) { pnp_registered = 1; - cards += i; + cards += wavefront_pnp_devices; } #endif diff --git a/sound/oss/cmpci.c b/sound/oss/cmpci.c index 1fbd5137f6d7..de60a059ff5f 100644 --- a/sound/oss/cmpci.c +++ b/sound/oss/cmpci.c @@ -1713,7 +1713,7 @@ static int mixer_ioctl(struct cm_state *s, unsigned int cmd, unsigned long arg) case SOUND_MIXER_RECSRC: /* Arg contains a bit for each recording source */ if (get_user(val, p)) return -EFAULT; - i = generic_hweight32(val); + i = hweight32(val); for (j = i = 0; i < SOUND_MIXER_NRDEVICES; i++) { if (!(val & (1 << i))) continue; diff --git a/sound/oss/cs4232.c b/sound/oss/cs4232.c index 7c59e2d4003a..c7f86f09c28d 100644 --- a/sound/oss/cs4232.c +++ b/sound/oss/cs4232.c @@ -360,6 +360,8 @@ static int __initdata synthio = -1; static int __initdata synthirq = -1; static int __initdata isapnp = 1; +static unsigned int cs4232_devices; + MODULE_DESCRIPTION("CS4232 based soundcard driver"); MODULE_AUTHOR("Hannu Savolainen, Paul Barton-Davis"); MODULE_LICENSE("GPL"); @@ -421,6 +423,7 @@ static int cs4232_pnp_probe(struct pnp_dev *dev, const struct pnp_device_id *dev return -ENODEV; } pnp_set_drvdata(dev,isapnpcfg); + cs4232_devices++; return 0; } @@ -455,10 +458,11 @@ static int __init init_cs4232(void) #endif cfg.irq = -1; - if (isapnp && - (pnp_register_driver(&cs4232_driver) > 0) - ) - return 0; + if (isapnp) { + pnp_register_driver(&cs4232_driver); + if (cs4232_devices) + return 0; + } if(io==-1||irq==-1||dma==-1) { @@ -503,7 +507,8 @@ static int __init setup_cs4232(char *str) int ints[7]; /* If we have isapnp cards, no need for options */ - if (pnp_register_driver(&cs4232_driver) > 0) + pnp_register_driver(&cs4232_driver); + if (cs4232_devices) return 1; str = get_options(str, ARRAY_SIZE(ints), ints); diff --git a/sound/oss/dmasound/dmasound_awacs.c b/sound/oss/dmasound/dmasound_awacs.c index 6ba8d6f45fe8..3bbc8105e9f1 100644 --- a/sound/oss/dmasound/dmasound_awacs.c +++ b/sound/oss/dmasound/dmasound_awacs.c @@ -2798,7 +2798,7 @@ __init setup_beep(void) DBDMA_ALIGN(beep_dbdma_cmd_space); /* set up emergency dbdma cmd */ emergency_dbdma_cmd = beep_dbdma_cmd+1 ; - beep_buf = (short *) kmalloc(BEEP_BUFLEN * 4, GFP_KERNEL); + beep_buf = kmalloc(BEEP_BUFLEN * 4, GFP_KERNEL); if (beep_buf == NULL) { printk(KERN_ERR 
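/*
 * Sketch of the bit-counting idiom from the cmpci hunk above: hweight32()
 * from <linux/bitops.h> returns the number of set bits, replacing the old
 * generic_hweight32() spelling.  The mask value is just an example.
 */
#include <linux/bitops.h>

static unsigned int foo_count_recsrc(unsigned int recsrc_mask)
{
	return hweight32(recsrc_mask);	/* e.g. 0x0000000b -> 3 */
}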
"dmasound_pmac: no memory for beep buffer\n"); kfree(beep_dbdma_cmd_space) ; diff --git a/sound/oss/emu10k1/midi.c b/sound/oss/emu10k1/midi.c index 959a96794dba..25ae8e4a488d 100644 --- a/sound/oss/emu10k1/midi.c +++ b/sound/oss/emu10k1/midi.c @@ -65,7 +65,8 @@ static int midiin_add_buffer(struct emu10k1_mididevice *midi_dev, struct midi_hd init_midi_hdr(midihdr); - if ((midihdr->data = (u8 *) kmalloc(MIDIIN_BUFLEN, GFP_KERNEL)) == NULL) { + midihdr->data = kmalloc(MIDIIN_BUFLEN, GFP_KERNEL); + if (!midihdr->data) { ERROR(); kfree(midihdr); return -1; @@ -334,7 +335,8 @@ static ssize_t emu10k1_midi_write(struct file *file, const char __user *buffer, midihdr->bytesrecorded = 0; midihdr->flags = 0; - if ((midihdr->data = (u8 *) kmalloc(count, GFP_KERNEL)) == NULL) { + midihdr->data = kmalloc(count, GFP_KERNEL); + if (!midihdr->data) { ERROR(); kfree(midihdr); return -EINVAL; @@ -545,7 +547,8 @@ int emu10k1_seq_midi_out(int dev, unsigned char midi_byte) midihdr->bytesrecorded = 0; midihdr->flags = 0; - if ((midihdr->data = (u8 *) kmalloc(1, GFP_KERNEL)) == NULL) { + midihdr->data = kmalloc(1, GFP_KERNEL); + if (!midihdr->data) { ERROR(); kfree(midihdr); return -EINVAL; diff --git a/sound/oss/esssolo1.c b/sound/oss/esssolo1.c index 78d3e29ce968..6861563d7525 100644 --- a/sound/oss/esssolo1.c +++ b/sound/oss/esssolo1.c @@ -2348,7 +2348,7 @@ static int __devinit solo1_probe(struct pci_dev *pcidev, const struct pci_device /* Recording requires 24-bit DMA, so attempt to set dma mask * to 24 bits first, then 32 bits (playback only) if that fails. */ - if (pci_set_dma_mask(pcidev, 0x00ffffff) && + if (pci_set_dma_mask(pcidev, DMA_24BIT_MASK) && pci_set_dma_mask(pcidev, DMA_32BIT_MASK)) { printk(KERN_WARNING "solo1: architecture does not support 24bit or 32bit PCI busmaster DMA\n"); return -ENODEV; diff --git a/sound/oss/maestro3.c b/sound/oss/maestro3.c index 66044aff2586..4a5e4237a110 100644 --- a/sound/oss/maestro3.c +++ b/sound/oss/maestro3.c @@ -2582,15 +2582,9 @@ static int alloc_dsp_suspendmem(struct m3_card *card) return 0; } -static void free_dsp_suspendmem(struct m3_card *card) -{ - if(card->suspend_mem) - vfree(card->suspend_mem); -} #else #define alloc_dsp_suspendmem(args...) 0 -#define free_dsp_suspendmem(args...) #endif /* @@ -2717,7 +2711,7 @@ out: if(ret) { if(card->iobase) release_region(pci_resource_start(pci_dev, 0), pci_resource_len(pci_dev, 0)); - free_dsp_suspendmem(card); + vfree(card->suspend_mem); if(card->ac97) { unregister_sound_mixer(card->ac97->dev_mixer); kfree(card->ac97); @@ -2760,7 +2754,7 @@ static void m3_remove(struct pci_dev *pci_dev) } release_region(card->iobase, 256); - free_dsp_suspendmem(card); + vfree(card->suspend_mem); kfree(card); } devs = NULL; diff --git a/sound/oss/msnd.c b/sound/oss/msnd.c index a7ad2b0a2ac0..5dbfc0f9c3c7 100644 --- a/sound/oss/msnd.c +++ b/sound/oss/msnd.c @@ -95,10 +95,8 @@ void msnd_fifo_init(msnd_fifo *f) void msnd_fifo_free(msnd_fifo *f) { - if (f->data) { - vfree(f->data); - f->data = NULL; - } + vfree(f->data); + f->data = NULL; } int msnd_fifo_alloc(msnd_fifo *f, size_t n) diff --git a/sound/oss/sb_card.c b/sound/oss/sb_card.c index 680b82e15298..4708cbdc3149 100644 --- a/sound/oss/sb_card.c +++ b/sound/oss/sb_card.c @@ -52,6 +52,7 @@ static int __initdata sm_games = 0; /* Logitech soundman games? 
*/ static struct sb_card_config *legacy = NULL; #ifdef CONFIG_PNP +static int pnp_registered; static int __initdata pnp = 1; /* static int __initdata uart401 = 0; @@ -133,7 +134,7 @@ static void sb_unload(struct sb_card_config *scc) } /* Register legacy card with OSS subsystem */ -static int sb_init_legacy(void) +static int __init sb_init_legacy(void) { struct sb_module_options sbmo = {0}; @@ -234,6 +235,8 @@ static void sb_dev2cfg(struct pnp_dev *dev, struct sb_card_config *scc) } } +static unsigned int sb_pnp_devices; + /* Probe callback function for the PnP API */ static int sb_pnp_probe(struct pnp_card_link *card, const struct pnp_card_device_id *card_id) { @@ -264,6 +267,7 @@ static int sb_pnp_probe(struct pnp_card_link *card, const struct pnp_card_device scc->conf.dma, scc->conf.dma2); pnp_set_card_drvdata(card, scc); + sb_pnp_devices++; return sb_register_oss(scc, &sbmo); } @@ -289,6 +293,14 @@ static struct pnp_card_driver sb_pnp_driver = { MODULE_DEVICE_TABLE(pnp_card, sb_pnp_card_table); #endif /* CONFIG_PNP */ +static void __init_or_module sb_unregister_all(void) +{ +#ifdef CONFIG_PNP + if (pnp_registered) + pnp_unregister_card_driver(&sb_pnp_driver); +#endif +} + static int __init sb_init(void) { int lres = 0; @@ -307,17 +319,18 @@ static int __init sb_init(void) #ifdef CONFIG_PNP if(pnp) { - pres = pnp_register_card_driver(&sb_pnp_driver); + int err = pnp_register_card_driver(&sb_pnp_driver); + if (!err) + pnp_registered = 1; + pres = sb_pnp_devices; } #endif printk(KERN_INFO "sb: Init: Done\n"); /* If either PnP or Legacy registered a card then return * success */ - if (pres <= 0 && lres <= 0) { -#ifdef CONFIG_PNP - pnp_unregister_card_driver(&sb_pnp_driver); -#endif + if (pres == 0 && lres <= 0) { + sb_unregister_all(); return -ENODEV; } return 0; @@ -333,14 +346,10 @@ static void __exit sb_exit(void) sb_unload(legacy); } -#ifdef CONFIG_PNP - pnp_unregister_card_driver(&sb_pnp_driver); -#endif + sb_unregister_all(); - if (smw_free) { - vfree(smw_free); - smw_free = NULL; - } + vfree(smw_free); + smw_free = NULL; } module_init(sb_init); diff --git a/sound/oss/sequencer.c b/sound/oss/sequencer.c index 347cd79c2502..6815c30e0bc1 100644 --- a/sound/oss/sequencer.c +++ b/sound/oss/sequencer.c @@ -1671,14 +1671,7 @@ void sequencer_init(void) void sequencer_unload(void) { - if(queue) - { - vfree(queue); - queue=NULL; - } - if(iqueue) - { - vfree(iqueue); - iqueue=NULL; - } + vfree(queue); + vfree(iqueue); + queue = iqueue = NULL; } diff --git a/sound/oss/sh_dac_audio.c b/sound/oss/sh_dac_audio.c index 8a9917c919c2..3f7427cd195a 100644 --- a/sound/oss/sh_dac_audio.c +++ b/sound/oss/sh_dac_audio.c @@ -289,7 +289,7 @@ static int __init dac_audio_init(void) in_use = 0; - data_buffer = (char *)kmalloc(BUFFER_SIZE, GFP_KERNEL); + data_buffer = kmalloc(BUFFER_SIZE, GFP_KERNEL); if (data_buffer == NULL) return -ENOMEM; diff --git a/sound/oss/sonicvibes.c b/sound/oss/sonicvibes.c index 69a4b8778b51..42bd276cfc39 100644 --- a/sound/oss/sonicvibes.c +++ b/sound/oss/sonicvibes.c @@ -116,6 +116,7 @@ #include <linux/spinlock.h> #include <linux/smp_lock.h> #include <linux/gameport.h> +#include <linux/dma-mapping.h> #include <linux/mutex.h> @@ -407,24 +408,6 @@ static inline unsigned ld2(unsigned int x) return r; } -/* - * hweightN: returns the hamming weight (i.e. 
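/*
 * Sketch of the teardown pattern from the sb_card.c hunks above: record
 * whether pnp_register_card_driver() succeeded, and funnel the matching
 * unregister through one helper used by both the init failure path and
 * module exit.  foo_pnp_driver stands in for the driver's pnp_card_driver
 * object, defined elsewhere.
 */
#include <linux/init.h>
#include <linux/module.h>
#include <linux/pnp.h>

#ifdef CONFIG_PNP
static int foo_pnp_registered;
extern struct pnp_card_driver foo_pnp_driver;	/* defined elsewhere */
#endif

static void __init_or_module foo_unregister_all(void)
{
#ifdef CONFIG_PNP
	if (foo_pnp_registered)
		pnp_unregister_card_driver(&foo_pnp_driver);
#endif
}

static int __init foo_card_init(void)
{
#ifdef CONFIG_PNP
	if (!pnp_register_card_driver(&foo_pnp_driver))
		foo_pnp_registered = 1;
#endif
	/* ...probe legacy/ISA resources here... */
	return 0;
}

static void __exit foo_card_exit(void)
{
	foo_unregister_all();
}

module_init(foo_card_init);
module_exit(foo_card_exit);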
the number - * of bits set) of a N-bit word - */ - -#ifdef hweight32 -#undef hweight32 -#endif - -static inline unsigned int hweight32(unsigned int w) -{ - unsigned int res = (w & 0x55555555) + ((w >> 1) & 0x55555555); - res = (res & 0x33333333) + ((res >> 2) & 0x33333333); - res = (res & 0x0F0F0F0F) + ((res >> 4) & 0x0F0F0F0F); - res = (res & 0x00FF00FF) + ((res >> 8) & 0x00FF00FF); - return (res & 0x0000FFFF) + ((res >> 16) & 0x0000FFFF); -} - /* --------------------------------------------------------------------- */ /* @@ -2553,7 +2536,7 @@ static int __devinit sv_probe(struct pci_dev *pcidev, const struct pci_device_id return -ENODEV; if (pcidev->irq == 0) return -ENODEV; - if (pci_set_dma_mask(pcidev, 0x00ffffff)) { + if (pci_set_dma_mask(pcidev, DMA_24BIT_MASK)) { printk(KERN_WARNING "sonicvibes: architecture does not support 24bit PCI busmaster DMA\n"); return -ENODEV; } diff --git a/sound/oss/vwsnd.c b/sound/oss/vwsnd.c index b372e88e857f..5f140c7586b3 100644 --- a/sound/oss/vwsnd.c +++ b/sound/oss/vwsnd.c @@ -248,27 +248,6 @@ typedef struct lithium { } lithium_t; /* - * li_create initializes the lithium_t structure and sets up vm mappings - * to access the registers. - * Returns 0 on success, -errno on failure. - */ - -static int __init li_create(lithium_t *lith, unsigned long baseaddr) -{ - static void li_destroy(lithium_t *); - - spin_lock_init(&lith->lock); - lith->page0 = ioremap_nocache(baseaddr + LI_PAGE0_OFFSET, PAGE_SIZE); - lith->page1 = ioremap_nocache(baseaddr + LI_PAGE1_OFFSET, PAGE_SIZE); - lith->page2 = ioremap_nocache(baseaddr + LI_PAGE2_OFFSET, PAGE_SIZE); - if (!lith->page0 || !lith->page1 || !lith->page2) { - li_destroy(lith); - return -ENOMEM; - } - return 0; -} - -/* * li_destroy destroys the lithium_t structure and vm mappings. */ @@ -289,6 +268,25 @@ static void li_destroy(lithium_t *lith) } /* + * li_create initializes the lithium_t structure and sets up vm mappings + * to access the registers. + * Returns 0 on success, -errno on failure. 
+ */ + +static int __init li_create(lithium_t *lith, unsigned long baseaddr) +{ + spin_lock_init(&lith->lock); + lith->page0 = ioremap_nocache(baseaddr + LI_PAGE0_OFFSET, PAGE_SIZE); + lith->page1 = ioremap_nocache(baseaddr + LI_PAGE1_OFFSET, PAGE_SIZE); + lith->page2 = ioremap_nocache(baseaddr + LI_PAGE2_OFFSET, PAGE_SIZE); + if (!lith->page0 || !lith->page1 || !lith->page2) { + li_destroy(lith); + return -ENOMEM; + } + return 0; +} + +/* * basic register accessors - read/write long/byte */ diff --git a/sound/pci/ad1889.c b/sound/pci/ad1889.c index 2aa5a7fdb6e0..c6c8333acc62 100644 --- a/sound/pci/ad1889.c +++ b/sound/pci/ad1889.c @@ -39,6 +39,7 @@ #include <linux/interrupt.h> #include <linux/compiler.h> #include <linux/delay.h> +#include <linux/dma-mapping.h> #include <sound/driver.h> #include <sound/core.h> diff --git a/sound/pci/ali5451/ali5451.c b/sound/pci/ali5451/ali5451.c index e264136e8fb4..fc92b6896c24 100644 --- a/sound/pci/ali5451/ali5451.c +++ b/sound/pci/ali5451/ali5451.c @@ -33,6 +33,7 @@ #include <linux/pci.h> #include <linux/slab.h> #include <linux/moduleparam.h> +#include <linux/dma-mapping.h> #include <sound/core.h> #include <sound/pcm.h> #include <sound/info.h> @@ -2220,8 +2221,8 @@ static int __devinit snd_ali_create(struct snd_card *card, if ((err = pci_enable_device(pci)) < 0) return err; /* check, if we can restrict PCI DMA transfers to 31 bits */ - if (pci_set_dma_mask(pci, 0x7fffffff) < 0 || - pci_set_consistent_dma_mask(pci, 0x7fffffff) < 0) { + if (pci_set_dma_mask(pci, DMA_31BIT_MASK) < 0 || + pci_set_consistent_dma_mask(pci, DMA_31BIT_MASK) < 0) { snd_printk(KERN_ERR "architecture does not support 31bit PCI busmaster DMA\n"); pci_disable_device(pci); return -ENXIO; diff --git a/sound/pci/als4000.c b/sound/pci/als4000.c index 7b2ff5f4672e..100d8127a411 100644 --- a/sound/pci/als4000.c +++ b/sound/pci/als4000.c @@ -70,6 +70,7 @@ #include <linux/slab.h> #include <linux/gameport.h> #include <linux/moduleparam.h> +#include <linux/dma-mapping.h> #include <sound/core.h> #include <sound/pcm.h> #include <sound/rawmidi.h> @@ -688,8 +689,8 @@ static int __devinit snd_card_als4000_probe(struct pci_dev *pci, return err; } /* check, if we can restrict PCI DMA transfers to 24 bits */ - if (pci_set_dma_mask(pci, 0x00ffffff) < 0 || - pci_set_consistent_dma_mask(pci, 0x00ffffff) < 0) { + if (pci_set_dma_mask(pci, DMA_24BIT_MASK) < 0 || + pci_set_consistent_dma_mask(pci, DMA_24BIT_MASK) < 0) { snd_printk(KERN_ERR "architecture does not support 24bit PCI busmaster DMA\n"); pci_disable_device(pci); return -ENXIO; diff --git a/sound/pci/azt3328.c b/sound/pci/azt3328.c index e077eb3fbe2f..680077e1e057 100644 --- a/sound/pci/azt3328.c +++ b/sound/pci/azt3328.c @@ -104,6 +104,7 @@ #include <linux/slab.h> #include <linux/gameport.h> #include <linux/moduleparam.h> +#include <linux/dma-mapping.h> #include <sound/core.h> #include <sound/control.h> #include <sound/pcm.h> @@ -1669,8 +1670,8 @@ snd_azf3328_create(struct snd_card *card, chip->irq = -1; /* check if we can restrict PCI DMA transfers to 24 bits */ - if (pci_set_dma_mask(pci, 0x00ffffff) < 0 || - pci_set_consistent_dma_mask(pci, 0x00ffffff) < 0) { + if (pci_set_dma_mask(pci, DMA_24BIT_MASK) < 0 || + pci_set_consistent_dma_mask(pci, DMA_24BIT_MASK) < 0) { snd_printk(KERN_ERR "architecture does not support 24bit PCI busmaster DMA\n"); err = -ENXIO; goto out_err; diff --git a/sound/pci/emu10k1/emu10k1x.c b/sound/pci/emu10k1/emu10k1x.c index 2208dbd48be9..3e332f398162 100644 --- a/sound/pci/emu10k1/emu10k1x.c +++ 
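/*
 * Sketch of the DMA-mask idiom the ali5451/als4000/azt3328 hunks above
 * converge on: use the DMA_*BIT_MASK constants from
 * <linux/dma-mapping.h> instead of open-coded hex masks.  The function
 * name and message text are illustrative.
 */
#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>

static int foo_restrict_dma(struct pci_dev *pci)
{
	/* 24-bit example; DMA_30BIT_MASK/DMA_31BIT_MASK work the same way */
	if (pci_set_dma_mask(pci, DMA_24BIT_MASK) < 0 ||
	    pci_set_consistent_dma_mask(pci, DMA_24BIT_MASK) < 0) {
		printk(KERN_ERR "foo: no 24bit PCI busmaster DMA\n");
		return -ENXIO;
	}
	return 0;
}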
b/sound/pci/emu10k1/emu10k1x.c @@ -36,6 +36,7 @@ #include <linux/dma-mapping.h> #include <linux/slab.h> #include <linux/moduleparam.h> +#include <linux/dma-mapping.h> #include <sound/core.h> #include <sound/initval.h> #include <sound/pcm.h> diff --git a/sound/pci/es1938.c b/sound/pci/es1938.c index 0d556b09ad04..4d62fe439177 100644 --- a/sound/pci/es1938.c +++ b/sound/pci/es1938.c @@ -55,6 +55,7 @@ #include <linux/gameport.h> #include <linux/moduleparam.h> #include <linux/delay.h> +#include <linux/dma-mapping.h> #include <sound/core.h> #include <sound/control.h> #include <sound/pcm.h> @@ -1517,8 +1518,8 @@ static int __devinit snd_es1938_create(struct snd_card *card, if ((err = pci_enable_device(pci)) < 0) return err; /* check, if we can restrict PCI DMA transfers to 24 bits */ - if (pci_set_dma_mask(pci, 0x00ffffff) < 0 || - pci_set_consistent_dma_mask(pci, 0x00ffffff) < 0) { + if (pci_set_dma_mask(pci, DMA_24BIT_MASK) < 0 || + pci_set_consistent_dma_mask(pci, DMA_24BIT_MASK) < 0) { snd_printk(KERN_ERR "architecture does not support 24bit PCI busmaster DMA\n"); pci_disable_device(pci); return -ENXIO; diff --git a/sound/pci/es1968.c b/sound/pci/es1968.c index dd465a186e11..e3ad17f53c29 100644 --- a/sound/pci/es1968.c +++ b/sound/pci/es1968.c @@ -104,6 +104,7 @@ #include <linux/slab.h> #include <linux/gameport.h> #include <linux/moduleparam.h> +#include <linux/dma-mapping.h> #include <linux/mutex.h> #include <sound/core.h> diff --git a/sound/pci/ice1712/ice1712.c b/sound/pci/ice1712/ice1712.c index 672e198317e1..b88eeba2f5d1 100644 --- a/sound/pci/ice1712/ice1712.c +++ b/sound/pci/ice1712/ice1712.c @@ -56,7 +56,9 @@ #include <linux/dma-mapping.h> #include <linux/slab.h> #include <linux/moduleparam.h> +#include <linux/dma-mapping.h> #include <linux/mutex.h> + #include <sound/core.h> #include <sound/cs8427.h> #include <sound/info.h> diff --git a/sound/pci/maestro3.c b/sound/pci/maestro3.c index 8bc084956c28..44393e190929 100644 --- a/sound/pci/maestro3.c +++ b/sound/pci/maestro3.c @@ -41,6 +41,7 @@ #include <linux/slab.h> #include <linux/vmalloc.h> #include <linux/moduleparam.h> +#include <linux/dma-mapping.h> #include <sound/core.h> #include <sound/info.h> #include <sound/control.h> diff --git a/sound/pci/mixart/mixart.c b/sound/pci/mixart/mixart.c index 43ee3b2b948f..b5a095052d4c 100644 --- a/sound/pci/mixart/mixart.c +++ b/sound/pci/mixart/mixart.c @@ -28,6 +28,8 @@ #include <linux/dma-mapping.h> #include <linux/moduleparam.h> #include <linux/mutex.h> +#include <linux/dma-mapping.h> + #include <sound/core.h> #include <sound/initval.h> #include <sound/info.h> diff --git a/sound/pci/pcxhr/pcxhr.c b/sound/pci/pcxhr/pcxhr.c index f679779d96e3..35875c8aa299 100644 --- a/sound/pci/pcxhr/pcxhr.c +++ b/sound/pci/pcxhr/pcxhr.c @@ -30,6 +30,7 @@ #include <linux/delay.h> #include <linux/moduleparam.h> #include <linux/mutex.h> +#include <linux/dma-mapping.h> #include <sound/core.h> #include <sound/initval.h> diff --git a/sound/pci/rme32.c b/sound/pci/rme32.c index 0cbef5fe6c63..ab78544bf042 100644 --- a/sound/pci/rme32.c +++ b/sound/pci/rme32.c @@ -313,7 +313,7 @@ static int snd_rme32_capture_copy(struct snd_pcm_substream *substream, int chann } /* - * SPDIF I/O capabilites (half-duplex mode) + * SPDIF I/O capabilities (half-duplex mode) */ static struct snd_pcm_hardware snd_rme32_spdif_info = { .info = (SNDRV_PCM_INFO_MMAP_IOMEM | @@ -339,7 +339,7 @@ static struct snd_pcm_hardware snd_rme32_spdif_info = { }; /* - * ADAT I/O capabilites (half-duplex mode) + * ADAT I/O capabilities (half-duplex mode) 
*/ static struct snd_pcm_hardware snd_rme32_adat_info = { @@ -364,7 +364,7 @@ static struct snd_pcm_hardware snd_rme32_adat_info = }; /* - * SPDIF I/O capabilites (full-duplex mode) + * SPDIF I/O capabilities (full-duplex mode) */ static struct snd_pcm_hardware snd_rme32_spdif_fd_info = { .info = (SNDRV_PCM_INFO_MMAP | @@ -390,7 +390,7 @@ static struct snd_pcm_hardware snd_rme32_spdif_fd_info = { }; /* - * ADAT I/O capabilites (full-duplex mode) + * ADAT I/O capabilities (full-duplex mode) */ static struct snd_pcm_hardware snd_rme32_adat_fd_info = { diff --git a/sound/pci/rme96.c b/sound/pci/rme96.c index 0e694b011dcc..6c2a9f4a7659 100644 --- a/sound/pci/rme96.c +++ b/sound/pci/rme96.c @@ -359,7 +359,7 @@ snd_rme96_capture_copy(struct snd_pcm_substream *substream, } /* - * Digital output capabilites (S/PDIF) + * Digital output capabilities (S/PDIF) */ static struct snd_pcm_hardware snd_rme96_playback_spdif_info = { @@ -388,7 +388,7 @@ static struct snd_pcm_hardware snd_rme96_playback_spdif_info = }; /* - * Digital input capabilites (S/PDIF) + * Digital input capabilities (S/PDIF) */ static struct snd_pcm_hardware snd_rme96_capture_spdif_info = { @@ -417,7 +417,7 @@ static struct snd_pcm_hardware snd_rme96_capture_spdif_info = }; /* - * Digital output capabilites (ADAT) + * Digital output capabilities (ADAT) */ static struct snd_pcm_hardware snd_rme96_playback_adat_info = { @@ -442,7 +442,7 @@ static struct snd_pcm_hardware snd_rme96_playback_adat_info = }; /* - * Digital input capabilites (ADAT) + * Digital input capabilities (ADAT) */ static struct snd_pcm_hardware snd_rme96_capture_adat_info = { diff --git a/sound/pci/rme9652/hdspm.c b/sound/pci/rme9652/hdspm.c index 980b9cd689dd..b5538efd146b 100644 --- a/sound/pci/rme9652/hdspm.c +++ b/sound/pci/rme9652/hdspm.c @@ -2256,7 +2256,7 @@ static int snd_hdspm_create_controls(struct snd_card *card, struct hdspm * hdspm } /* Channel playback mixer as default control - Note: the whole matrix would be 128*HDSPM_MIXER_CHANNELS Faders, thats to big for any alsamixer + Note: the whole matrix would be 128*HDSPM_MIXER_CHANNELS Faders, thats too big for any alsamixer they are accesible via special IOCTL on hwdep and the mixer 2dimensional mixer control */ diff --git a/sound/pci/sonicvibes.c b/sound/pci/sonicvibes.c index 7bbea3738b8a..2d66a09fe5ee 100644 --- a/sound/pci/sonicvibes.c +++ b/sound/pci/sonicvibes.c @@ -30,6 +30,7 @@ #include <linux/slab.h> #include <linux/gameport.h> #include <linux/moduleparam.h> +#include <linux/dma-mapping.h> #include <sound/core.h> #include <sound/pcm.h> @@ -1227,8 +1228,8 @@ static int __devinit snd_sonicvibes_create(struct snd_card *card, if ((err = pci_enable_device(pci)) < 0) return err; /* check, if we can restrict PCI DMA transfers to 24 bits */ - if (pci_set_dma_mask(pci, 0x00ffffff) < 0 || - pci_set_consistent_dma_mask(pci, 0x00ffffff) < 0) { + if (pci_set_dma_mask(pci, DMA_24BIT_MASK) < 0 || + pci_set_consistent_dma_mask(pci, DMA_24BIT_MASK) < 0) { snd_printk(KERN_ERR "architecture does not support 24bit PCI busmaster DMA\n"); pci_disable_device(pci); return -ENXIO; diff --git a/sound/pci/trident/trident_main.c b/sound/pci/trident/trident_main.c index 83b7d8aba9e6..52178b8ad49d 100644 --- a/sound/pci/trident/trident_main.c +++ b/sound/pci/trident/trident_main.c @@ -35,6 +35,7 @@ #include <linux/slab.h> #include <linux/vmalloc.h> #include <linux/gameport.h> +#include <linux/dma-mapping.h> #include <sound/core.h> #include <sound/info.h> @@ -3554,8 +3555,8 @@ int __devinit snd_trident_create(struct snd_card 
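/*
 * Sketch (not from the patch) of what one of the "capabilities"
 * descriptors touched above looks like: a struct snd_pcm_hardware declares
 * what a substream can do.  All limits below are example values, not the
 * RME ones.
 */
#include <sound/pcm.h>

static struct snd_pcm_hardware foo_playback_info = {
	.info             = (SNDRV_PCM_INFO_MMAP |
			     SNDRV_PCM_INFO_MMAP_VALID |
			     SNDRV_PCM_INFO_INTERLEAVED),
	.formats          = SNDRV_PCM_FMTBIT_S16_LE,
	.rates            = SNDRV_PCM_RATE_44100 | SNDRV_PCM_RATE_48000,
	.rate_min         = 44100,
	.rate_max         = 48000,
	.channels_min     = 2,
	.channels_max     = 2,
	.buffer_bytes_max = 64 * 1024,
	.period_bytes_min = 256,
	.period_bytes_max = 16 * 1024,
	.periods_min      = 2,
	.periods_max      = 64,
};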
*card, if ((err = pci_enable_device(pci)) < 0) return err; /* check, if we can restrict PCI DMA transfers to 30 bits */ - if (pci_set_dma_mask(pci, 0x3fffffff) < 0 || - pci_set_consistent_dma_mask(pci, 0x3fffffff) < 0) { + if (pci_set_dma_mask(pci, DMA_30BIT_MASK) < 0 || + pci_set_consistent_dma_mask(pci, DMA_30BIT_MASK) < 0) { snd_printk(KERN_ERR "architecture does not support 30bit PCI busmaster DMA\n"); pci_disable_device(pci); return -ENXIO; diff --git a/sound/sound_core.c b/sound/sound_core.c index 394b53e20cb8..6f849720aef3 100644 --- a/sound/sound_core.c +++ b/sound/sound_core.c @@ -53,7 +53,7 @@ struct sound_unit { int unit_minor; - struct file_operations *unit_fops; + const struct file_operations *unit_fops; struct sound_unit *next; char name[32]; }; @@ -73,7 +73,7 @@ EXPORT_SYMBOL(sound_class); * join into it. Called with the lock asserted */ -static int __sound_insert_unit(struct sound_unit * s, struct sound_unit **list, struct file_operations *fops, int index, int low, int top) +static int __sound_insert_unit(struct sound_unit * s, struct sound_unit **list, const struct file_operations *fops, int index, int low, int top) { int n=low; @@ -153,7 +153,7 @@ static DEFINE_SPINLOCK(sound_loader_lock); * list. Acquires locks as needed */ -static int sound_insert_unit(struct sound_unit **list, struct file_operations *fops, int index, int low, int top, const char *name, umode_t mode, struct device *dev) +static int sound_insert_unit(struct sound_unit **list, const struct file_operations *fops, int index, int low, int top, const char *name, umode_t mode, struct device *dev) { struct sound_unit *s = kmalloc(sizeof(*s), GFP_KERNEL); int r; @@ -237,7 +237,7 @@ static struct sound_unit *chains[SOUND_STEP]; * a negative error code is returned. */ -int register_sound_special_device(struct file_operations *fops, int unit, +int register_sound_special_device(const struct file_operations *fops, int unit, struct device *dev) { const int chain = unit % SOUND_STEP; @@ -301,7 +301,7 @@ int register_sound_special_device(struct file_operations *fops, int unit, EXPORT_SYMBOL(register_sound_special_device); -int register_sound_special(struct file_operations *fops, int unit) +int register_sound_special(const struct file_operations *fops, int unit) { return register_sound_special_device(fops, unit, NULL); } @@ -318,7 +318,7 @@ EXPORT_SYMBOL(register_sound_special); * number is returned, on failure a negative error code is returned. */ -int register_sound_mixer(struct file_operations *fops, int dev) +int register_sound_mixer(const struct file_operations *fops, int dev) { return sound_insert_unit(&chains[0], fops, dev, 0, 128, "mixer", S_IRUSR | S_IWUSR, NULL); @@ -336,7 +336,7 @@ EXPORT_SYMBOL(register_sound_mixer); * number is returned, on failure a negative error code is returned. 
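/*
 * Sketch of the constification in the sound_core.c hunks above: the fops
 * tables passed to register_sound_dsp() and friends are never modified, so
 * both the tables and the registration prototypes can be const-qualified.
 * The handler and names below are placeholders.
 */
#include <linux/fs.h>
#include <linux/module.h>
#include <linux/sound.h>

static int foo_dsp_open(struct inode *inode, struct file *file)
{
	return 0;	/* placeholder */
}

static const struct file_operations foo_dsp_fops = {
	.owner = THIS_MODULE,
	.open  = foo_dsp_open,
};

static int foo_register(void)
{
	/* returns the allocated minor, or a negative errno */
	return register_sound_dsp(&foo_dsp_fops, -1);
}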
*/ -int register_sound_midi(struct file_operations *fops, int dev) +int register_sound_midi(const struct file_operations *fops, int dev) { return sound_insert_unit(&chains[2], fops, dev, 2, 130, "midi", S_IRUSR | S_IWUSR, NULL); @@ -362,7 +362,7 @@ EXPORT_SYMBOL(register_sound_midi); * and will always allocate them as a matching pair - eg dsp3/audio3 */ -int register_sound_dsp(struct file_operations *fops, int dev) +int register_sound_dsp(const struct file_operations *fops, int dev) { return sound_insert_unit(&chains[3], fops, dev, 3, 131, "dsp", S_IWUSR | S_IRUSR, NULL); @@ -381,7 +381,7 @@ EXPORT_SYMBOL(register_sound_dsp); */ -int register_sound_synth(struct file_operations *fops, int dev) +int register_sound_synth(const struct file_operations *fops, int dev) { return sound_insert_unit(&chains[9], fops, dev, 9, 137, "synth", S_IRUSR | S_IWUSR, NULL); @@ -501,7 +501,7 @@ int soundcore_open(struct inode *inode, struct file *file) int chain; int unit = iminor(inode); struct sound_unit *s; - struct file_operations *new_fops = NULL; + const struct file_operations *new_fops = NULL; chain=unit&0x0F; if(chain==4 || chain==5) /* dsp/audio/dsp16 */ @@ -540,7 +540,7 @@ int soundcore_open(struct inode *inode, struct file *file) * switching ->f_op in the first place. */ int err = 0; - struct file_operations *old_fops = file->f_op; + const struct file_operations *old_fops = file->f_op; file->f_op = new_fops; spin_unlock(&sound_loader_lock); if(file->f_op->open) diff --git a/sound/usb/usx2y/usx2yhwdeppcm.c b/sound/usb/usx2y/usx2yhwdeppcm.c index 315855082fe1..fe67a92e2a1a 100644 --- a/sound/usb/usx2y/usx2yhwdeppcm.c +++ b/sound/usb/usx2y/usx2yhwdeppcm.c @@ -404,7 +404,7 @@ static void usX2Y_usbpcm_subs_startup(struct snd_usX2Y_substream *subs) struct usX2Ydev * usX2Y = subs->usX2Y; usX2Y->prepare_subs = subs; subs->urb[0]->start_frame = -1; - smp_wmb(); // Make shure above modifications are seen by i_usX2Y_subs_startup() + smp_wmb(); // Make sure above modifications are seen by i_usX2Y_subs_startup() usX2Y_urbs_set_complete(usX2Y, i_usX2Y_usbpcm_subs_startup); } |
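/*
 * Illustrative sketch of the ordering idiom behind the smp_wmb() in the
 * usx2yhwdeppcm.c hunk above: the writer fills in its fields, issues
 * smp_wmb(), and only then publishes the flag; the reader pairs that with
 * smp_rmb().  The structure and names are made up; in this kernel the
 * barrier primitives live in <asm/system.h>.
 */
#include <linux/errno.h>
#include <asm/system.h>

struct foo_startup {
	int start_frame;
	int prepared;		/* publication flag */
};

static void foo_publish(struct foo_startup *s)
{
	s->start_frame = -1;	/* modifications that must be visible... */
	smp_wmb();		/* ...before the flag is set */
	s->prepared = 1;
}

static int foo_consume(struct foo_startup *s)
{
	if (!s->prepared)
		return -EAGAIN;
	smp_rmb();		/* pairs with the writer's smp_wmb() */
	return s->start_frame;
}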