Merge branch 'next-queue' into next
diff --git a/Documentation/ABI/testing/sysfs-bus-usb b/Documentation/ABI/testing/sysfs-bus-usb
index a07c0f3..a986e9b 100644
--- a/Documentation/ABI/testing/sysfs-bus-usb
+++ b/Documentation/ABI/testing/sysfs-bus-usb
@@ -159,3 +159,14 @@
device. This is useful to ensure auto probing won't
match the driver to the device. For example:
# echo "046d c315" > /sys/bus/usb/drivers/foo/remove_id
+
+What: /sys/bus/usb/device/.../avoid_reset
+Date: December 2009
+Contact: Oliver Neukum <oliver@neukum.org>
+Description:
+ Writing 1 to this file tells the kernel that this
+ device will morph into another mode when it is reset.
+ Drivers will not use reset for error handling for
+ such devices.
+Users:
+ usb_modeswitch
diff --git a/Documentation/ABI/testing/sysfs-platform-asus-laptop b/Documentation/ABI/testing/sysfs-platform-asus-laptop
index a1cb660..1d77539 100644
--- a/Documentation/ABI/testing/sysfs-platform-asus-laptop
+++ b/Documentation/ABI/testing/sysfs-platform-asus-laptop
@@ -1,4 +1,4 @@
-What: /sys/devices/platform/asus-laptop/display
+What: /sys/devices/platform/asus_laptop/display
Date: January 2007
KernelVersion: 2.6.20
Contact: "Corentin Chary" <corentincj@iksaif.net>
@@ -13,7 +13,7 @@
Ex: - 0 (0000b) means no display
- 3 (0011b) CRT+LCD.
-What: /sys/devices/platform/asus-laptop/gps
+What: /sys/devices/platform/asus_laptop/gps
Date: January 2007
KernelVersion: 2.6.20
Contact: "Corentin Chary" <corentincj@iksaif.net>
@@ -21,7 +21,7 @@
Control the gps device. 1 means on, 0 means off.
Users: Lapsus
-What: /sys/devices/platform/asus-laptop/ledd
+What: /sys/devices/platform/asus_laptop/ledd
Date: January 2007
KernelVersion: 2.6.20
Contact: "Corentin Chary" <corentincj@iksaif.net>
@@ -29,11 +29,11 @@
Some models like the W1N have a LED display that can be
used to display several informations.
To control the LED display, use the following :
- echo 0x0T000DDD > /sys/devices/platform/asus-laptop/
+ echo 0x0T000DDD > /sys/devices/platform/asus_laptop/
where T control the 3 letters display, and DDD the 3 digits display.
The DDD table can be found in Documentation/laptops/asus-laptop.txt
-What: /sys/devices/platform/asus-laptop/bluetooth
+What: /sys/devices/platform/asus_laptop/bluetooth
Date: January 2007
KernelVersion: 2.6.20
Contact: "Corentin Chary" <corentincj@iksaif.net>
@@ -42,7 +42,7 @@
This may control the led, the device or both.
Users: Lapsus
-What: /sys/devices/platform/asus-laptop/wlan
+What: /sys/devices/platform/asus_laptop/wlan
Date: January 2007
KernelVersion: 2.6.20
Contact: "Corentin Chary" <corentincj@iksaif.net>
diff --git a/Documentation/ABI/testing/sysfs-platform-eeepc-laptop b/Documentation/ABI/testing/sysfs-platform-eeepc-laptop
index 7445dfb..5b026c6 100644
--- a/Documentation/ABI/testing/sysfs-platform-eeepc-laptop
+++ b/Documentation/ABI/testing/sysfs-platform-eeepc-laptop
@@ -1,4 +1,4 @@
-What: /sys/devices/platform/eeepc-laptop/disp
+What: /sys/devices/platform/eeepc/disp
Date: May 2008
KernelVersion: 2.6.26
Contact: "Corentin Chary" <corentincj@iksaif.net>
@@ -9,21 +9,21 @@
- 3 = LCD+CRT
If you run X11, you should use xrandr instead.
-What: /sys/devices/platform/eeepc-laptop/camera
+What: /sys/devices/platform/eeepc/camera
Date: May 2008
KernelVersion: 2.6.26
Contact: "Corentin Chary" <corentincj@iksaif.net>
Description:
Control the camera. 1 means on, 0 means off.
-What: /sys/devices/platform/eeepc-laptop/cardr
+What: /sys/devices/platform/eeepc/cardr
Date: May 2008
KernelVersion: 2.6.26
Contact: "Corentin Chary" <corentincj@iksaif.net>
Description:
Control the card reader. 1 means on, 0 means off.
-What: /sys/devices/platform/eeepc-laptop/cpufv
+What: /sys/devices/platform/eeepc/cpufv
Date: Jun 2009
KernelVersion: 2.6.31
Contact: "Corentin Chary" <corentincj@iksaif.net>
@@ -42,7 +42,7 @@
`------------ Availables modes
For example, 0x301 means: mode 1 selected, 3 available modes.
-What: /sys/devices/platform/eeepc-laptop/available_cpufv
+What: /sys/devices/platform/eeepc/available_cpufv
Date: Jun 2009
KernelVersion: 2.6.31
Contact: "Corentin Chary" <corentincj@iksaif.net>
diff --git a/Documentation/DocBook/device-drivers.tmpl b/Documentation/DocBook/device-drivers.tmpl
index f9a6e2c..1b2dd4f 100644
--- a/Documentation/DocBook/device-drivers.tmpl
+++ b/Documentation/DocBook/device-drivers.tmpl
@@ -45,7 +45,7 @@
</sect1>
<sect1><title>Atomic and pointer manipulation</title>
-!Iarch/x86/include/asm/atomic_32.h
+!Iarch/x86/include/asm/atomic.h
!Iarch/x86/include/asm/unaligned.h
</sect1>
diff --git a/Documentation/DocBook/deviceiobook.tmpl b/Documentation/DocBook/deviceiobook.tmpl
index 3ed8812..c1ed6a4 100644
--- a/Documentation/DocBook/deviceiobook.tmpl
+++ b/Documentation/DocBook/deviceiobook.tmpl
@@ -316,7 +316,7 @@
<chapter id="pubfunctions">
<title>Public Functions Provided</title>
-!Iarch/x86/include/asm/io_32.h
+!Iarch/x86/include/asm/io.h
!Elib/iomap.c
</chapter>
diff --git a/Documentation/filesystems/nilfs2.txt b/Documentation/filesystems/nilfs2.txt
index 839efd8..cf6d0d8 100644
--- a/Documentation/filesystems/nilfs2.txt
+++ b/Documentation/filesystems/nilfs2.txt
@@ -74,6 +74,9 @@
This disables every write access on the device for
read-only mounts or snapshots. This option will fail
for r/w mounts on an unclean volume.
+discard Issue discard/TRIM commands to the underlying block
+ device when blocks are freed. This is useful for SSD
+ devices and sparse/thinly-provisioned LUNs.
NILFS2 usage
============
diff --git a/Documentation/i2c/busses/i2c-i801 b/Documentation/i2c/busses/i2c-i801
index 81c0c59..e1bb5b2 100644
--- a/Documentation/i2c/busses/i2c-i801
+++ b/Documentation/i2c/busses/i2c-i801
@@ -15,7 +15,8 @@
* Intel 82801I (ICH9)
* Intel EP80579 (Tolapai)
* Intel 82801JI (ICH10)
- * Intel PCH
+ * Intel 3400/5 Series (PCH)
+ * Intel Cougar Point (PCH)
Datasheets: Publicly available at the Intel website
Authors:
diff --git a/Documentation/i2c/busses/i2c-parport b/Documentation/i2c/busses/i2c-parport
index dceaba1..2461c7b 100644
--- a/Documentation/i2c/busses/i2c-parport
+++ b/Documentation/i2c/busses/i2c-parport
@@ -29,6 +29,9 @@
Earlier kernels defaulted to type=0 (Philips). But now, if the type
parameter is missing, the driver will simply fail to initialize.
+SMBus alert support is available on adapters which have this line properly
+connected to the parallel port's interrupt pin.
+
Building your own adapter
-------------------------
diff --git a/Documentation/i2c/busses/i2c-parport-light b/Documentation/i2c/busses/i2c-parport-light
index 2874364..bdc9cbb 100644
--- a/Documentation/i2c/busses/i2c-parport-light
+++ b/Documentation/i2c/busses/i2c-parport-light
@@ -9,3 +9,14 @@
and the impossibility to daisy-chain other parallel port devices.
Please see i2c-parport for documentation.
+
+Module parameters:
+
+* type: type of adapter (see i2c-parport or modinfo)
+
+* base: base I/O address
+ Default is 0x378 which is fairly common for parallel ports, at least on PC.
+
+* irq: optional IRQ
+ This must be passed if you want SMBus alert support, assuming your adapter
+ actually supports this.
diff --git a/Documentation/i2c/smbus-protocol b/Documentation/i2c/smbus-protocol
index 9df4744..7c19d1a 100644
--- a/Documentation/i2c/smbus-protocol
+++ b/Documentation/i2c/smbus-protocol
@@ -185,6 +185,22 @@
require PEC checksums.
+SMBus Alert
+===========
+
+SMBus Alert was introduced in Revision 1.0 of the specification.
+
+The SMBus alert protocol allows several SMBus slave devices to share a
+single interrupt pin on the SMBus master, while still allowing the master
+to know which slave triggered the interrupt.
+
+This is implemented in the Linux kernel as follows:
+* I2C bus drivers which support SMBus alert should call
+  i2c_setup_smbus_alert() to set up SMBus alert support.
+* I2C drivers for devices which can trigger SMBus alerts should implement
+  the optional alert() callback, as in the sketch below.
+
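+A minimal sketch of a client driver providing the alert() callback (the
+exact callback signature is an assumption based on the description above,
+and the foo_* names are hypothetical):
+
+	static void foo_alert(struct i2c_client *client, unsigned int data)
+	{
+		/* The device asserted SMBALERT#; check and clear its status */
+		dev_info(&client->dev, "SMBus alert received, data 0x%x\n",
+			 data);
+	}
+
+	static struct i2c_driver foo_driver = {
+		.driver = {
+			.name	= "foo",
+		},
+		/* .probe, .remove, .id_table etc. omitted */
+		.alert	= foo_alert,
+	};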
+
I2C Block Transactions
======================
diff --git a/Documentation/i2c/writing-clients b/Documentation/i2c/writing-clients
index 0a74603..3219ee0 100644
--- a/Documentation/i2c/writing-clients
+++ b/Documentation/i2c/writing-clients
@@ -318,8 +318,9 @@
These routines read and write some bytes from/to a client. The client
contains the i2c address, so you do not have to include it. The second
parameter contains the bytes to read/write, the third the number of bytes
-to read/write (must be less than the length of the buffer.) Returned is
-the actual number of bytes read/written.
+to read/write (it must be less than the length of the buffer, and also less
+than 64k, since msg.len is a u16). Returned is the actual number of bytes
+read/written.
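+
+For example, a bounded block write might look like this (a sketch; the
+foo_* names are hypothetical):
+
+	static int foo_write_block(struct i2c_client *client,
+				   const char *buf, int count)
+	{
+		/* msg.len is a u16, so never send more than 64k at once */
+		if (count > 0xffff)
+			return -EINVAL;
+
+		/* returns the number of bytes written or a negative errno */
+		return i2c_master_send(client, buf, count);
+	}
+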
int i2c_transfer(struct i2c_adapter *adap, struct i2c_msg *msg,
int num);
diff --git a/Documentation/ioctl/ioctl-number.txt b/Documentation/ioctl/ioctl-number.txt
index 35cf64d..35c9b51 100644
--- a/Documentation/ioctl/ioctl-number.txt
+++ b/Documentation/ioctl/ioctl-number.txt
@@ -139,7 +139,6 @@
'K' all linux/kd.h
'L' 00-1F linux/loop.h conflict!
'L' 10-1F drivers/scsi/mpt2sas/mpt2sas_ctl.h conflict!
-'L' 20-2F linux/usb/vstusb.h
'L' E0-FF linux/ppdd.h encrypted disk device driver
<http://linux01.gwdg.de/~alatham/ppdd.html>
'M' all linux/soundcard.h conflict!
diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt
index fbcddc5..d80930d 100644
--- a/Documentation/kernel-parameters.txt
+++ b/Documentation/kernel-parameters.txt
@@ -1794,6 +1794,12 @@
purges which is reported from either PAL_VM_SUMMARY or
SAL PALO.
+	nr_cpus=	[SMP] Maximum number of processors that an SMP kernel
+			could support.  nr_cpus=n : n >= 1 limits the kernel to
+			supporting 'n' processors.  CPUs beyond this limit cannot
+			later be brought online with the CPU hotplug feature,
+			just as if the kernel had been compiled with NR_CPUS=n.
+
nr_uarts= [SERIAL] maximum number of UARTs to be registered.
numa_zonelist_order= [KNL, BOOT] Select zonelist order for NUMA.
diff --git a/Documentation/laptops/thinkpad-acpi.txt b/Documentation/laptops/thinkpad-acpi.txt
index 75afa12..39c0a09 100644
--- a/Documentation/laptops/thinkpad-acpi.txt
+++ b/Documentation/laptops/thinkpad-acpi.txt
@@ -650,6 +650,10 @@
echo expand_toggle > /proc/acpi/ibm/video
echo video_switch > /proc/acpi/ibm/video
+NOTE: Access to this feature is restricted to processes owning the
+CAP_SYS_ADMIN capability for safety reasons, as it can interact badly
+enough with some versions of X.org to crash it.
+
Each video output device can be enabled or disabled individually.
Reading /proc/acpi/ibm/video shows the status of each device.
diff --git a/Documentation/networking/00-INDEX b/Documentation/networking/00-INDEX
index 50189bf..fe5c099 100644
--- a/Documentation/networking/00-INDEX
+++ b/Documentation/networking/00-INDEX
@@ -32,6 +32,8 @@
- the Crystal LAN (CS8900/20-based) Ethernet ISA adapter driver
cxacru.txt
- Conexant AccessRunner USB ADSL Modem
+cxacru-cf.py
+ - Conexant AccessRunner USB ADSL Modem configuration file parser
de4x5.txt
- the Digital EtherWORKS DE4?? and DE5?? PCI Ethernet driver
decnet.txt
diff --git a/Documentation/networking/cxacru-cf.py b/Documentation/networking/cxacru-cf.py
new file mode 100644
index 0000000..b41d298
--- /dev/null
+++ b/Documentation/networking/cxacru-cf.py
@@ -0,0 +1,48 @@
+#!/usr/bin/env python
+# Copyright 2009 Simon Arlott
+#
+# This program is free software; you can redistribute it and/or modify it
+# under the terms of the GNU General Public License as published by the Free
+# Software Foundation; either version 2 of the License, or (at your option)
+# any later version.
+#
+# This program is distributed in the hope that it will be useful, but WITHOUT
+# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+# more details.
+#
+# You should have received a copy of the GNU General Public License along with
+# this program; if not, write to the Free Software Foundation, Inc., 59
+# Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+#
+# Usage: cxacru-cf.py < cxacru-cf.bin
+# Output: values string suitable for the sysfs adsl_config attribute
+#
+# Warning: cxacru-cf.bin with MD5 hash cdbac2689969d5ed5d4850f117702110
+# contains mis-aligned values which will stop the modem from being able
+# to make a connection. If the first and last two bytes are removed then
+# the values become valid, but the modulation will be forced to ANSI
+# T1.413 only which may not be appropriate.
+#
+# The original binary format is a packed list of le32 values.
+
+import sys
+import struct
+
+i = 0
+while True:
+ buf = sys.stdin.read(4)
+
+ if len(buf) == 0:
+ break
+ elif len(buf) != 4:
+ sys.stdout.write("\n")
+ sys.stderr.write("Error: read {0} not 4 bytes\n".format(len(buf)))
+ sys.exit(1)
+
+ if i > 0:
+ sys.stdout.write(" ")
+ sys.stdout.write("{0:x}={1}".format(i, struct.unpack("<I", buf)[0]))
+ i += 1
+
+sys.stdout.write("\n")
diff --git a/Documentation/networking/cxacru.txt b/Documentation/networking/cxacru.txt
index b074681..2cce044 100644
--- a/Documentation/networking/cxacru.txt
+++ b/Documentation/networking/cxacru.txt
@@ -4,6 +4,12 @@
module loaded, the device will sometimes stop responding after unloading the
driver and it is necessary to unplug/remove power to the device to fix this.
+Note: support for cxacru-cf.bin has been removed. It was not loaded correctly,
+so it had no effect on the device configuration. Fixing it could have stopped
+existing devices from working if an invalid configuration were supplied.
+
+There is a script cxacru-cf.py to convert an existing file to the sysfs form.
+
Detected devices will appear as ATM devices named "cxacru". In /sys/class/atm/
these are directories named cxacruN where N is the device number. A symlink
named device points to the USB interface device's directory which contains
@@ -15,6 +21,15 @@
* adsl_headend_environment
Information about the remote headend.
+* adsl_config
+ Configuration writing interface.
+ Write parameters in hexadecimal format <index>=<value>,
+ separated by whitespace, e.g.:
+ "1=0 a=5"
+ Up to 7 parameters at a time will be sent and the modem will restart
+ the ADSL connection when any value is set. These are logged for future
+ reference.
+
* downstream_attenuation (dB)
* downstream_bits_per_frame
* downstream_rate (kbps)
@@ -61,6 +76,7 @@
* mac_address
* modulation
+ "" (when not connected)
"ANSI T1.413"
"ITU-T G.992.1 (G.DMT)"
"ITU-T G.992.2 (G.LITE)"
diff --git a/Documentation/usb/error-codes.txt b/Documentation/usb/error-codes.txt
index 9cf83e8..d83703e 100644
--- a/Documentation/usb/error-codes.txt
+++ b/Documentation/usb/error-codes.txt
@@ -41,8 +41,8 @@
-EFBIG Host controller driver can't schedule that many ISO frames.
--EPIPE Specified endpoint is stalled. For non-control endpoints,
- reset this status with usb_clear_halt().
+-EPIPE The pipe type specified in the URB doesn't match the
+ endpoint's actual type.
-EMSGSIZE (a) endpoint maxpacket size is zero; it is not usable
in the current interface altsetting.
@@ -60,6 +60,8 @@
-EHOSTUNREACH URB was rejected because the device is suspended.
+-ENOEXEC A control URB doesn't contain a Setup packet.
+
**************************************************************************
* Error codes returned by in urb->status *
diff --git a/Documentation/usb/power-management.txt b/Documentation/usb/power-management.txt
index 3bf6818..2790ad4 100644
--- a/Documentation/usb/power-management.txt
+++ b/Documentation/usb/power-management.txt
@@ -2,7 +2,7 @@
Alan Stern <stern@rowland.harvard.edu>
- November 10, 2009
+ December 11, 2009
@@ -29,9 +29,9 @@
information about system PM).
Note: Dynamic PM support for USB is present only if the kernel was
-built with CONFIG_USB_SUSPEND enabled. System PM support is present
-only if the kernel was built with CONFIG_SUSPEND or CONFIG_HIBERNATION
-enabled.
+built with CONFIG_USB_SUSPEND enabled (which depends on
+CONFIG_PM_RUNTIME). System PM support is present only if the kernel
+was built with CONFIG_SUSPEND or CONFIG_HIBERNATION enabled.
What is Remote Wakeup?
@@ -229,6 +229,11 @@
also change the idle-delay time; 2 seconds is not the best choice for
every device.
+If a driver knows that its device has proper suspend/resume support,
+it can enable autosuspend all by itself. For example, the video
+driver for a laptop's webcam might do this, since these devices are
+rarely used and so should normally be autosuspended.
+
Sometimes it turns out that even when a device does work okay with
autosuspend there are still problems. For example, there are
experimental patches adding autosuspend support to the usbhid driver,
@@ -321,69 +326,81 @@
void usb_autopm_get_interface_no_resume(struct usb_interface *intf);
void usb_autopm_put_interface_no_suspend(struct usb_interface *intf);
-The functions work by maintaining a counter in the usb_interface
-structure. When intf->pm_usage_count is > 0 then the interface is
-deemed to be busy, and the kernel will not autosuspend the interface's
-device. When intf->pm_usage_count is <= 0 then the interface is
-considered to be idle, and the kernel may autosuspend the device.
+The functions work by maintaining a usage counter in the
+usb_interface's embedded device structure. When the counter is > 0
+then the interface is deemed to be busy, and the kernel will not
+autosuspend the interface's device. When the usage counter is = 0
+then the interface is considered to be idle, and the kernel may
+autosuspend the device.
-(There is a similar pm_usage_count field in struct usb_device,
+(There is a similar usage counter field in struct usb_device,
associated with the device itself rather than any of its interfaces.
-This field is used only by the USB core.)
+This counter is used only by the USB core.)
-Drivers must not modify intf->pm_usage_count directly; its value
-should be changed only be using the functions listed above. Drivers
-are responsible for insuring that the overall change to pm_usage_count
-during their lifetime balances out to 0 (it may be necessary for the
-disconnect method to call usb_autopm_put_interface() one or more times
-to fulfill this requirement). The first two routines use the PM mutex
-in struct usb_device for mutual exclusion; drivers using the async
-routines are responsible for their own synchronization and mutual
-exclusion.
+Drivers need not be concerned about balancing changes to the usage
+counter; the USB core will undo any remaining "get"s when a driver
+is unbound from its interface. As a corollary, drivers must not call
+any of the usb_autopm_* functions after their disconnect() routine has
+returned.
- usb_autopm_get_interface() increments pm_usage_count and
- attempts an autoresume if the new value is > 0 and the
- device is suspended.
+Drivers using the async routines are responsible for their own
+synchronization and mutual exclusion.
- usb_autopm_put_interface() decrements pm_usage_count and
- attempts an autosuspend if the new value is <= 0 and the
- device isn't suspended.
+ usb_autopm_get_interface() increments the usage counter and
+ does an autoresume if the device is suspended. If the
+ autoresume fails, the counter is decremented back.
+
+ usb_autopm_put_interface() decrements the usage counter and
+ attempts an autosuspend if the new value is = 0.
usb_autopm_get_interface_async() and
usb_autopm_put_interface_async() do almost the same things as
- their non-async counterparts. The differences are: they do
- not acquire the PM mutex, and they use a workqueue to do their
+ their non-async counterparts. The big difference is that they
+ use a workqueue to do the resume or suspend part of their
jobs. As a result they can be called in an atomic context,
such as an URB's completion handler, but when they return the
- device will not generally not yet be in the desired state.
+ device will generally not yet be in the desired state.
usb_autopm_get_interface_no_resume() and
usb_autopm_put_interface_no_suspend() merely increment or
- decrement the pm_usage_count value; they do not attempt to
- carry out an autoresume or an autosuspend. Hence they can be
- called in an atomic context.
+ decrement the usage counter; they do not attempt to carry out
+ an autoresume or an autosuspend. Hence they can be called in
+ an atomic context.
-The conventional usage pattern is that a driver calls
+The simplest usage pattern is that a driver calls
usb_autopm_get_interface() in its open routine and
-usb_autopm_put_interface() in its close or release routine. But
-other patterns are possible.
+usb_autopm_put_interface() in its close or release routine. But other
+patterns are possible.
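+
+For instance (a sketch; the foo_* names and private structure are
+hypothetical):
+
+	static int foo_open(struct inode *inode, struct file *file)
+	{
+		struct foo_device *foo = foo_lookup(inode);
+		int rc;
+
+		/* Mark the interface busy, resuming the device if needed */
+		rc = usb_autopm_get_interface(foo->intf);
+		if (rc < 0)
+			return rc;
+		file->private_data = foo;
+		return 0;
+	}
+
+	static int foo_release(struct inode *inode, struct file *file)
+	{
+		struct foo_device *foo = file->private_data;
+
+		/* Drop the reference; the device may now be autosuspended */
+		usb_autopm_put_interface(foo->intf);
+		return 0;
+	}
+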
The autosuspend attempts mentioned above will often fail for one
reason or another. For example, the power/level attribute might be
set to "on", or another interface in the same device might not be
idle. This is perfectly normal. If the reason for failure was that
-the device hasn't been idle for long enough, a delayed workqueue
-routine is automatically set up to carry out the operation when the
-autosuspend idle-delay has expired.
+the device hasn't been idle for long enough, a timer is scheduled to
+carry out the operation automatically when the autosuspend idle-delay
+has expired.
Autoresume attempts also can fail, although failure would mean that
the device is no longer present or operating properly. Unlike
-autosuspend, there's no delay for an autoresume.
+autosuspend, there's no idle-delay for an autoresume.
Other parts of the driver interface
-----------------------------------
+Drivers can enable autosuspend for their devices by calling
+
+ usb_enable_autosuspend(struct usb_device *udev);
+
+in their probe() routine, if they know that the device is capable of
+suspending and resuming correctly. This is exactly equivalent to
+writing "auto" to the device's power/level attribute. Likewise,
+drivers can disable autosuspend by calling
+
+ usb_disable_autosuspend(struct usb_device *udev);
+
+This is exactly the same as writing "on" to the power/level attribute.
+
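+For example (a sketch; the foo_* names are hypothetical):
+
+	static int foo_probe(struct usb_interface *intf,
+			     const struct usb_device_id *id)
+	{
+		struct usb_device *udev = interface_to_usbdev(intf);
+
+		/* This device is known to suspend and resume correctly */
+		usb_enable_autosuspend(udev);
+
+		/* ... the rest of the normal probe work ... */
+		return 0;
+	}
+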
Sometimes a driver needs to make sure that remote wakeup is enabled
during autosuspend. For example, there's not much point
autosuspending a keyboard if the user can't cause the keyboard to do a
@@ -395,26 +412,27 @@
Normally a driver would set this flag in its probe method, at which
time the device is guaranteed not to be autosuspended.)
-The synchronous usb_autopm_* routines have to run in a sleepable
-process context; they must not be called from an interrupt handler or
-while holding a spinlock. In fact, the entire autosuspend mechanism
-is not well geared toward interrupt-driven operation. However there
-is one thing a driver can do in an interrupt handler:
+If a driver does its I/O asynchronously in interrupt context, it
+should call usb_autopm_get_interface_async() before starting output and
+usb_autopm_put_interface_async() when the output queue drains. When
+it receives an input event, it should call
usb_mark_last_busy(struct usb_device *udev);
-This sets udev->last_busy to the current time. udev->last_busy is the
-field used for idle-delay calculations; updating it will cause any
-pending autosuspend to be moved back. The usb_autopm_* routines will
-also set the last_busy field to the current time.
+in the event handler. This sets udev->last_busy to the current time.
+udev->last_busy is the field used for idle-delay calculations;
+updating it will cause any pending autosuspend to be moved back. Most
+of the usb_autopm_* routines will also set the last_busy field to the
+current time.
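+
+A sketch of an interrupt-in completion handler doing this (the foo_*
+names are hypothetical):
+
+	static void foo_int_callback(struct urb *urb)
+	{
+		struct foo_device *foo = urb->context;
+
+		if (urb->status == 0) {
+			/* Push back the idle-delay timer */
+			usb_mark_last_busy(foo->udev);
+			foo_handle_input(foo, urb->transfer_buffer,
+					 urb->actual_length);
+		}
+
+		/* Resubmit; a real driver would check the return value */
+		usb_submit_urb(urb, GFP_ATOMIC);
+	}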
-Calling urb_mark_last_busy() from within an URB completion handler is
-subject to races: The kernel may have just finished deciding the
-device has been idle for long enough but not yet gotten around to
-calling the driver's suspend method. The driver would have to be
-responsible for synchronizing its suspend method with its URB
-completion handler and causing the autosuspend to fail with -EBUSY if
-an URB had completed too recently.
+Asynchronous operation is always subject to races. For example, a
+driver may call one of the usb_autopm_*_interface_async() routines at
+a time when the core has just finished deciding the device has been
+idle for long enough but not yet gotten around to calling the driver's
+suspend method. The suspend method must be responsible for
+synchronizing with the output request routine and the URB completion
+handler; it should cause autosuspends to fail with -EBUSY if the
+driver needs to use the device.
External suspend calls should never be allowed to fail in this way,
only autosuspend calls. The driver can tell them apart by checking
@@ -422,75 +440,23 @@
method; this bit will be set for internal PM events (autosuspend) and
clear for external PM events.
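+
+For instance, a suspend() method might refuse only internal requests
+(a sketch; the bit name and the foo_* helpers are assumptions):
+
+	static int foo_suspend(struct usb_interface *intf, pm_message_t message)
+	{
+		struct foo_device *foo = usb_get_intfdata(intf);
+
+		/* Fail only autosuspend requests while I/O is pending */
+		if ((message.event & PM_EVENT_AUTO) && foo_is_busy(foo))
+			return -EBUSY;
+
+		foo_quiesce_io(foo);
+		return 0;
+	}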
-Many of the ingredients in the autosuspend framework are oriented
-towards interfaces: The usb_interface structure contains the
-pm_usage_cnt field, and the usb_autopm_* routines take an interface
-pointer as their argument. But somewhat confusingly, a few of the
-pieces (i.e., usb_mark_last_busy()) use the usb_device structure
-instead. Drivers need to keep this straight; they can call
-interface_to_usbdev() to find the device structure for a given
-interface.
+ Mutual exclusion
+ ----------------
- Locking requirements
- --------------------
-
-All three suspend/resume methods are always called while holding the
-usb_device's PM mutex. For external events -- but not necessarily for
-autosuspend or autoresume -- the device semaphore (udev->dev.sem) will
-also be held. This implies that external suspend/resume events are
-mutually exclusive with calls to probe, disconnect, pre_reset, and
-post_reset; the USB core guarantees that this is true of internal
-suspend/resume events as well.
+For external events -- but not necessarily for autosuspend or
+autoresume -- the device semaphore (udev->dev.sem) will be held when a
+suspend or resume method is called. This implies that external
+suspend/resume events are mutually exclusive with calls to probe,
+disconnect, pre_reset, and post_reset; the USB core guarantees that
+this is true of autosuspend/autoresume events as well.
If a driver wants to block all suspend/resume calls during some
-critical section, it can simply acquire udev->pm_mutex. Note that
-calls to resume may be triggered indirectly. Block IO due to memory
-allocations can make the vm subsystem resume a device. Thus while
-holding this lock you must not allocate memory with GFP_KERNEL or
-GFP_NOFS.
-
-Alternatively, if the critical section might call some of the
-usb_autopm_* routines, the driver can avoid deadlock by doing:
-
- down(&udev->dev.sem);
- rc = usb_autopm_get_interface(intf);
-
-and at the end of the critical section:
-
- if (!rc)
- usb_autopm_put_interface(intf);
- up(&udev->dev.sem);
-
-Holding the device semaphore will block all external PM calls, and the
-usb_autopm_get_interface() will prevent any internal PM calls, even if
-it fails. (Exercise: Why?)
-
-The rules for locking order are:
-
- Never acquire any device semaphore while holding any PM mutex.
-
- Never acquire udev->pm_mutex while holding the PM mutex for
- a device that isn't a descendant of udev.
-
-In other words, PM mutexes should only be acquired going up the device
-tree, and they should be acquired only after locking all the device
-semaphores you need to hold. These rules don't matter to drivers very
-much; they usually affect just the USB core.
-
-Still, drivers do need to be careful. For example, many drivers use a
-private mutex to synchronize their normal I/O activities with their
-disconnect method. Now if the driver supports autosuspend then it
-must call usb_autopm_put_interface() from somewhere -- maybe from its
-close method. It should make the call while holding the private mutex,
-since a driver shouldn't call any of the usb_autopm_* functions for an
-interface from which it has been unbound.
-
-But the usb_autpm_* routines always acquire the device's PM mutex, and
-consequently the locking order has to be: private mutex first, PM
-mutex second. Since the suspend method is always called with the PM
-mutex held, it mustn't try to acquire the private mutex. It has to
-synchronize with the driver's I/O activities in some other way.
+critical section, the best way is to lock the device and call
+usb_autopm_get_interface() (and do the reverse at the end of the
+critical section). Holding the device semaphore will block all
+external PM calls, and the usb_autopm_get_interface() will prevent any
+internal PM calls, even if it fails. (Exercise: Why?)
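+
+In outline (a sketch; usb_lock_device()/usb_unlock_device() are assumed
+here to be the way the device lock is taken):
+
+	struct usb_device *udev = interface_to_usbdev(intf);
+	int rc;
+
+	usb_lock_device(udev);			/* blocks external PM calls */
+	rc = usb_autopm_get_interface(intf);	/* blocks internal PM calls */
+
+	/* ... critical section ... */
+
+	if (rc == 0)
+		usb_autopm_put_interface(intf);
+	usb_unlock_device(udev);
+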
Interaction between dynamic PM and system PM
@@ -499,22 +465,11 @@
Dynamic power management and system power management can interact in
a couple of ways.
-Firstly, a device may already be manually suspended or autosuspended
-when a system suspend occurs. Since system suspends are supposed to
-be as transparent as possible, the device should remain suspended
-following the system resume. The 2.6.23 kernel obeys this principle
-for manually suspended devices but not for autosuspended devices; they
-do get resumed when the system wakes up. (Presumably they will be
-autosuspended again after their idle-delay time expires.) In later
-kernels this behavior will be fixed.
-
-(There is an exception. If a device would undergo a reset-resume
-instead of a normal resume, and the device is enabled for remote
-wakeup, then the reset-resume takes place even if the device was
-already suspended when the system suspend began. The justification is
-that a reset-resume is a kind of remote-wakeup event. Or to put it
-another way, a device which needs a reset won't be able to generate
-normal remote-wakeup signals, so it ought to be resumed immediately.)
+Firstly, a device may already be autosuspended when a system suspend
+occurs. Since system suspends are supposed to be as transparent as
+possible, the device should remain suspended following the system
+resume. But this theory may not work out well in practice; over time
+the kernel's behavior in this regard has changed.
Secondly, a dynamic power-management event may occur as a system
suspend is underway. The window for this is short, since system
diff --git a/MAINTAINERS b/MAINTAINERS
index 34f52a1..c6591bc 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -6097,6 +6097,7 @@
X86 PLATFORM DRIVERS
M: Matthew Garrett <mjg@redhat.com>
L: platform-driver-x86@vger.kernel.org
+T: git git://git.kernel.org/pub/scm/linux/kernel/git/mjg59/platform-drivers-x86.git
S: Maintained
F: drivers/platform/x86
diff --git a/arch/alpha/include/asm/local.h b/arch/alpha/include/asm/local.h
index 6ad3ea6..b9e3e33 100644
--- a/arch/alpha/include/asm/local.h
+++ b/arch/alpha/include/asm/local.h
@@ -98,21 +98,4 @@
#define __local_add(i,l) ((l)->a.counter+=(i))
#define __local_sub(i,l) ((l)->a.counter-=(i))
-/* Use these for per-cpu local_t variables: on some archs they are
- * much more efficient than these naive implementations. Note they take
- * a variable, not an address.
- */
-#define cpu_local_read(l) local_read(&__get_cpu_var(l))
-#define cpu_local_set(l, i) local_set(&__get_cpu_var(l), (i))
-
-#define cpu_local_inc(l) local_inc(&__get_cpu_var(l))
-#define cpu_local_dec(l) local_dec(&__get_cpu_var(l))
-#define cpu_local_add(i, l) local_add((i), &__get_cpu_var(l))
-#define cpu_local_sub(i, l) local_sub((i), &__get_cpu_var(l))
-
-#define __cpu_local_inc(l) __local_inc(&__get_cpu_var(l))
-#define __cpu_local_dec(l) __local_dec(&__get_cpu_var(l))
-#define __cpu_local_add(i, l) __local_add((i), &__get_cpu_var(l))
-#define __cpu_local_sub(i, l) __local_sub((i), &__get_cpu_var(l))
-
#endif /* _ALPHA_LOCAL_H */
diff --git a/arch/arm/configs/rx51_defconfig b/arch/arm/configs/rx51_defconfig
index 426ae94..193bd33 100644
--- a/arch/arm/configs/rx51_defconfig
+++ b/arch/arm/configs/rx51_defconfig
@@ -445,6 +445,8 @@
# CONFIG_LAPB is not set
# CONFIG_ECONET is not set
# CONFIG_WAN_ROUTER is not set
+CONFIG_PHONET=y
+# CONFIG_IEEE802154 is not set
# CONFIG_NET_SCHED is not set
# CONFIG_DCB is not set
@@ -1325,27 +1327,34 @@
# CONFIG_USB_GADGET_LH7A40X is not set
# CONFIG_USB_GADGET_OMAP is not set
# CONFIG_USB_GADGET_PXA25X is not set
+# CONFIG_USB_GADGET_R8A66597 is not set
# CONFIG_USB_GADGET_PXA27X is not set
-# CONFIG_USB_GADGET_S3C2410 is not set
+# CONFIG_USB_GADGET_S3C_HSOTG is not set
# CONFIG_USB_GADGET_IMX is not set
+# CONFIG_USB_GADGET_S3C2410 is not set
# CONFIG_USB_GADGET_M66592 is not set
# CONFIG_USB_GADGET_AMD5536UDC is not set
# CONFIG_USB_GADGET_FSL_QE is not set
# CONFIG_USB_GADGET_CI13XXX is not set
# CONFIG_USB_GADGET_NET2280 is not set
# CONFIG_USB_GADGET_GOKU is not set
+# CONFIG_USB_GADGET_LANGWELL is not set
# CONFIG_USB_GADGET_DUMMY_HCD is not set
CONFIG_USB_GADGET_DUALSPEED=y
CONFIG_USB_ZERO=m
# CONFIG_USB_ZERO_HNPTEST is not set
+# CONFIG_USB_AUDIO is not set
# CONFIG_USB_ETH is not set
# CONFIG_USB_GADGETFS is not set
CONFIG_USB_FILE_STORAGE=m
# CONFIG_USB_FILE_STORAGE_TEST is not set
+# CONFIG_USB_MASS_STORAGE is not set
# CONFIG_USB_G_SERIAL is not set
# CONFIG_USB_MIDI_GADGET is not set
# CONFIG_USB_G_PRINTER is not set
# CONFIG_USB_CDC_COMPOSITE is not set
+CONFIG_USB_G_NOKIA=m
+# CONFIG_USB_G_MULTI is not set
#
# OTG and related infrastructure
diff --git a/arch/arm/mach-mx2/devices.c b/arch/arm/mach-mx2/devices.c
index 3d398ce..3956d82 100644
--- a/arch/arm/mach-mx2/devices.c
+++ b/arch/arm/mach-mx2/devices.c
@@ -31,6 +31,7 @@
#include <linux/init.h>
#include <linux/platform_device.h>
#include <linux/gpio.h>
+#include <linux/dma-mapping.h>
#include <mach/irqs.h>
#include <mach/hardware.h>
@@ -292,7 +293,7 @@
.num_resources = ARRAY_SIZE(mxc_fb),
.resource = mxc_fb,
.dev = {
- .coherent_dma_mask = 0xFFFFFFFF,
+ .coherent_dma_mask = DMA_BIT_MASK(32),
},
};
@@ -395,17 +396,17 @@
},
};
-static u64 mxc_sdhc1_dmamask = 0xffffffffUL;
+static u64 mxc_sdhc1_dmamask = DMA_BIT_MASK(32);
struct platform_device mxc_sdhc_device0 = {
- .name = "mxc-mmc",
- .id = 0,
- .dev = {
- .dma_mask = &mxc_sdhc1_dmamask,
- .coherent_dma_mask = 0xffffffff,
- },
- .num_resources = ARRAY_SIZE(mxc_sdhc1_resources),
- .resource = mxc_sdhc1_resources,
+ .name = "mxc-mmc",
+ .id = 0,
+ .dev = {
+ .dma_mask = &mxc_sdhc1_dmamask,
+ .coherent_dma_mask = DMA_BIT_MASK(32),
+ },
+ .num_resources = ARRAY_SIZE(mxc_sdhc1_resources),
+ .resource = mxc_sdhc1_resources,
};
static struct resource mxc_sdhc2_resources[] = {
@@ -424,17 +425,17 @@
},
};
-static u64 mxc_sdhc2_dmamask = 0xffffffffUL;
+static u64 mxc_sdhc2_dmamask = DMA_BIT_MASK(32);
struct platform_device mxc_sdhc_device1 = {
- .name = "mxc-mmc",
- .id = 1,
- .dev = {
- .dma_mask = &mxc_sdhc2_dmamask,
- .coherent_dma_mask = 0xffffffff,
- },
- .num_resources = ARRAY_SIZE(mxc_sdhc2_resources),
- .resource = mxc_sdhc2_resources,
+ .name = "mxc-mmc",
+ .id = 1,
+ .dev = {
+ .dma_mask = &mxc_sdhc2_dmamask,
+ .coherent_dma_mask = DMA_BIT_MASK(32),
+ },
+ .num_resources = ARRAY_SIZE(mxc_sdhc2_resources),
+ .resource = mxc_sdhc2_resources,
};
#ifdef CONFIG_MACH_MX27
@@ -450,7 +451,7 @@
},
};
-static u64 otg_dmamask = 0xffffffffUL;
+static u64 otg_dmamask = DMA_BIT_MASK(32);
/* OTG gadget device */
struct platform_device mxc_otg_udc_device = {
@@ -458,7 +459,7 @@
.id = -1,
.dev = {
.dma_mask = &otg_dmamask,
- .coherent_dma_mask = 0xffffffffUL,
+ .coherent_dma_mask = DMA_BIT_MASK(32),
},
.resource = otg_resources,
.num_resources = ARRAY_SIZE(otg_resources),
@@ -469,7 +470,7 @@
.name = "mxc-ehci",
.id = 0,
.dev = {
- .coherent_dma_mask = 0xffffffff,
+ .coherent_dma_mask = DMA_BIT_MASK(32),
.dma_mask = &otg_dmamask,
},
.resource = otg_resources,
@@ -478,7 +479,7 @@
/* USB host 1 */
-static u64 usbh1_dmamask = 0xffffffffUL;
+static u64 usbh1_dmamask = DMA_BIT_MASK(32);
static struct resource mxc_usbh1_resources[] = {
{
@@ -496,7 +497,7 @@
.name = "mxc-ehci",
.id = 1,
.dev = {
- .coherent_dma_mask = 0xffffffff,
+ .coherent_dma_mask = DMA_BIT_MASK(32),
.dma_mask = &usbh1_dmamask,
},
.resource = mxc_usbh1_resources,
@@ -504,7 +505,7 @@
};
/* USB host 2 */
-static u64 usbh2_dmamask = 0xffffffffUL;
+static u64 usbh2_dmamask = DMA_BIT_MASK(32);
static struct resource mxc_usbh2_resources[] = {
{
@@ -522,7 +523,7 @@
.name = "mxc-ehci",
.id = 2,
.dev = {
- .coherent_dma_mask = 0xffffffff,
+ .coherent_dma_mask = DMA_BIT_MASK(32),
.dma_mask = &usbh2_dmamask,
},
.resource = mxc_usbh2_resources,
@@ -642,3 +643,30 @@
{
return mxc_gpio_init(imx_gpio_ports, ARRAY_SIZE(imx_gpio_ports));
}
+
+#ifdef CONFIG_MACH_MX21
+static struct resource mx21_usbhc_resources[] = {
+ {
+ .start = USBOTG_BASE_ADDR,
+ .end = USBOTG_BASE_ADDR + 0x1FFF,
+ .flags = IORESOURCE_MEM,
+ },
+ {
+ .start = MXC_INT_USBHOST,
+ .end = MXC_INT_USBHOST,
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
+struct platform_device mx21_usbhc_device = {
+ .name = "imx21-hcd",
+ .id = 0,
+ .dev = {
+ .dma_mask = &mx21_usbhc_device.dev.coherent_dma_mask,
+ .coherent_dma_mask = DMA_BIT_MASK(32),
+ },
+ .num_resources = ARRAY_SIZE(mx21_usbhc_resources),
+ .resource = mx21_usbhc_resources,
+};
+#endif
+
diff --git a/arch/arm/mach-mx2/devices.h b/arch/arm/mach-mx2/devices.h
index 97306aa..f12694b 100644
--- a/arch/arm/mach-mx2/devices.h
+++ b/arch/arm/mach-mx2/devices.h
@@ -26,5 +26,6 @@
extern struct platform_device mxc_spi_device0;
extern struct platform_device mxc_spi_device1;
extern struct platform_device mxc_spi_device2;
+extern struct platform_device mx21_usbhc_device;
extern struct platform_device imx_ssi_device0;
extern struct platform_device imx_ssi_device1;
diff --git a/arch/arm/plat-mxc/include/mach/mx21-usbhost.h b/arch/arm/plat-mxc/include/mach/mx21-usbhost.h
new file mode 100644
index 0000000..22d0b59
--- /dev/null
+++ b/arch/arm/plat-mxc/include/mach/mx21-usbhost.h
@@ -0,0 +1,38 @@
+/*
+ * Copyright (C) 2009 Martin Fuzzey <mfuzzey@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __ASM_ARCH_MX21_USBH
+#define __ASM_ARCH_MX21_USBH
+
+enum mx21_usbh_xcvr {
+ /* Values below as used by hardware (HWMODE register) */
+ MX21_USBXCVR_TXDIF_RXDIF = 0,
+ MX21_USBXCVR_TXDIF_RXSE = 1,
+ MX21_USBXCVR_TXSE_RXDIF = 2,
+ MX21_USBXCVR_TXSE_RXSE = 3,
+};
+
+struct mx21_usbh_platform_data {
+	enum mx21_usbh_xcvr host_xcvr; /* transceiver mode host 1,2 ports */
+	enum mx21_usbh_xcvr otg_xcvr; /* transceiver mode otg (as host) port */
+ u16 enable_host1:1,
+ enable_host2:1,
+ enable_otg_host:1, /* enable "OTG" port (as host) */
+		host1_xcverless:1, /* transceiverless host1 port */
+ host1_txenoe:1, /* output enable host1 transmit enable */
+		otg_ext_xcvr:1, /* external transceiver for OTG port */
+ unused:10;
+};
+
+#endif /* __ASM_ARCH_MX21_USBH */
diff --git a/arch/avr32/mach-at32ap/at32ap700x.c b/arch/avr32/mach-at32ap/at32ap700x.c
index b13d187..3a4bc1a 100644
--- a/arch/avr32/mach-at32ap/at32ap700x.c
+++ b/arch/avr32/mach-at32ap/at32ap700x.c
@@ -1770,10 +1770,13 @@
ARRAY_SIZE(usba0_resource)))
goto out_free_pdev;
- if (data)
+ if (data) {
usba_data.pdata.vbus_pin = data->vbus_pin;
- else
+ usba_data.pdata.vbus_pin_inverted = data->vbus_pin_inverted;
+ } else {
usba_data.pdata.vbus_pin = -EINVAL;
+ usba_data.pdata.vbus_pin_inverted = -EINVAL;
+ }
data = &usba_data.pdata;
data->num_ep = ARRAY_SIZE(at32_usba_ep);
diff --git a/arch/blackfin/mach-common/entry.S b/arch/blackfin/mach-common/entry.S
index b0ed0b4..01b2f58 100644
--- a/arch/blackfin/mach-common/entry.S
+++ b/arch/blackfin/mach-common/entry.S
@@ -816,8 +816,8 @@
ENTRY(_ret_from_exception)
#ifdef CONFIG_IPIPE
- p2.l = _per_cpu__ipipe_percpu_domain;
- p2.h = _per_cpu__ipipe_percpu_domain;
+ p2.l = _ipipe_percpu_domain;
+ p2.h = _ipipe_percpu_domain;
r0.l = _ipipe_root;
r0.h = _ipipe_root;
r2 = [p2];
diff --git a/arch/cris/arch-v10/kernel/entry.S b/arch/cris/arch-v10/kernel/entry.S
index 2c18d08..c52bef3 100644
--- a/arch/cris/arch-v10/kernel/entry.S
+++ b/arch/cris/arch-v10/kernel/entry.S
@@ -358,7 +358,7 @@
1: btstq 12, $r1 ; Refill?
bpl 2f
lsrq 24, $r1 ; Get PGD index (bit 24-31)
- move.d [per_cpu__current_pgd], $r0 ; PGD for the current process
+ move.d [current_pgd], $r0 ; PGD for the current process
move.d [$r0+$r1.d], $r0 ; Get PMD
beq 2f
nop
diff --git a/arch/cris/arch-v32/mm/mmu.S b/arch/cris/arch-v32/mm/mmu.S
index 2238d15..f125d91 100644
--- a/arch/cris/arch-v32/mm/mmu.S
+++ b/arch/cris/arch-v32/mm/mmu.S
@@ -115,7 +115,7 @@
#ifdef CONFIG_SMP
move $s7, $acr ; PGD
#else
- move.d per_cpu__current_pgd, $acr ; PGD
+ move.d current_pgd, $acr ; PGD
#endif
; Look up PMD in PGD
lsrq 24, $r0 ; Get PMD index into PGD (bit 24-31)
diff --git a/arch/ia64/include/asm/percpu.h b/arch/ia64/include/asm/percpu.h
index 30cf465..f7c00a5 100644
--- a/arch/ia64/include/asm/percpu.h
+++ b/arch/ia64/include/asm/percpu.h
@@ -9,7 +9,7 @@
#define PERCPU_ENOUGH_ROOM PERCPU_PAGE_SIZE
#ifdef __ASSEMBLY__
-# define THIS_CPU(var) (per_cpu__##var) /* use this to mark accesses to per-CPU variables... */
+# define THIS_CPU(var) (var) /* use this to mark accesses to per-CPU variables... */
#else /* !__ASSEMBLY__ */
@@ -39,7 +39,7 @@
* On the positive side, using __ia64_per_cpu_var() instead of __get_cpu_var() is slightly
* more efficient.
*/
-#define __ia64_per_cpu_var(var) per_cpu__##var
+#define __ia64_per_cpu_var(var) var
#include <asm-generic/percpu.h>
diff --git a/arch/ia64/include/asm/xen/events.h b/arch/ia64/include/asm/xen/events.h
index b8370c8..baa74c8 100644
--- a/arch/ia64/include/asm/xen/events.h
+++ b/arch/ia64/include/asm/xen/events.h
@@ -36,10 +36,6 @@
return !(ia64_psr(regs)->i);
}
-static inline void handle_irq(int irq, struct pt_regs *regs)
-{
- __do_IRQ(irq);
-}
#define irq_ctx_init(cpu) do { } while (0)
#endif /* _ASM_IA64_XEN_EVENTS_H */
diff --git a/arch/ia64/kernel/acpi.c b/arch/ia64/kernel/acpi.c
index c16fb03..a7ca07f 100644
--- a/arch/ia64/kernel/acpi.c
+++ b/arch/ia64/kernel/acpi.c
@@ -852,8 +852,8 @@
possible = available_cpus + additional_cpus;
- if (possible > NR_CPUS)
- possible = NR_CPUS;
+ if (possible > nr_cpu_ids)
+ possible = nr_cpu_ids;
printk(KERN_INFO "SMP: Allowing %d CPUs, %d hotplug CPUs\n",
possible, max((possible - available_cpus), 0));
diff --git a/arch/ia64/kernel/ia64_ksyms.c b/arch/ia64/kernel/ia64_ksyms.c
index 461b999..7f4a0ed 100644
--- a/arch/ia64/kernel/ia64_ksyms.c
+++ b/arch/ia64/kernel/ia64_ksyms.c
@@ -30,9 +30,9 @@
#endif
#include <asm/processor.h>
-EXPORT_SYMBOL(per_cpu__ia64_cpu_info);
+EXPORT_SYMBOL(ia64_cpu_info);
#ifdef CONFIG_SMP
-EXPORT_SYMBOL(per_cpu__local_per_cpu_offset);
+EXPORT_SYMBOL(local_per_cpu_offset);
#endif
#include <asm/uaccess.h>
diff --git a/arch/ia64/mm/discontig.c b/arch/ia64/mm/discontig.c
index 19c4b21..8d586d1 100644
--- a/arch/ia64/mm/discontig.c
+++ b/arch/ia64/mm/discontig.c
@@ -459,7 +459,7 @@
cpu = 0;
node = node_cpuid[cpu].nid;
cpu0_cpu_info = (struct cpuinfo_ia64 *)(__phys_per_cpu_start +
- ((char *)&per_cpu__ia64_cpu_info - __per_cpu_start));
+ ((char *)&ia64_cpu_info - __per_cpu_start));
cpu0_cpu_info->node_data = mem_data[node].node_data;
}
#endif /* CONFIG_SMP */
diff --git a/arch/m32r/include/asm/local.h b/arch/m32r/include/asm/local.h
index 22256d1..734bca8 100644
--- a/arch/m32r/include/asm/local.h
+++ b/arch/m32r/include/asm/local.h
@@ -338,29 +338,4 @@
* a variable, not an address.
*/
-/* Need to disable preemption for the cpu local counters otherwise we could
- still access a variable of a previous CPU in a non local way. */
-#define cpu_local_wrap_v(l) \
- ({ local_t res__; \
- preempt_disable(); \
- res__ = (l); \
- preempt_enable(); \
- res__; })
-#define cpu_local_wrap(l) \
- ({ preempt_disable(); \
- l; \
- preempt_enable(); }) \
-
-#define cpu_local_read(l) cpu_local_wrap_v(local_read(&__get_cpu_var(l)))
-#define cpu_local_set(l, i) cpu_local_wrap(local_set(&__get_cpu_var(l), (i)))
-#define cpu_local_inc(l) cpu_local_wrap(local_inc(&__get_cpu_var(l)))
-#define cpu_local_dec(l) cpu_local_wrap(local_dec(&__get_cpu_var(l)))
-#define cpu_local_add(i, l) cpu_local_wrap(local_add((i), &__get_cpu_var(l)))
-#define cpu_local_sub(i, l) cpu_local_wrap(local_sub((i), &__get_cpu_var(l)))
-
-#define __cpu_local_inc(l) cpu_local_inc(l)
-#define __cpu_local_dec(l) cpu_local_dec(l)
-#define __cpu_local_add(i, l) cpu_local_add((i), (l))
-#define __cpu_local_sub(i, l) cpu_local_sub((i), (l))
-
#endif /* __M32R_LOCAL_H */
diff --git a/arch/microblaze/include/asm/entry.h b/arch/microblaze/include/asm/entry.h
index 61abbd2..ec89f2a 100644
--- a/arch/microblaze/include/asm/entry.h
+++ b/arch/microblaze/include/asm/entry.h
@@ -21,7 +21,7 @@
* places
*/
-#define PER_CPU(var) per_cpu__##var
+#define PER_CPU(var) var
# ifndef __ASSEMBLY__
DECLARE_PER_CPU(unsigned int, KSP); /* Saved kernel stack pointer */
diff --git a/arch/mips/include/asm/local.h b/arch/mips/include/asm/local.h
index 361f4f1..bdcdef0 100644
--- a/arch/mips/include/asm/local.h
+++ b/arch/mips/include/asm/local.h
@@ -193,29 +193,4 @@
#define __local_add(i, l) ((l)->a.counter+=(i))
#define __local_sub(i, l) ((l)->a.counter-=(i))
-/* Need to disable preemption for the cpu local counters otherwise we could
- still access a variable of a previous CPU in a non atomic way. */
-#define cpu_local_wrap_v(l) \
- ({ local_t res__; \
- preempt_disable(); \
- res__ = (l); \
- preempt_enable(); \
- res__; })
-#define cpu_local_wrap(l) \
- ({ preempt_disable(); \
- l; \
- preempt_enable(); }) \
-
-#define cpu_local_read(l) cpu_local_wrap_v(local_read(&__get_cpu_var(l)))
-#define cpu_local_set(l, i) cpu_local_wrap(local_set(&__get_cpu_var(l), (i)))
-#define cpu_local_inc(l) cpu_local_wrap(local_inc(&__get_cpu_var(l)))
-#define cpu_local_dec(l) cpu_local_wrap(local_dec(&__get_cpu_var(l)))
-#define cpu_local_add(i, l) cpu_local_wrap(local_add((i), &__get_cpu_var(l)))
-#define cpu_local_sub(i, l) cpu_local_wrap(local_sub((i), &__get_cpu_var(l)))
-
-#define __cpu_local_inc(l) cpu_local_inc(l)
-#define __cpu_local_dec(l) cpu_local_dec(l)
-#define __cpu_local_add(i, l) cpu_local_add((i), (l))
-#define __cpu_local_sub(i, l) cpu_local_sub((i), (l))
-
#endif /* _ARCH_MIPS_LOCAL_H */
diff --git a/arch/parisc/lib/fixup.S b/arch/parisc/lib/fixup.S
index d172d42..f8c45cc 100644
--- a/arch/parisc/lib/fixup.S
+++ b/arch/parisc/lib/fixup.S
@@ -36,8 +36,8 @@
#endif
/* t2 = &__per_cpu_offset[smp_processor_id()]; */
LDREGX \t2(\t1),\t2
- addil LT%per_cpu__exception_data,%r27
- LDREG RT%per_cpu__exception_data(%r1),\t1
+ addil LT%exception_data,%r27
+ LDREG RT%exception_data(%r1),\t1
/* t1 = &__get_cpu_var(exception_data) */
add,l \t1,\t2,\t1
/* t1 = t1->fault_ip */
@@ -46,8 +46,8 @@
#else
.macro get_fault_ip t1 t2
/* t1 = &__get_cpu_var(exception_data) */
- addil LT%per_cpu__exception_data,%r27
- LDREG RT%per_cpu__exception_data(%r1),\t2
+ addil LT%exception_data,%r27
+ LDREG RT%exception_data(%r1),\t2
/* t1 = t2->fault_ip */
LDREG EXCDATA_IP(\t2), \t1
.endm
diff --git a/arch/powerpc/include/asm/local.h b/arch/powerpc/include/asm/local.h
index ce58c80..c2410af 100644
--- a/arch/powerpc/include/asm/local.h
+++ b/arch/powerpc/include/asm/local.h
@@ -172,29 +172,4 @@
#define __local_add(i,l) ((l)->a.counter+=(i))
#define __local_sub(i,l) ((l)->a.counter-=(i))
-/* Need to disable preemption for the cpu local counters otherwise we could
- still access a variable of a previous CPU in a non atomic way. */
-#define cpu_local_wrap_v(l) \
- ({ local_t res__; \
- preempt_disable(); \
- res__ = (l); \
- preempt_enable(); \
- res__; })
-#define cpu_local_wrap(l) \
- ({ preempt_disable(); \
- l; \
- preempt_enable(); }) \
-
-#define cpu_local_read(l) cpu_local_wrap_v(local_read(&__get_cpu_var(l)))
-#define cpu_local_set(l, i) cpu_local_wrap(local_set(&__get_cpu_var(l), (i)))
-#define cpu_local_inc(l) cpu_local_wrap(local_inc(&__get_cpu_var(l)))
-#define cpu_local_dec(l) cpu_local_wrap(local_dec(&__get_cpu_var(l)))
-#define cpu_local_add(i, l) cpu_local_wrap(local_add((i), &__get_cpu_var(l)))
-#define cpu_local_sub(i, l) cpu_local_wrap(local_sub((i), &__get_cpu_var(l)))
-
-#define __cpu_local_inc(l) cpu_local_inc(l)
-#define __cpu_local_dec(l) cpu_local_dec(l)
-#define __cpu_local_add(i, l) cpu_local_add((i), (l))
-#define __cpu_local_sub(i, l) cpu_local_sub((i), (l))
-
#endif /* _ARCH_POWERPC_LOCAL_H */
diff --git a/arch/sparc/kernel/nmi.c b/arch/sparc/kernel/nmi.c
index d242a73..b287b62 100644
--- a/arch/sparc/kernel/nmi.c
+++ b/arch/sparc/kernel/nmi.c
@@ -21,7 +21,6 @@
#include <asm/perf_event.h>
#include <asm/ptrace.h>
-#include <asm/local.h>
#include <asm/pcr.h>
/* We don't have a real NMI on sparc64, but we can fake one
@@ -113,13 +112,13 @@
touched = 1;
}
if (!touched && __get_cpu_var(last_irq_sum) == sum) {
- __this_cpu_inc(per_cpu_var(alert_counter));
- if (__this_cpu_read(per_cpu_var(alert_counter)) == 30 * nmi_hz)
+ __this_cpu_inc(alert_counter);
+ if (__this_cpu_read(alert_counter) == 30 * nmi_hz)
die_nmi("BUG: NMI Watchdog detected LOCKUP",
regs, panic_on_timeout);
} else {
__get_cpu_var(last_irq_sum) = sum;
- __this_cpu_write(per_cpu_var(alert_counter), 0);
+ __this_cpu_write(alert_counter, 0);
}
if (__get_cpu_var(wd_enabled)) {
write_pic(picl_value(nmi_hz));
diff --git a/arch/sparc/kernel/rtrap_64.S b/arch/sparc/kernel/rtrap_64.S
index fd3cee4..1ddec40 100644
--- a/arch/sparc/kernel/rtrap_64.S
+++ b/arch/sparc/kernel/rtrap_64.S
@@ -149,11 +149,11 @@
rtrap_irq:
rtrap:
#ifndef CONFIG_SMP
- sethi %hi(per_cpu____cpu_data), %l0
- lduw [%l0 + %lo(per_cpu____cpu_data)], %l1
+ sethi %hi(__cpu_data), %l0
+ lduw [%l0 + %lo(__cpu_data)], %l1
#else
- sethi %hi(per_cpu____cpu_data), %l0
- or %l0, %lo(per_cpu____cpu_data), %l0
+ sethi %hi(__cpu_data), %l0
+ or %l0, %lo(__cpu_data), %l0
lduw [%l0 + %g5], %l1
#endif
cmp %l1, 0
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index 0896008..57ccdce 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -184,6 +184,9 @@
config ARCH_SUPPORTS_DEBUG_PAGEALLOC
def_bool y
+config HAVE_EARLY_RES
+ def_bool y
+
config HAVE_INTEL_TXT
def_bool y
depends on EXPERIMENTAL && DMAR && ACPI
@@ -569,6 +572,18 @@
Enable to debug paravirt_ops internals. Specifically, BUG if
a paravirt_op is missing when it is called.
+config NO_BOOTMEM
+ default y
+ bool "Disable Bootmem code"
+ ---help---
+ Use early_res directly instead of bootmem before slab is ready.
+ - allocator (buddy) [generic]
+ - early allocator (bootmem) [generic]
+ - very early allocator (reserve_early*()) [x86]
+ - very very early allocator (early brk model) [x86]
+	  This removes one layer (bootmem) between the early allocators
+	  and the final allocator.
+
+
config MEMTEST
bool "Memtest"
---help---
diff --git a/arch/x86/ia32/ia32_aout.c b/arch/x86/ia32/ia32_aout.c
index 9046e4a..280c019 100644
--- a/arch/x86/ia32/ia32_aout.c
+++ b/arch/x86/ia32/ia32_aout.c
@@ -327,7 +327,6 @@
current->mm->free_area_cache = TASK_UNMAPPED_BASE;
current->mm->cached_hole_size = 0;
- current->mm->mmap = NULL;
install_exec_creds(bprm);
current->flags &= ~PF_FORKNOEXEC;
diff --git a/arch/x86/include/asm/e820.h b/arch/x86/include/asm/e820.h
index 761249e..0e22296 100644
--- a/arch/x86/include/asm/e820.h
+++ b/arch/x86/include/asm/e820.h
@@ -111,11 +111,8 @@
extern u64 find_e820_area(u64 start, u64 end, u64 size, u64 align);
extern u64 find_e820_area_size(u64 start, u64 *sizep, u64 align);
-extern void reserve_early(u64 start, u64 end, char *name);
-extern void reserve_early_overlap_ok(u64 start, u64 end, char *name);
-extern void free_early(u64 start, u64 end);
-extern void early_res_to_bootmem(u64 start, u64 end);
extern u64 early_reserve_e820(u64 startt, u64 sizet, u64 align);
+#include <linux/early_res.h>
extern unsigned long e820_end_of_ram_pfn(void);
extern unsigned long e820_end_of_low_ram_pfn(void);
diff --git a/arch/x86/include/asm/highmem.h b/arch/x86/include/asm/highmem.h
index 014c2b8..a726650 100644
--- a/arch/x86/include/asm/highmem.h
+++ b/arch/x86/include/asm/highmem.h
@@ -66,10 +66,6 @@
void *kmap_atomic_prot_pfn(unsigned long pfn, enum km_type type, pgprot_t prot);
struct page *kmap_atomic_to_page(void *ptr);
-#ifndef CONFIG_PARAVIRT
-#define kmap_atomic_pte(page, type) kmap_atomic(page, type)
-#endif
-
#define flush_cache_kmaps() do { } while (0)
extern void add_highpages_with_active_regions(int nid, unsigned long start_pfn,
diff --git a/arch/x86/include/asm/i8259.h b/arch/x86/include/asm/i8259.h
index 58d7091..7ec65b1 100644
--- a/arch/x86/include/asm/i8259.h
+++ b/arch/x86/include/asm/i8259.h
@@ -24,7 +24,7 @@
#define SLAVE_ICW4_DEFAULT 0x01
#define PIC_ICW4_AEOI 2
-extern spinlock_t i8259A_lock;
+extern raw_spinlock_t i8259A_lock;
extern void init_8259A(int auto_eoi);
extern void enable_8259A_irq(unsigned int irq);
diff --git a/arch/x86/include/asm/io_apic.h b/arch/x86/include/asm/io_apic.h
index 7c7c16c..5f61f6e 100644
--- a/arch/x86/include/asm/io_apic.h
+++ b/arch/x86/include/asm/io_apic.h
@@ -160,6 +160,7 @@
struct io_apic_irq_attr;
extern int io_apic_set_pci_routing(struct device *dev, int irq,
struct io_apic_irq_attr *irq_attr);
+void setup_IO_APIC_irq_extra(u32 gsi);
extern int (*ioapic_renumber_irq)(int ioapic, int irq);
extern void ioapic_init_mappings(void);
extern void ioapic_insert_resources(void);
diff --git a/arch/x86/include/asm/irq.h b/arch/x86/include/asm/irq.h
index 5458380..2622927 100644
--- a/arch/x86/include/asm/irq.h
+++ b/arch/x86/include/asm/irq.h
@@ -48,5 +48,6 @@
extern int vector_used_by_percpu_irq(unsigned int vector);
extern void init_ISA_irqs(void);
+extern int nr_legacy_irqs;
#endif /* _ASM_X86_IRQ_H */
diff --git a/arch/x86/include/asm/irq_vectors.h b/arch/x86/include/asm/irq_vectors.h
index 4611f08..8767d99 100644
--- a/arch/x86/include/asm/irq_vectors.h
+++ b/arch/x86/include/asm/irq_vectors.h
@@ -28,28 +28,33 @@
#define MCE_VECTOR 0x12
/*
- * IDT vectors usable for external interrupt sources start
- * at 0x20:
+ * IDT vectors usable for external interrupt sources start at 0x20.
+ * (0x80 is the syscall vector, 0x30-0x3f are for ISA)
*/
#define FIRST_EXTERNAL_VECTOR 0x20
-
-#ifdef CONFIG_X86_32
-# define SYSCALL_VECTOR 0x80
-# define IA32_SYSCALL_VECTOR 0x80
-#else
-# define IA32_SYSCALL_VECTOR 0x80
-#endif
+/*
+ * We start allocating at 0x21 to spread out vectors evenly between
+ * priority levels. (0x80 is the syscall vector)
+ */
+#define VECTOR_OFFSET_START 1
/*
- * Reserve the lowest usable priority level 0x20 - 0x2f for triggering
- * cleanup after irq migration.
+ * Reserve the lowest usable vector (and hence lowest priority) 0x20 for
+ * triggering cleanup after irq migration. 0x21-0x2f will still be used
+ * for device interrupts.
*/
#define IRQ_MOVE_CLEANUP_VECTOR FIRST_EXTERNAL_VECTOR
+#define IA32_SYSCALL_VECTOR 0x80
+#ifdef CONFIG_X86_32
+# define SYSCALL_VECTOR 0x80
+#endif
+
/*
* Vectors 0x30-0x3f are used for ISA interrupts.
+ * round up to the next 16-vector boundary
*/
-#define IRQ0_VECTOR (FIRST_EXTERNAL_VECTOR + 0x10)
+#define IRQ0_VECTOR ((FIRST_EXTERNAL_VECTOR + 16) & ~15)
#define IRQ1_VECTOR (IRQ0_VECTOR + 1)
#define IRQ2_VECTOR (IRQ0_VECTOR + 2)
@@ -120,13 +125,6 @@
*/
#define MCE_SELF_VECTOR 0xeb
-/*
- * First APIC vector available to drivers: (vectors 0x30-0xee) we
- * start at 0x31(0x41) to spread out vectors evenly between priority
- * levels. (0x80 is the syscall vector)
- */
-#define FIRST_DEVICE_VECTOR (IRQ15_VECTOR + 2)
-
#define NR_VECTORS 256
#define FPU_IRQ 13
@@ -154,21 +152,21 @@
#define NR_IRQS_LEGACY 16
-#define CPU_VECTOR_LIMIT ( 8 * NR_CPUS )
#define IO_APIC_VECTOR_LIMIT ( 32 * MAX_IO_APICS )
#ifdef CONFIG_X86_IO_APIC
# ifdef CONFIG_SPARSE_IRQ
+# define CPU_VECTOR_LIMIT (64 * NR_CPUS)
# define NR_IRQS \
(CPU_VECTOR_LIMIT > IO_APIC_VECTOR_LIMIT ? \
(NR_VECTORS + CPU_VECTOR_LIMIT) : \
(NR_VECTORS + IO_APIC_VECTOR_LIMIT))
# else
-# if NR_CPUS < MAX_IO_APICS
-# define NR_IRQS (NR_VECTORS + 4*CPU_VECTOR_LIMIT)
-# else
-# define NR_IRQS (NR_VECTORS + IO_APIC_VECTOR_LIMIT)
-# endif
+# define CPU_VECTOR_LIMIT (32 * NR_CPUS)
+# define NR_IRQS \
+ (CPU_VECTOR_LIMIT < IO_APIC_VECTOR_LIMIT ? \
+ (NR_VECTORS + CPU_VECTOR_LIMIT) : \
+ (NR_VECTORS + IO_APIC_VECTOR_LIMIT))
# endif
#else /* !CONFIG_X86_IO_APIC: */
# define NR_IRQS NR_IRQS_LEGACY
diff --git a/arch/x86/include/asm/local.h b/arch/x86/include/asm/local.h
index 47b9b6f..2e99724 100644
--- a/arch/x86/include/asm/local.h
+++ b/arch/x86/include/asm/local.h
@@ -195,41 +195,4 @@
#define __local_add(i, l) local_add((i), (l))
#define __local_sub(i, l) local_sub((i), (l))
-/* Use these for per-cpu local_t variables: on some archs they are
- * much more efficient than these naive implementations. Note they take
- * a variable, not an address.
- *
- * X86_64: This could be done better if we moved the per cpu data directly
- * after GS.
- */
-
-/* Need to disable preemption for the cpu local counters otherwise we could
- still access a variable of a previous CPU in a non atomic way. */
-#define cpu_local_wrap_v(l) \
-({ \
- local_t res__; \
- preempt_disable(); \
- res__ = (l); \
- preempt_enable(); \
- res__; \
-})
-#define cpu_local_wrap(l) \
-({ \
- preempt_disable(); \
- (l); \
- preempt_enable(); \
-}) \
-
-#define cpu_local_read(l) cpu_local_wrap_v(local_read(&__get_cpu_var((l))))
-#define cpu_local_set(l, i) cpu_local_wrap(local_set(&__get_cpu_var((l)), (i)))
-#define cpu_local_inc(l) cpu_local_wrap(local_inc(&__get_cpu_var((l))))
-#define cpu_local_dec(l) cpu_local_wrap(local_dec(&__get_cpu_var((l))))
-#define cpu_local_add(i, l) cpu_local_wrap(local_add((i), &__get_cpu_var((l))))
-#define cpu_local_sub(i, l) cpu_local_wrap(local_sub((i), &__get_cpu_var((l))))
-
-#define __cpu_local_inc(l) cpu_local_inc((l))
-#define __cpu_local_dec(l) cpu_local_dec((l))
-#define __cpu_local_add(i, l) cpu_local_add((i), (l))
-#define __cpu_local_sub(i, l) cpu_local_sub((i), (l))
-
#endif /* _ASM_X86_LOCAL_H */
diff --git a/arch/x86/include/asm/paravirt.h b/arch/x86/include/asm/paravirt.h
index dd59a85..5653f43 100644
--- a/arch/x86/include/asm/paravirt.h
+++ b/arch/x86/include/asm/paravirt.h
@@ -435,15 +435,6 @@
PVOP_VCALL1(pv_mmu_ops.release_pud, pfn);
}
-#ifdef CONFIG_HIGHPTE
-static inline void *kmap_atomic_pte(struct page *page, enum km_type type)
-{
- unsigned long ret;
- ret = PVOP_CALL2(unsigned long, pv_mmu_ops.kmap_atomic_pte, page, type);
- return (void *)ret;
-}
-#endif
-
static inline void pte_update(struct mm_struct *mm, unsigned long addr,
pte_t *ptep)
{
diff --git a/arch/x86/include/asm/paravirt_types.h b/arch/x86/include/asm/paravirt_types.h
index b1e70d5..db9ef55 100644
--- a/arch/x86/include/asm/paravirt_types.h
+++ b/arch/x86/include/asm/paravirt_types.h
@@ -304,10 +304,6 @@
#endif /* PAGETABLE_LEVELS == 4 */
#endif /* PAGETABLE_LEVELS >= 3 */
-#ifdef CONFIG_HIGHPTE
- void *(*kmap_atomic_pte)(struct page *page, enum km_type type);
-#endif
-
struct pv_lazy_ops lazy_mode;
/* dom0 ops */
diff --git a/arch/x86/include/asm/pci.h b/arch/x86/include/asm/pci.h
index ada8c20..b4a00dd 100644
--- a/arch/x86/include/asm/pci.h
+++ b/arch/x86/include/asm/pci.h
@@ -124,6 +124,8 @@
#include "pci_64.h"
#endif
+void dma32_reserve_bootmem(void);
+
/* implement the pci_ DMA API in terms of the generic device dma_ one */
#include <asm-generic/pci-dma-compat.h>
diff --git a/arch/x86/include/asm/pci_64.h b/arch/x86/include/asm/pci_64.h
index ae5e40f..fe15cfb 100644
--- a/arch/x86/include/asm/pci_64.h
+++ b/arch/x86/include/asm/pci_64.h
@@ -22,8 +22,6 @@
extern int (*pci_config_write)(int seg, int bus, int dev, int fn,
int reg, int len, u32 value);
-extern void dma32_reserve_bootmem(void);
-
#endif /* __KERNEL__ */
#endif /* _ASM_X86_PCI_64_H */
diff --git a/arch/x86/include/asm/percpu.h b/arch/x86/include/asm/percpu.h
index 0c44196..66a272d 100644
--- a/arch/x86/include/asm/percpu.h
+++ b/arch/x86/include/asm/percpu.h
@@ -25,19 +25,18 @@
*/
#ifdef CONFIG_SMP
#define PER_CPU(var, reg) \
- __percpu_mov_op %__percpu_seg:per_cpu__this_cpu_off, reg; \
- lea per_cpu__##var(reg), reg
-#define PER_CPU_VAR(var) %__percpu_seg:per_cpu__##var
+ __percpu_mov_op %__percpu_seg:this_cpu_off, reg; \
+ lea var(reg), reg
+#define PER_CPU_VAR(var) %__percpu_seg:var
#else /* ! SMP */
-#define PER_CPU(var, reg) \
- __percpu_mov_op $per_cpu__##var, reg
-#define PER_CPU_VAR(var) per_cpu__##var
+#define PER_CPU(var, reg) __percpu_mov_op $var, reg
+#define PER_CPU_VAR(var) var
#endif /* SMP */
#ifdef CONFIG_X86_64_SMP
#define INIT_PER_CPU_VAR(var) init_per_cpu__##var
#else
-#define INIT_PER_CPU_VAR(var) per_cpu__##var
+#define INIT_PER_CPU_VAR(var) var
#endif
#else /* ...!ASSEMBLY */
@@ -60,12 +59,12 @@
* There also must be an entry in vmlinux_64.lds.S
*/
#define DECLARE_INIT_PER_CPU(var) \
- extern typeof(per_cpu_var(var)) init_per_cpu_var(var)
+ extern typeof(var) init_per_cpu_var(var)
#ifdef CONFIG_X86_64_SMP
#define init_per_cpu_var(var) init_per_cpu__##var
#else
-#define init_per_cpu_var(var) per_cpu_var(var)
+#define init_per_cpu_var(var) var
#endif
/* For arch-specific code, we can use direct single-insn ops (they
@@ -104,6 +103,64 @@
} \
} while (0)
+/*
+ * Generate a percpu add-to-memory instruction and optimize the code
+ * when one is added or subtracted.
+ */
+#define percpu_add_op(var, val) \
+do { \
+ typedef typeof(var) pao_T__; \
+ const int pao_ID__ = (__builtin_constant_p(val) && \
+ ((val) == 1 || (val) == -1)) ? (val) : 0; \
+ if (0) { \
+ pao_T__ pao_tmp__; \
+ pao_tmp__ = (val); \
+ } \
+ switch (sizeof(var)) { \
+ case 1: \
+ if (pao_ID__ == 1) \
+ asm("incb "__percpu_arg(0) : "+m" (var)); \
+ else if (pao_ID__ == -1) \
+ asm("decb "__percpu_arg(0) : "+m" (var)); \
+ else \
+ asm("addb %1, "__percpu_arg(0) \
+ : "+m" (var) \
+ : "qi" ((pao_T__)(val))); \
+ break; \
+ case 2: \
+ if (pao_ID__ == 1) \
+ asm("incw "__percpu_arg(0) : "+m" (var)); \
+ else if (pao_ID__ == -1) \
+ asm("decw "__percpu_arg(0) : "+m" (var)); \
+ else \
+ asm("addw %1, "__percpu_arg(0) \
+ : "+m" (var) \
+ : "ri" ((pao_T__)(val))); \
+ break; \
+ case 4: \
+ if (pao_ID__ == 1) \
+ asm("incl "__percpu_arg(0) : "+m" (var)); \
+ else if (pao_ID__ == -1) \
+ asm("decl "__percpu_arg(0) : "+m" (var)); \
+ else \
+ asm("addl %1, "__percpu_arg(0) \
+ : "+m" (var) \
+ : "ri" ((pao_T__)(val))); \
+ break; \
+ case 8: \
+ if (pao_ID__ == 1) \
+ asm("incq "__percpu_arg(0) : "+m" (var)); \
+ else if (pao_ID__ == -1) \
+ asm("decq "__percpu_arg(0) : "+m" (var)); \
+ else \
+ asm("addq %1, "__percpu_arg(0) \
+ : "+m" (var) \
+ : "re" ((pao_T__)(val))); \
+ break; \
+ default: __bad_percpu_size(); \
+ } \
+} while (0)
+
#define percpu_from_op(op, var, constraint) \
({ \
typeof(var) pfo_ret__; \
@@ -142,16 +199,14 @@
* per-thread variables implemented as per-cpu variables and thus
* stable for the duration of the respective task.
*/
-#define percpu_read(var) percpu_from_op("mov", per_cpu__##var, \
- "m" (per_cpu__##var))
-#define percpu_read_stable(var) percpu_from_op("mov", per_cpu__##var, \
- "p" (&per_cpu__##var))
-#define percpu_write(var, val) percpu_to_op("mov", per_cpu__##var, val)
-#define percpu_add(var, val) percpu_to_op("add", per_cpu__##var, val)
-#define percpu_sub(var, val) percpu_to_op("sub", per_cpu__##var, val)
-#define percpu_and(var, val) percpu_to_op("and", per_cpu__##var, val)
-#define percpu_or(var, val) percpu_to_op("or", per_cpu__##var, val)
-#define percpu_xor(var, val) percpu_to_op("xor", per_cpu__##var, val)
+#define percpu_read(var) percpu_from_op("mov", var, "m" (var))
+#define percpu_read_stable(var) percpu_from_op("mov", var, "p" (&(var)))
+#define percpu_write(var, val) percpu_to_op("mov", var, val)
+#define percpu_add(var, val) percpu_add_op(var, val)
+#define percpu_sub(var, val) percpu_add_op(var, -(val))
+#define percpu_and(var, val) percpu_to_op("and", var, val)
+#define percpu_or(var, val) percpu_to_op("or", var, val)
+#define percpu_xor(var, val) percpu_to_op("xor", var, val)
#define __this_cpu_read_1(pcp) percpu_from_op("mov", (pcp), "m"(pcp))
#define __this_cpu_read_2(pcp) percpu_from_op("mov", (pcp), "m"(pcp))
@@ -160,9 +215,9 @@
#define __this_cpu_write_1(pcp, val) percpu_to_op("mov", (pcp), val)
#define __this_cpu_write_2(pcp, val) percpu_to_op("mov", (pcp), val)
#define __this_cpu_write_4(pcp, val) percpu_to_op("mov", (pcp), val)
-#define __this_cpu_add_1(pcp, val) percpu_to_op("add", (pcp), val)
-#define __this_cpu_add_2(pcp, val) percpu_to_op("add", (pcp), val)
-#define __this_cpu_add_4(pcp, val) percpu_to_op("add", (pcp), val)
+#define __this_cpu_add_1(pcp, val) percpu_add_op((pcp), val)
+#define __this_cpu_add_2(pcp, val) percpu_add_op((pcp), val)
+#define __this_cpu_add_4(pcp, val) percpu_add_op((pcp), val)
#define __this_cpu_and_1(pcp, val) percpu_to_op("and", (pcp), val)
#define __this_cpu_and_2(pcp, val) percpu_to_op("and", (pcp), val)
#define __this_cpu_and_4(pcp, val) percpu_to_op("and", (pcp), val)
@@ -179,9 +234,9 @@
#define this_cpu_write_1(pcp, val) percpu_to_op("mov", (pcp), val)
#define this_cpu_write_2(pcp, val) percpu_to_op("mov", (pcp), val)
#define this_cpu_write_4(pcp, val) percpu_to_op("mov", (pcp), val)
-#define this_cpu_add_1(pcp, val) percpu_to_op("add", (pcp), val)
-#define this_cpu_add_2(pcp, val) percpu_to_op("add", (pcp), val)
-#define this_cpu_add_4(pcp, val) percpu_to_op("add", (pcp), val)
+#define this_cpu_add_1(pcp, val) percpu_add_op((pcp), val)
+#define this_cpu_add_2(pcp, val) percpu_add_op((pcp), val)
+#define this_cpu_add_4(pcp, val) percpu_add_op((pcp), val)
#define this_cpu_and_1(pcp, val) percpu_to_op("and", (pcp), val)
#define this_cpu_and_2(pcp, val) percpu_to_op("and", (pcp), val)
#define this_cpu_and_4(pcp, val) percpu_to_op("and", (pcp), val)
@@ -192,9 +247,9 @@
#define this_cpu_xor_2(pcp, val) percpu_to_op("xor", (pcp), val)
#define this_cpu_xor_4(pcp, val) percpu_to_op("xor", (pcp), val)
-#define irqsafe_cpu_add_1(pcp, val) percpu_to_op("add", (pcp), val)
-#define irqsafe_cpu_add_2(pcp, val) percpu_to_op("add", (pcp), val)
-#define irqsafe_cpu_add_4(pcp, val) percpu_to_op("add", (pcp), val)
+#define irqsafe_cpu_add_1(pcp, val) percpu_add_op((pcp), val)
+#define irqsafe_cpu_add_2(pcp, val) percpu_add_op((pcp), val)
+#define irqsafe_cpu_add_4(pcp, val) percpu_add_op((pcp), val)
#define irqsafe_cpu_and_1(pcp, val) percpu_to_op("and", (pcp), val)
#define irqsafe_cpu_and_2(pcp, val) percpu_to_op("and", (pcp), val)
#define irqsafe_cpu_and_4(pcp, val) percpu_to_op("and", (pcp), val)
@@ -212,19 +267,19 @@
#ifdef CONFIG_X86_64
#define __this_cpu_read_8(pcp) percpu_from_op("mov", (pcp), "m"(pcp))
#define __this_cpu_write_8(pcp, val) percpu_to_op("mov", (pcp), val)
-#define __this_cpu_add_8(pcp, val) percpu_to_op("add", (pcp), val)
+#define __this_cpu_add_8(pcp, val) percpu_add_op((pcp), val)
#define __this_cpu_and_8(pcp, val) percpu_to_op("and", (pcp), val)
#define __this_cpu_or_8(pcp, val) percpu_to_op("or", (pcp), val)
#define __this_cpu_xor_8(pcp, val) percpu_to_op("xor", (pcp), val)
#define this_cpu_read_8(pcp) percpu_from_op("mov", (pcp), "m"(pcp))
#define this_cpu_write_8(pcp, val) percpu_to_op("mov", (pcp), val)
-#define this_cpu_add_8(pcp, val) percpu_to_op("add", (pcp), val)
+#define this_cpu_add_8(pcp, val) percpu_add_op((pcp), val)
#define this_cpu_and_8(pcp, val) percpu_to_op("and", (pcp), val)
#define this_cpu_or_8(pcp, val) percpu_to_op("or", (pcp), val)
#define this_cpu_xor_8(pcp, val) percpu_to_op("xor", (pcp), val)
-#define irqsafe_cpu_add_8(pcp, val) percpu_to_op("add", (pcp), val)
+#define irqsafe_cpu_add_8(pcp, val) percpu_add_op((pcp), val)
#define irqsafe_cpu_and_8(pcp, val) percpu_to_op("and", (pcp), val)
#define irqsafe_cpu_or_8(pcp, val) percpu_to_op("or", (pcp), val)
#define irqsafe_cpu_xor_8(pcp, val) percpu_to_op("xor", (pcp), val)
@@ -236,7 +291,7 @@
({ \
int old__; \
asm volatile("btr %2,"__percpu_arg(1)"\n\tsbbl %0,%0" \
- : "=r" (old__), "+m" (per_cpu__##var) \
+ : "=r" (old__), "+m" (var) \
: "dIr" (bit)); \
old__; \
})
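
The pao_ID__ test above is compile-time detection of a constant 1 or -1, so the macro can emit inc/dec instead of a full add. A minimal user-space analogue of that trick (hypothetical add_kind() helper; gcc/clang __builtin_constant_p assumed):

    #include <stdio.h>

    /* Report which instruction the kernel macro would pick: inc/dec for a
     * compile-time constant +/-1, a plain add for everything else. */
    #define add_kind(val)                                                   \
            ((__builtin_constant_p(val) && ((val) == 1 || (val) == -1)) ?   \
                    ((val) == 1 ? "inc" : "dec") : "add")

    int main(void)
    {
            int n = 5;

            printf("%s\n", add_kind(1));    /* inc */
            printf("%s\n", add_kind(-1));   /* dec */
            printf("%s\n", add_kind(n));    /* add: not a constant */
            return 0;
    }
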
diff --git a/arch/x86/include/asm/pgtable_32.h b/arch/x86/include/asm/pgtable_32.h
index a286683..47339a1 100644
--- a/arch/x86/include/asm/pgtable_32.h
+++ b/arch/x86/include/asm/pgtable_32.h
@@ -54,10 +54,10 @@
in_irq() ? KM_IRQ_PTE : \
KM_PTE0)
#define pte_offset_map(dir, address) \
- ((pte_t *)kmap_atomic_pte(pmd_page(*(dir)), __KM_PTE) + \
+ ((pte_t *)kmap_atomic(pmd_page(*(dir)), __KM_PTE) + \
pte_index((address)))
#define pte_offset_map_nested(dir, address) \
- ((pte_t *)kmap_atomic_pte(pmd_page(*(dir)), KM_PTE1) + \
+ ((pte_t *)kmap_atomic(pmd_page(*(dir)), KM_PTE1) + \
pte_index((address)))
#define pte_unmap(pte) kunmap_atomic((pte), __KM_PTE)
#define pte_unmap_nested(pte) kunmap_atomic((pte), KM_PTE1)
diff --git a/arch/x86/include/asm/proto.h b/arch/x86/include/asm/proto.h
index 4009f65..6f414ed 100644
--- a/arch/x86/include/asm/proto.h
+++ b/arch/x86/include/asm/proto.h
@@ -23,14 +23,4 @@
long do_arch_prctl(struct task_struct *task, int code, unsigned long addr);
-/*
- * This looks more complex than it should be. But we need to
- * get the type for the ~ right in round_down (it needs to be
- * as wide as the result!), and we want to evaluate the macro
- * arguments just once each.
- */
-#define __round_mask(x,y) ((__typeof__(x))((y)-1))
-#define round_up(x,y) ((((x)-1) | __round_mask(x,y))+1)
-#define round_down(x,y) ((x) & ~__round_mask(x,y))
-
#endif /* _ASM_X86_PROTO_H */
diff --git a/arch/x86/include/asm/system.h b/arch/x86/include/asm/system.h
index e04740f..b8fe48e 100644
--- a/arch/x86/include/asm/system.h
+++ b/arch/x86/include/asm/system.h
@@ -32,7 +32,7 @@
"movl %P[task_canary](%[next]), %%ebx\n\t" \
"movl %%ebx, "__percpu_arg([stack_canary])"\n\t"
#define __switch_canary_oparam \
- , [stack_canary] "=m" (per_cpu_var(stack_canary.canary))
+ , [stack_canary] "=m" (stack_canary.canary)
#define __switch_canary_iparam \
, [task_canary] "i" (offsetof(struct task_struct, stack_canary))
#else /* CC_STACKPROTECTOR */
@@ -114,7 +114,7 @@
"movq %P[task_canary](%%rsi),%%r8\n\t" \
"movq %%r8,"__percpu_arg([gs_canary])"\n\t"
#define __switch_canary_oparam \
- , [gs_canary] "=m" (per_cpu_var(irq_stack_union.stack_canary))
+ , [gs_canary] "=m" (irq_stack_union.stack_canary)
#define __switch_canary_iparam \
, [task_canary] "i" (offsetof(struct task_struct, stack_canary))
#else /* CC_STACKPROTECTOR */
@@ -133,7 +133,7 @@
__switch_canary \
"movq %P[thread_info](%%rsi),%%r8\n\t" \
"movq %%rax,%%rdi\n\t" \
- "testl %[_tif_fork],%P[ti_flags](%%r8)\n\t" \
+ "testl %[_tif_fork],%P[ti_flags](%%r8)\n\t" \
"jnz ret_from_fork\n\t" \
RESTORE_CONTEXT \
: "=a" (last) \
@@ -143,7 +143,7 @@
[ti_flags] "i" (offsetof(struct thread_info, flags)), \
[_tif_fork] "i" (_TIF_FORK), \
[thread_info] "i" (offsetof(struct task_struct, stack)), \
- [current_task] "m" (per_cpu_var(current_task)) \
+ [current_task] "m" (current_task) \
__switch_canary_iparam \
: "memory", "cc" __EXTRA_CLOBBER)
#endif
diff --git a/arch/x86/kernel/acpi/boot.c b/arch/x86/kernel/acpi/boot.c
index f957030..738fcb6 100644
--- a/arch/x86/kernel/acpi/boot.c
+++ b/arch/x86/kernel/acpi/boot.c
@@ -447,6 +447,12 @@
int acpi_gsi_to_irq(u32 gsi, unsigned int *irq)
{
*irq = gsi;
+
+#ifdef CONFIG_X86_IO_APIC
+ if (acpi_irq_model == ACPI_IRQ_MODEL_IOAPIC)
+ setup_IO_APIC_irq_extra(gsi);
+#endif
+
return 0;
}
@@ -474,7 +480,8 @@
plat_gsi = mp_register_gsi(dev, gsi, trigger, polarity);
}
#endif
- acpi_gsi_to_irq(plat_gsi, &irq);
+ irq = plat_gsi;
+
return irq;
}
diff --git a/arch/x86/kernel/apic/io_apic.c b/arch/x86/kernel/apic/io_apic.c
index 6bdd2c7..14862f1 100644
--- a/arch/x86/kernel/apic/io_apic.c
+++ b/arch/x86/kernel/apic/io_apic.c
@@ -73,8 +73,8 @@
*/
int sis_apic_bug = -1;
-static DEFINE_SPINLOCK(ioapic_lock);
-static DEFINE_SPINLOCK(vector_lock);
+static DEFINE_RAW_SPINLOCK(ioapic_lock);
+static DEFINE_RAW_SPINLOCK(vector_lock);
/*
* # of IRQ routing registers
@@ -94,8 +94,6 @@
/* # of MP IRQ source entries */
int mp_irq_entries;
-/* Number of legacy interrupts */
-static int nr_legacy_irqs __read_mostly = NR_IRQS_LEGACY;
/* GSI interrupts */
static int nr_irqs_gsi = NR_IRQS_LEGACY;
@@ -140,27 +138,10 @@
/* irq_cfg is indexed by the sum of all RTEs in all I/O APICs. */
#ifdef CONFIG_SPARSE_IRQ
-static struct irq_cfg irq_cfgx[] = {
+static struct irq_cfg irq_cfgx[NR_IRQS_LEGACY];
#else
-static struct irq_cfg irq_cfgx[NR_IRQS] = {
+static struct irq_cfg irq_cfgx[NR_IRQS];
#endif
- [0] = { .vector = IRQ0_VECTOR, },
- [1] = { .vector = IRQ1_VECTOR, },
- [2] = { .vector = IRQ2_VECTOR, },
- [3] = { .vector = IRQ3_VECTOR, },
- [4] = { .vector = IRQ4_VECTOR, },
- [5] = { .vector = IRQ5_VECTOR, },
- [6] = { .vector = IRQ6_VECTOR, },
- [7] = { .vector = IRQ7_VECTOR, },
- [8] = { .vector = IRQ8_VECTOR, },
- [9] = { .vector = IRQ9_VECTOR, },
- [10] = { .vector = IRQ10_VECTOR, },
- [11] = { .vector = IRQ11_VECTOR, },
- [12] = { .vector = IRQ12_VECTOR, },
- [13] = { .vector = IRQ13_VECTOR, },
- [14] = { .vector = IRQ14_VECTOR, },
- [15] = { .vector = IRQ15_VECTOR, },
-};
void __init io_apic_disable_legacy(void)
{
@@ -185,8 +166,14 @@
desc->chip_data = &cfg[i];
zalloc_cpumask_var_node(&cfg[i].domain, GFP_NOWAIT, node);
zalloc_cpumask_var_node(&cfg[i].old_domain, GFP_NOWAIT, node);
- if (i < nr_legacy_irqs)
- cpumask_setall(cfg[i].domain);
+ /*
+	 * For legacy IRQs, start by assigning irq0..irq15 to
+	 * IRQ0_VECTOR..IRQ15_VECTOR on cpu 0.
+ */
+ if (i < nr_legacy_irqs) {
+ cfg[i].vector = IRQ0_VECTOR + i;
+ cpumask_set_cpu(0, cfg[i].domain);
+ }
}
return 0;
@@ -406,7 +393,7 @@
struct irq_pin_list *entry;
unsigned long flags;
- spin_lock_irqsave(&ioapic_lock, flags);
+ raw_spin_lock_irqsave(&ioapic_lock, flags);
for_each_irq_pin(entry, cfg->irq_2_pin) {
unsigned int reg;
int pin;
@@ -415,11 +402,11 @@
reg = io_apic_read(entry->apic, 0x10 + pin*2);
/* Is the remote IRR bit set? */
if (reg & IO_APIC_REDIR_REMOTE_IRR) {
- spin_unlock_irqrestore(&ioapic_lock, flags);
+ raw_spin_unlock_irqrestore(&ioapic_lock, flags);
return true;
}
}
- spin_unlock_irqrestore(&ioapic_lock, flags);
+ raw_spin_unlock_irqrestore(&ioapic_lock, flags);
return false;
}
@@ -433,10 +420,10 @@
{
union entry_union eu;
unsigned long flags;
- spin_lock_irqsave(&ioapic_lock, flags);
+ raw_spin_lock_irqsave(&ioapic_lock, flags);
eu.w1 = io_apic_read(apic, 0x10 + 2 * pin);
eu.w2 = io_apic_read(apic, 0x11 + 2 * pin);
- spin_unlock_irqrestore(&ioapic_lock, flags);
+ raw_spin_unlock_irqrestore(&ioapic_lock, flags);
return eu.entry;
}
@@ -459,9 +446,9 @@
void ioapic_write_entry(int apic, int pin, struct IO_APIC_route_entry e)
{
unsigned long flags;
- spin_lock_irqsave(&ioapic_lock, flags);
+ raw_spin_lock_irqsave(&ioapic_lock, flags);
__ioapic_write_entry(apic, pin, e);
- spin_unlock_irqrestore(&ioapic_lock, flags);
+ raw_spin_unlock_irqrestore(&ioapic_lock, flags);
}
/*
@@ -474,10 +461,10 @@
unsigned long flags;
union entry_union eu = { .entry.mask = 1 };
- spin_lock_irqsave(&ioapic_lock, flags);
+ raw_spin_lock_irqsave(&ioapic_lock, flags);
io_apic_write(apic, 0x10 + 2*pin, eu.w1);
io_apic_write(apic, 0x11 + 2*pin, eu.w2);
- spin_unlock_irqrestore(&ioapic_lock, flags);
+ raw_spin_unlock_irqrestore(&ioapic_lock, flags);
}
/*
@@ -604,9 +591,9 @@
BUG_ON(!cfg);
- spin_lock_irqsave(&ioapic_lock, flags);
+ raw_spin_lock_irqsave(&ioapic_lock, flags);
__mask_IO_APIC_irq(cfg);
- spin_unlock_irqrestore(&ioapic_lock, flags);
+ raw_spin_unlock_irqrestore(&ioapic_lock, flags);
}
static void unmask_IO_APIC_irq_desc(struct irq_desc *desc)
@@ -614,9 +601,9 @@
struct irq_cfg *cfg = desc->chip_data;
unsigned long flags;
- spin_lock_irqsave(&ioapic_lock, flags);
+ raw_spin_lock_irqsave(&ioapic_lock, flags);
__unmask_IO_APIC_irq(cfg);
- spin_unlock_irqrestore(&ioapic_lock, flags);
+ raw_spin_unlock_irqrestore(&ioapic_lock, flags);
}
static void mask_IO_APIC_irq(unsigned int irq)
@@ -1140,12 +1127,12 @@
	/* Used so that the online set of cpus does not change
* during assign_irq_vector.
*/
- spin_lock(&vector_lock);
+ raw_spin_lock(&vector_lock);
}
void unlock_vector_lock(void)
{
- spin_unlock(&vector_lock);
+ raw_spin_unlock(&vector_lock);
}
static int
@@ -1162,7 +1149,8 @@
* Also, we've got to be careful not to trash gate
* 0x80, because int 0x80 is hm, kind of importantish. ;)
*/
- static int current_vector = FIRST_DEVICE_VECTOR, current_offset = 0;
+ static int current_vector = FIRST_EXTERNAL_VECTOR + VECTOR_OFFSET_START;
+ static int current_offset = VECTOR_OFFSET_START % 8;
unsigned int old_vector;
int cpu, err;
cpumask_var_t tmp_mask;
@@ -1198,7 +1186,7 @@
if (vector >= first_system_vector) {
/* If out of vectors on large boxen, must share them. */
offset = (offset + 1) % 8;
- vector = FIRST_DEVICE_VECTOR + offset;
+ vector = FIRST_EXTERNAL_VECTOR + offset;
}
if (unlikely(current_vector == vector))
continue;
@@ -1232,9 +1220,9 @@
int err;
unsigned long flags;
- spin_lock_irqsave(&vector_lock, flags);
+ raw_spin_lock_irqsave(&vector_lock, flags);
err = __assign_irq_vector(irq, cfg, mask);
- spin_unlock_irqrestore(&vector_lock, flags);
+ raw_spin_unlock_irqrestore(&vector_lock, flags);
return err;
}
@@ -1268,11 +1256,16 @@
void __setup_vector_irq(int cpu)
{
/* Initialize vector_irq on a new cpu */
- /* This function must be called with vector_lock held */
int irq, vector;
struct irq_cfg *cfg;
struct irq_desc *desc;
+ /*
+ * vector_lock will make sure that we don't run into irq vector
+ * assignments that might be happening on another cpu in parallel,
+	 * while we set up our initial vector to irq mappings.
+ */
+ raw_spin_lock(&vector_lock);
/* Mark the inuse vectors */
for_each_irq_desc(irq, desc) {
cfg = desc->chip_data;
@@ -1291,6 +1284,7 @@
if (!cpumask_test_cpu(cpu, cfg->domain))
per_cpu(vector_irq, cpu)[vector] = -1;
}
+ raw_spin_unlock(&vector_lock);
}
static struct irq_chip ioapic_chip;
@@ -1440,6 +1434,14 @@
cfg = desc->chip_data;
+ /*
+	 * For legacy irqs, cfg->domain starts out with just cpu 0, matching
+	 * legacy controllers like the 8259. Now that the IO-APIC can handle
+	 * this irq, update cfg->domain.
+ */
+ if (irq < nr_legacy_irqs && cpumask_test_cpu(0, cfg->domain))
+ apic->vector_allocation_domain(0, cfg->domain);
+
if (assign_irq_vector(irq, cfg, apic->target_cpus()))
return;
@@ -1473,7 +1475,7 @@
static void __init setup_IO_APIC_irqs(void)
{
- int apic_id = 0, pin, idx, irq;
+ int apic_id, pin, idx, irq;
int notcon = 0;
struct irq_desc *desc;
struct irq_cfg *cfg;
@@ -1481,14 +1483,7 @@
apic_printk(APIC_VERBOSE, KERN_DEBUG "init IO_APIC IRQs\n");
-#ifdef CONFIG_ACPI
- if (!acpi_disabled && acpi_ioapic) {
- apic_id = mp_find_ioapic(0);
- if (apic_id < 0)
- apic_id = 0;
- }
-#endif
-
+ for (apic_id = 0; apic_id < nr_ioapics; apic_id++)
for (pin = 0; pin < nr_ioapic_registers[apic_id]; pin++) {
idx = find_irq_entry(apic_id, pin, mp_INT);
if (idx == -1) {
@@ -1510,6 +1505,9 @@
irq = pin_2_irq(idx, apic_id, pin);
+ if ((apic_id > 0) && (irq > 16))
+ continue;
+
/*
* Skip the timer IRQ if there's a quirk handler
* installed and if it returns 1:
@@ -1539,6 +1537,56 @@
}
/*
+ * For a GSI that is not on the first ioapic and could not be set up
+ * through acpi_register_gsi(), such as the special SCI on the
+ * IBM x3330.
+ */
+void setup_IO_APIC_irq_extra(u32 gsi)
+{
+ int apic_id = 0, pin, idx, irq;
+ int node = cpu_to_node(boot_cpu_id);
+ struct irq_desc *desc;
+ struct irq_cfg *cfg;
+
+ /*
+ * Convert 'gsi' to 'ioapic.pin'.
+ */
+ apic_id = mp_find_ioapic(gsi);
+ if (apic_id < 0)
+ return;
+
+ pin = mp_find_ioapic_pin(apic_id, gsi);
+ idx = find_irq_entry(apic_id, pin, mp_INT);
+ if (idx == -1)
+ return;
+
+ irq = pin_2_irq(idx, apic_id, pin);
+#ifdef CONFIG_SPARSE_IRQ
+ desc = irq_to_desc(irq);
+ if (desc)
+ return;
+#endif
+ desc = irq_to_desc_alloc_node(irq, node);
+ if (!desc) {
+ printk(KERN_INFO "can not get irq_desc for %d\n", irq);
+ return;
+ }
+
+ cfg = desc->chip_data;
+ add_pin_to_irq_node(cfg, node, apic_id, pin);
+
+ if (test_bit(pin, mp_ioapic_routing[apic_id].pin_programmed)) {
+ pr_debug("Pin %d-%d already programmed\n",
+ mp_ioapics[apic_id].apicid, pin);
+ return;
+ }
+ set_bit(pin, mp_ioapic_routing[apic_id].pin_programmed);
+
+ setup_IO_APIC_irq(apic_id, pin, irq, desc,
+ irq_trigger(idx), irq_polarity(idx));
+}
+
+/*
* Set up the timer pin, possibly with the 8259A-master behind.
*/
static void __init setup_timer_IRQ0_pin(unsigned int apic_id, unsigned int pin,
@@ -1601,14 +1649,14 @@
for (apic = 0; apic < nr_ioapics; apic++) {
- spin_lock_irqsave(&ioapic_lock, flags);
+ raw_spin_lock_irqsave(&ioapic_lock, flags);
reg_00.raw = io_apic_read(apic, 0);
reg_01.raw = io_apic_read(apic, 1);
if (reg_01.bits.version >= 0x10)
reg_02.raw = io_apic_read(apic, 2);
if (reg_01.bits.version >= 0x20)
reg_03.raw = io_apic_read(apic, 3);
- spin_unlock_irqrestore(&ioapic_lock, flags);
+ raw_spin_unlock_irqrestore(&ioapic_lock, flags);
printk("\n");
printk(KERN_DEBUG "IO APIC #%d......\n", mp_ioapics[apic].apicid);
@@ -1830,7 +1878,7 @@
printk(KERN_DEBUG "\nprinting PIC contents\n");
- spin_lock_irqsave(&i8259A_lock, flags);
+ raw_spin_lock_irqsave(&i8259A_lock, flags);
v = inb(0xa1) << 8 | inb(0x21);
printk(KERN_DEBUG "... PIC IMR: %04x\n", v);
@@ -1844,7 +1892,7 @@
outb(0x0a,0xa0);
outb(0x0a,0x20);
- spin_unlock_irqrestore(&i8259A_lock, flags);
+ raw_spin_unlock_irqrestore(&i8259A_lock, flags);
printk(KERN_DEBUG "... PIC ISR: %04x\n", v);
@@ -1903,9 +1951,9 @@
* The number of IO-APIC IRQ registers (== #pins):
*/
for (apic = 0; apic < nr_ioapics; apic++) {
- spin_lock_irqsave(&ioapic_lock, flags);
+ raw_spin_lock_irqsave(&ioapic_lock, flags);
reg_01.raw = io_apic_read(apic, 1);
- spin_unlock_irqrestore(&ioapic_lock, flags);
+ raw_spin_unlock_irqrestore(&ioapic_lock, flags);
nr_ioapic_registers[apic] = reg_01.bits.entries+1;
}
@@ -2045,9 +2093,9 @@
for (apic_id = 0; apic_id < nr_ioapics; apic_id++) {
/* Read the register 0 value */
- spin_lock_irqsave(&ioapic_lock, flags);
+ raw_spin_lock_irqsave(&ioapic_lock, flags);
reg_00.raw = io_apic_read(apic_id, 0);
- spin_unlock_irqrestore(&ioapic_lock, flags);
+ raw_spin_unlock_irqrestore(&ioapic_lock, flags);
old_id = mp_ioapics[apic_id].apicid;
@@ -2106,16 +2154,16 @@
mp_ioapics[apic_id].apicid);
reg_00.bits.ID = mp_ioapics[apic_id].apicid;
- spin_lock_irqsave(&ioapic_lock, flags);
+ raw_spin_lock_irqsave(&ioapic_lock, flags);
io_apic_write(apic_id, 0, reg_00.raw);
- spin_unlock_irqrestore(&ioapic_lock, flags);
+ raw_spin_unlock_irqrestore(&ioapic_lock, flags);
/*
* Sanity check
*/
- spin_lock_irqsave(&ioapic_lock, flags);
+ raw_spin_lock_irqsave(&ioapic_lock, flags);
reg_00.raw = io_apic_read(apic_id, 0);
- spin_unlock_irqrestore(&ioapic_lock, flags);
+ raw_spin_unlock_irqrestore(&ioapic_lock, flags);
if (reg_00.bits.ID != mp_ioapics[apic_id].apicid)
printk("could not set ID!\n");
else
@@ -2198,7 +2246,7 @@
unsigned long flags;
struct irq_cfg *cfg;
- spin_lock_irqsave(&ioapic_lock, flags);
+ raw_spin_lock_irqsave(&ioapic_lock, flags);
if (irq < nr_legacy_irqs) {
disable_8259A_irq(irq);
if (i8259A_irq_pending(irq))
@@ -2206,7 +2254,7 @@
}
cfg = irq_cfg(irq);
__unmask_IO_APIC_irq(cfg);
- spin_unlock_irqrestore(&ioapic_lock, flags);
+ raw_spin_unlock_irqrestore(&ioapic_lock, flags);
return was_pending;
}
@@ -2217,9 +2265,9 @@
struct irq_cfg *cfg = irq_cfg(irq);
unsigned long flags;
- spin_lock_irqsave(&vector_lock, flags);
+ raw_spin_lock_irqsave(&vector_lock, flags);
apic->send_IPI_mask(cpumask_of(cpumask_first(cfg->domain)), cfg->vector);
- spin_unlock_irqrestore(&vector_lock, flags);
+ raw_spin_unlock_irqrestore(&vector_lock, flags);
return 1;
}
@@ -2312,14 +2360,14 @@
irq = desc->irq;
cfg = desc->chip_data;
- spin_lock_irqsave(&ioapic_lock, flags);
+ raw_spin_lock_irqsave(&ioapic_lock, flags);
ret = set_desc_affinity(desc, mask, &dest);
if (!ret) {
/* Only the high 8 bits are valid. */
dest = SET_APIC_LOGICAL_ID(dest);
__target_IO_APIC_irq(irq, dest, cfg);
}
- spin_unlock_irqrestore(&ioapic_lock, flags);
+ raw_spin_unlock_irqrestore(&ioapic_lock, flags);
return ret;
}
@@ -2554,9 +2602,9 @@
irq = desc->irq;
cfg = desc->chip_data;
- spin_lock_irqsave(&ioapic_lock, flags);
+ raw_spin_lock_irqsave(&ioapic_lock, flags);
__eoi_ioapic_irq(irq, cfg);
- spin_unlock_irqrestore(&ioapic_lock, flags);
+ raw_spin_unlock_irqrestore(&ioapic_lock, flags);
}
static void ack_apic_level(unsigned int irq)
@@ -3138,13 +3186,13 @@
data = container_of(dev, struct sysfs_ioapic_data, dev);
entry = data->entry;
- spin_lock_irqsave(&ioapic_lock, flags);
+ raw_spin_lock_irqsave(&ioapic_lock, flags);
reg_00.raw = io_apic_read(dev->id, 0);
if (reg_00.bits.ID != mp_ioapics[dev->id].apicid) {
reg_00.bits.ID = mp_ioapics[dev->id].apicid;
io_apic_write(dev->id, 0, reg_00.raw);
}
- spin_unlock_irqrestore(&ioapic_lock, flags);
+ raw_spin_unlock_irqrestore(&ioapic_lock, flags);
for (i = 0; i < nr_ioapic_registers[dev->id]; i++)
ioapic_write_entry(dev->id, i, entry[i]);
@@ -3207,7 +3255,7 @@
if (irq_want < nr_irqs_gsi)
irq_want = nr_irqs_gsi;
- spin_lock_irqsave(&vector_lock, flags);
+ raw_spin_lock_irqsave(&vector_lock, flags);
for (new = irq_want; new < nr_irqs; new++) {
desc_new = irq_to_desc_alloc_node(new, node);
if (!desc_new) {
@@ -3226,14 +3274,11 @@
irq = new;
break;
}
- spin_unlock_irqrestore(&vector_lock, flags);
+ raw_spin_unlock_irqrestore(&vector_lock, flags);
- if (irq > 0) {
- dynamic_irq_init(irq);
- /* restore it, in case dynamic_irq_init clear it */
- if (desc_new)
- desc_new->chip_data = cfg_new;
- }
+ if (irq > 0)
+ dynamic_irq_init_keep_chip_data(irq);
+
return irq;
}
@@ -3255,20 +3300,13 @@
void destroy_irq(unsigned int irq)
{
unsigned long flags;
- struct irq_cfg *cfg;
- struct irq_desc *desc;
- /* store it, in case dynamic_irq_cleanup clear it */
- desc = irq_to_desc(irq);
- cfg = desc->chip_data;
- dynamic_irq_cleanup(irq);
- /* connect back irq_cfg */
- desc->chip_data = cfg;
+ dynamic_irq_cleanup_keep_chip_data(irq);
free_irte(irq);
- spin_lock_irqsave(&vector_lock, flags);
- __clear_irq_vector(irq, cfg);
- spin_unlock_irqrestore(&vector_lock, flags);
+ raw_spin_lock_irqsave(&vector_lock, flags);
+ __clear_irq_vector(irq, get_irq_chip_data(irq));
+ raw_spin_unlock_irqrestore(&vector_lock, flags);
}
/*
@@ -3805,9 +3843,9 @@
union IO_APIC_reg_01 reg_01;
unsigned long flags;
- spin_lock_irqsave(&ioapic_lock, flags);
+ raw_spin_lock_irqsave(&ioapic_lock, flags);
reg_01.raw = io_apic_read(ioapic, 1);
- spin_unlock_irqrestore(&ioapic_lock, flags);
+ raw_spin_unlock_irqrestore(&ioapic_lock, flags);
return reg_01.bits.entries;
}
@@ -3969,9 +4007,9 @@
if (physids_empty(apic_id_map))
apic->ioapic_phys_id_map(&phys_cpu_present_map, &apic_id_map);
- spin_lock_irqsave(&ioapic_lock, flags);
+ raw_spin_lock_irqsave(&ioapic_lock, flags);
reg_00.raw = io_apic_read(ioapic, 0);
- spin_unlock_irqrestore(&ioapic_lock, flags);
+ raw_spin_unlock_irqrestore(&ioapic_lock, flags);
if (apic_id >= get_physical_broadcast()) {
printk(KERN_WARNING "IOAPIC[%d]: Invalid apic_id %d, trying "
@@ -4005,10 +4043,10 @@
if (reg_00.bits.ID != apic_id) {
reg_00.bits.ID = apic_id;
- spin_lock_irqsave(&ioapic_lock, flags);
+ raw_spin_lock_irqsave(&ioapic_lock, flags);
io_apic_write(ioapic, 0, reg_00.raw);
reg_00.raw = io_apic_read(ioapic, 0);
- spin_unlock_irqrestore(&ioapic_lock, flags);
+ raw_spin_unlock_irqrestore(&ioapic_lock, flags);
/* Sanity check */
if (reg_00.bits.ID != apic_id) {
@@ -4029,9 +4067,9 @@
union IO_APIC_reg_01 reg_01;
unsigned long flags;
- spin_lock_irqsave(&ioapic_lock, flags);
+ raw_spin_lock_irqsave(&ioapic_lock, flags);
reg_01.raw = io_apic_read(ioapic, 1);
- spin_unlock_irqrestore(&ioapic_lock, flags);
+ raw_spin_unlock_irqrestore(&ioapic_lock, flags);
return reg_01.bits.version;
}
@@ -4063,27 +4101,23 @@
#ifdef CONFIG_SMP
void __init setup_ioapic_dest(void)
{
- int pin, ioapic = 0, irq, irq_entry;
+ int pin, ioapic, irq, irq_entry;
struct irq_desc *desc;
const struct cpumask *mask;
if (skip_ioapic_setup == 1)
return;
-#ifdef CONFIG_ACPI
- if (!acpi_disabled && acpi_ioapic) {
- ioapic = mp_find_ioapic(0);
- if (ioapic < 0)
- ioapic = 0;
- }
-#endif
-
+ for (ioapic = 0; ioapic < nr_ioapics; ioapic++)
for (pin = 0; pin < nr_ioapic_registers[ioapic]; pin++) {
irq_entry = find_irq_entry(ioapic, pin, mp_INT);
if (irq_entry == -1)
continue;
irq = pin_2_irq(irq_entry, ioapic, pin);
+ if ((ioapic > 0) && (irq > 16))
+ continue;
+
desc = irq_to_desc(irq);
/*
diff --git a/arch/x86/kernel/apic/nmi.c b/arch/x86/kernel/apic/nmi.c
index 0159a69..bd7c96b 100644
--- a/arch/x86/kernel/apic/nmi.c
+++ b/arch/x86/kernel/apic/nmi.c
@@ -416,13 +416,13 @@
/* We can be called before check_nmi_watchdog, hence NULL check. */
if (cpumask_test_cpu(cpu, to_cpumask(backtrace_mask))) {
- static DEFINE_SPINLOCK(lock); /* Serialise the printks */
+ static DEFINE_RAW_SPINLOCK(lock); /* Serialise the printks */
- spin_lock(&lock);
+ raw_spin_lock(&lock);
printk(KERN_WARNING "NMI backtrace for cpu %d\n", cpu);
show_regs(regs);
dump_stack();
- spin_unlock(&lock);
+ raw_spin_unlock(&lock);
cpumask_clear_cpu(cpu, to_cpumask(backtrace_mask));
rc = 1;
@@ -438,8 +438,8 @@
* Ayiee, looks like this CPU is stuck ...
* wait a few IRQs (5 seconds) before doing the oops ...
*/
- __this_cpu_inc(per_cpu_var(alert_counter));
- if (__this_cpu_read(per_cpu_var(alert_counter)) == 5 * nmi_hz)
+ __this_cpu_inc(alert_counter);
+ if (__this_cpu_read(alert_counter) == 5 * nmi_hz)
/*
* die_nmi will return ONLY if NOTIFY_STOP happens..
*/
@@ -447,7 +447,7 @@
regs, panic_on_timeout);
} else {
__get_cpu_var(last_irq_sum) = sum;
- __this_cpu_write(per_cpu_var(alert_counter), 0);
+ __this_cpu_write(alert_counter, 0);
}
/* see if the nmi watchdog went off */
diff --git a/arch/x86/kernel/cpu/mtrr/cleanup.c b/arch/x86/kernel/cpu/mtrr/cleanup.c
index 09b1698..06130b5 100644
--- a/arch/x86/kernel/cpu/mtrr/cleanup.c
+++ b/arch/x86/kernel/cpu/mtrr/cleanup.c
@@ -22,10 +22,10 @@
#include <linux/pci.h>
#include <linux/smp.h>
#include <linux/cpu.h>
-#include <linux/sort.h>
#include <linux/mutex.h>
#include <linux/uaccess.h>
#include <linux/kvm_para.h>
+#include <linux/range.h>
#include <asm/processor.h>
#include <asm/e820.h>
@@ -34,11 +34,6 @@
#include "mtrr.h"
-struct res_range {
- unsigned long start;
- unsigned long end;
-};
-
struct var_mtrr_range_state {
unsigned long base_pfn;
unsigned long size_pfn;
@@ -56,7 +51,7 @@
/* Should be related to MTRR_VAR_RANGES nums */
#define RANGE_NUM 256
-static struct res_range __initdata range[RANGE_NUM];
+static struct range __initdata range[RANGE_NUM];
static int __initdata nr_range;
static struct var_mtrr_range_state __initdata range_state[RANGE_NUM];
@@ -64,152 +59,11 @@
static int __initdata debug_print;
#define Dprintk(x...) do { if (debug_print) printk(KERN_DEBUG x); } while (0)
-
-static int __init
-add_range(struct res_range *range, int nr_range,
- unsigned long start, unsigned long end)
-{
- /* Out of slots: */
- if (nr_range >= RANGE_NUM)
- return nr_range;
-
- range[nr_range].start = start;
- range[nr_range].end = end;
-
- nr_range++;
-
- return nr_range;
-}
-
-static int __init
-add_range_with_merge(struct res_range *range, int nr_range,
- unsigned long start, unsigned long end)
-{
- int i;
-
- /* Try to merge it with old one: */
- for (i = 0; i < nr_range; i++) {
- unsigned long final_start, final_end;
- unsigned long common_start, common_end;
-
- if (!range[i].end)
- continue;
-
- common_start = max(range[i].start, start);
- common_end = min(range[i].end, end);
- if (common_start > common_end + 1)
- continue;
-
- final_start = min(range[i].start, start);
- final_end = max(range[i].end, end);
-
- range[i].start = final_start;
- range[i].end = final_end;
- return nr_range;
- }
-
- /* Need to add it: */
- return add_range(range, nr_range, start, end);
-}
-
-static void __init
-subtract_range(struct res_range *range, unsigned long start, unsigned long end)
-{
- int i, j;
-
- for (j = 0; j < RANGE_NUM; j++) {
- if (!range[j].end)
- continue;
-
- if (start <= range[j].start && end >= range[j].end) {
- range[j].start = 0;
- range[j].end = 0;
- continue;
- }
-
- if (start <= range[j].start && end < range[j].end &&
- range[j].start < end + 1) {
- range[j].start = end + 1;
- continue;
- }
-
-
- if (start > range[j].start && end >= range[j].end &&
- range[j].end > start - 1) {
- range[j].end = start - 1;
- continue;
- }
-
- if (start > range[j].start && end < range[j].end) {
- /* Find the new spare: */
- for (i = 0; i < RANGE_NUM; i++) {
- if (range[i].end == 0)
- break;
- }
- if (i < RANGE_NUM) {
- range[i].end = range[j].end;
- range[i].start = end + 1;
- } else {
- printk(KERN_ERR "run of slot in ranges\n");
- }
- range[j].end = start - 1;
- continue;
- }
- }
-}
-
-static int __init cmp_range(const void *x1, const void *x2)
-{
- const struct res_range *r1 = x1;
- const struct res_range *r2 = x2;
- long start1, start2;
-
- start1 = r1->start;
- start2 = r2->start;
-
- return start1 - start2;
-}
-
-static int __init clean_sort_range(struct res_range *range, int az)
-{
- int i, j, k = az - 1, nr_range = 0;
-
- for (i = 0; i < k; i++) {
- if (range[i].end)
- continue;
- for (j = k; j > i; j--) {
- if (range[j].end) {
- k = j;
- break;
- }
- }
- if (j == i)
- break;
- range[i].start = range[k].start;
- range[i].end = range[k].end;
- range[k].start = 0;
- range[k].end = 0;
- k--;
- }
- /* count it */
- for (i = 0; i < az; i++) {
- if (!range[i].end) {
- nr_range = i;
- break;
- }
- }
-
- /* sort them */
- sort(range, nr_range, sizeof(struct res_range), cmp_range, NULL);
-
- return nr_range;
-}
-
#define BIOS_BUG_MSG KERN_WARNING \
"WARNING: BIOS bug: VAR MTRR %d contains strange UC entry under 1M, check with your system vendor!\n"
static int __init
-x86_get_mtrr_mem_range(struct res_range *range, int nr_range,
+x86_get_mtrr_mem_range(struct range *range, int nr_range,
unsigned long extra_remove_base,
unsigned long extra_remove_size)
{
@@ -223,14 +77,14 @@
continue;
base = range_state[i].base_pfn;
size = range_state[i].size_pfn;
- nr_range = add_range_with_merge(range, nr_range, base,
- base + size - 1);
+ nr_range = add_range_with_merge(range, RANGE_NUM, nr_range,
+ base, base + size);
}
if (debug_print) {
printk(KERN_DEBUG "After WB checking\n");
for (i = 0; i < nr_range; i++)
- printk(KERN_DEBUG "MTRR MAP PFN: %016lx - %016lx\n",
- range[i].start, range[i].end + 1);
+ printk(KERN_DEBUG "MTRR MAP PFN: %016llx - %016llx\n",
+ range[i].start, range[i].end);
}
/* Take out UC ranges: */
@@ -252,19 +106,19 @@
size -= (1<<(20-PAGE_SHIFT)) - base;
base = 1<<(20-PAGE_SHIFT);
}
- subtract_range(range, base, base + size - 1);
+ subtract_range(range, RANGE_NUM, base, base + size);
}
if (extra_remove_size)
- subtract_range(range, extra_remove_base,
- extra_remove_base + extra_remove_size - 1);
+ subtract_range(range, RANGE_NUM, extra_remove_base,
+ extra_remove_base + extra_remove_size);
if (debug_print) {
printk(KERN_DEBUG "After UC checking\n");
for (i = 0; i < RANGE_NUM; i++) {
if (!range[i].end)
continue;
- printk(KERN_DEBUG "MTRR MAP PFN: %016lx - %016lx\n",
- range[i].start, range[i].end + 1);
+ printk(KERN_DEBUG "MTRR MAP PFN: %016llx - %016llx\n",
+ range[i].start, range[i].end);
}
}
@@ -273,26 +127,22 @@
if (debug_print) {
printk(KERN_DEBUG "After sorting\n");
for (i = 0; i < nr_range; i++)
- printk(KERN_DEBUG "MTRR MAP PFN: %016lx - %016lx\n",
- range[i].start, range[i].end + 1);
+ printk(KERN_DEBUG "MTRR MAP PFN: %016llx - %016llx\n",
+ range[i].start, range[i].end);
}
- /* clear those is not used */
- for (i = nr_range; i < RANGE_NUM; i++)
- memset(&range[i], 0, sizeof(range[i]));
-
return nr_range;
}
#ifdef CONFIG_MTRR_SANITIZER
-static unsigned long __init sum_ranges(struct res_range *range, int nr_range)
+static unsigned long __init sum_ranges(struct range *range, int nr_range)
{
unsigned long sum = 0;
int i;
for (i = 0; i < nr_range; i++)
- sum += range[i].end + 1 - range[i].start;
+ sum += range[i].end - range[i].start;
return sum;
}
@@ -621,7 +471,7 @@
early_param("mtrr_spare_reg_nr", parse_mtrr_spare_reg);
static int __init
-x86_setup_var_mtrrs(struct res_range *range, int nr_range,
+x86_setup_var_mtrrs(struct range *range, int nr_range,
u64 chunk_size, u64 gran_size)
{
struct var_mtrr_state var_state;
@@ -639,7 +489,7 @@
/* Write the range: */
for (i = 0; i < nr_range; i++) {
set_var_mtrr_range(&var_state, range[i].start,
- range[i].end - range[i].start + 1);
+ range[i].end - range[i].start);
}
/* Write the last range: */
@@ -742,7 +592,7 @@
unsigned long x_remove_base,
unsigned long x_remove_size, int i)
{
- static struct res_range range_new[RANGE_NUM];
+ static struct range range_new[RANGE_NUM];
unsigned long range_sums_new;
static int nr_range_new;
int num_reg;
@@ -869,10 +719,10 @@
* [0, 1M) should always be covered by var mtrr with WB
* and fixed mtrrs should take effect before var mtrr for it:
*/
- nr_range = add_range_with_merge(range, nr_range, 0,
- (1ULL<<(20 - PAGE_SHIFT)) - 1);
+ nr_range = add_range_with_merge(range, RANGE_NUM, nr_range, 0,
+ 1ULL<<(20 - PAGE_SHIFT));
/* Sort the ranges: */
- sort(range, nr_range, sizeof(struct res_range), cmp_range, NULL);
+ sort_range(range, nr_range);
range_sums = sum_ranges(range, nr_range);
printk(KERN_INFO "total RAM covered: %ldM\n",
@@ -1089,9 +939,9 @@
nr_range = 0;
if (mtrr_tom2) {
range[nr_range].start = (1ULL<<(32 - PAGE_SHIFT));
- range[nr_range].end = (mtrr_tom2 >> PAGE_SHIFT) - 1;
- if (highest_pfn < range[nr_range].end + 1)
- highest_pfn = range[nr_range].end + 1;
+ range[nr_range].end = mtrr_tom2 >> PAGE_SHIFT;
+ if (highest_pfn < range[nr_range].end)
+ highest_pfn = range[nr_range].end;
nr_range++;
}
nr_range = x86_get_mtrr_mem_range(range, nr_range, 0, 0);
@@ -1103,15 +953,15 @@
/* Check the holes: */
for (i = 0; i < nr_range - 1; i++) {
- if (range[i].end + 1 < range[i+1].start)
- total_trim_size += real_trim_memory(range[i].end + 1,
+ if (range[i].end < range[i+1].start)
+ total_trim_size += real_trim_memory(range[i].end,
range[i+1].start);
}
/* Check the top: */
i = nr_range - 1;
- if (range[i].end + 1 < end_pfn)
- total_trim_size += real_trim_memory(range[i].end + 1,
+ if (range[i].end < end_pfn)
+ total_trim_size += real_trim_memory(range[i].end,
end_pfn);
if (total_trim_size) {
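
The switch from struct res_range to the generic struct range also changes the 'end' convention from inclusive to exclusive, which is why the +1/-1 adjustments disappear throughout this file. A minimal sketch of the two conventions for the [0, 1M) range with 4K pages (user-space C, PAGE_SHIFT of 12 assumed):

    #include <stdio.h>

    int main(void)
    {
            unsigned long base = 0, size = 1UL << (20 - 12);   /* 256 pfns */

            unsigned long incl_end = base + size - 1;   /* old res_range: 0xff  */
            unsigned long excl_end = base + size;       /* new range:     0x100 */

            printf("inclusive end %#lx, size %lu\n", incl_end, incl_end + 1 - base);
            printf("exclusive end %#lx, size %lu\n", excl_end, excl_end - base);
            return 0;
    }
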
diff --git a/arch/x86/kernel/e820.c b/arch/x86/kernel/e820.c
index a966b75..740b440 100644
--- a/arch/x86/kernel/e820.c
+++ b/arch/x86/kernel/e820.c
@@ -12,21 +12,13 @@
#include <linux/types.h>
#include <linux/init.h>
#include <linux/bootmem.h>
-#include <linux/ioport.h>
-#include <linux/string.h>
-#include <linux/kexec.h>
-#include <linux/module.h>
-#include <linux/mm.h>
#include <linux/pfn.h>
#include <linux/suspend.h>
#include <linux/firmware-map.h>
-#include <asm/pgtable.h>
-#include <asm/page.h>
#include <asm/e820.h>
#include <asm/proto.h>
#include <asm/setup.h>
-#include <asm/trampoline.h>
/*
* The e820 map is the map that gets modified e.g. with command line parameters
@@ -730,288 +722,6 @@
#endif
/*
- * Early reserved memory areas.
- */
-#define MAX_EARLY_RES 32
-
-struct early_res {
- u64 start, end;
- char name[16];
- char overlap_ok;
-};
-static struct early_res early_res[MAX_EARLY_RES] __initdata = {
- { 0, PAGE_SIZE, "BIOS data page", 1 }, /* BIOS data page */
-#if defined(CONFIG_X86_32) && defined(CONFIG_X86_TRAMPOLINE)
- /*
- * But first pinch a few for the stack/trampoline stuff
- * FIXME: Don't need the extra page at 4K, but need to fix
- * trampoline before removing it. (see the GDT stuff)
- */
- { PAGE_SIZE, PAGE_SIZE + PAGE_SIZE, "EX TRAMPOLINE", 1 },
-#endif
-
- {}
-};
-
-static int __init find_overlapped_early(u64 start, u64 end)
-{
- int i;
- struct early_res *r;
-
- for (i = 0; i < MAX_EARLY_RES && early_res[i].end; i++) {
- r = &early_res[i];
- if (end > r->start && start < r->end)
- break;
- }
-
- return i;
-}
-
-/*
- * Drop the i-th range from the early reservation map,
- * by copying any higher ranges down one over it, and
- * clearing what had been the last slot.
- */
-static void __init drop_range(int i)
-{
- int j;
-
- for (j = i + 1; j < MAX_EARLY_RES && early_res[j].end; j++)
- ;
-
- memmove(&early_res[i], &early_res[i + 1],
- (j - 1 - i) * sizeof(struct early_res));
-
- early_res[j - 1].end = 0;
-}
-
-/*
- * Split any existing ranges that:
- * 1) are marked 'overlap_ok', and
- * 2) overlap with the stated range [start, end)
- * into whatever portion (if any) of the existing range is entirely
- * below or entirely above the stated range. Drop the portion
- * of the existing range that overlaps with the stated range,
- * which will allow the caller of this routine to then add that
- * stated range without conflicting with any existing range.
- */
-static void __init drop_overlaps_that_are_ok(u64 start, u64 end)
-{
- int i;
- struct early_res *r;
- u64 lower_start, lower_end;
- u64 upper_start, upper_end;
- char name[16];
-
- for (i = 0; i < MAX_EARLY_RES && early_res[i].end; i++) {
- r = &early_res[i];
-
- /* Continue past non-overlapping ranges */
- if (end <= r->start || start >= r->end)
- continue;
-
- /*
- * Leave non-ok overlaps as is; let caller
- * panic "Overlapping early reservations"
- * when it hits this overlap.
- */
- if (!r->overlap_ok)
- return;
-
- /*
- * We have an ok overlap. We will drop it from the early
- * reservation map, and add back in any non-overlapping
- * portions (lower or upper) as separate, overlap_ok,
- * non-overlapping ranges.
- */
-
- /* 1. Note any non-overlapping (lower or upper) ranges. */
- strncpy(name, r->name, sizeof(name) - 1);
-
- lower_start = lower_end = 0;
- upper_start = upper_end = 0;
- if (r->start < start) {
- lower_start = r->start;
- lower_end = start;
- }
- if (r->end > end) {
- upper_start = end;
- upper_end = r->end;
- }
-
- /* 2. Drop the original ok overlapping range */
- drop_range(i);
-
- i--; /* resume for-loop on copied down entry */
-
- /* 3. Add back in any non-overlapping ranges. */
- if (lower_end)
- reserve_early_overlap_ok(lower_start, lower_end, name);
- if (upper_end)
- reserve_early_overlap_ok(upper_start, upper_end, name);
- }
-}
-
-static void __init __reserve_early(u64 start, u64 end, char *name,
- int overlap_ok)
-{
- int i;
- struct early_res *r;
-
- i = find_overlapped_early(start, end);
- if (i >= MAX_EARLY_RES)
- panic("Too many early reservations");
- r = &early_res[i];
- if (r->end)
- panic("Overlapping early reservations "
- "%llx-%llx %s to %llx-%llx %s\n",
- start, end - 1, name?name:"", r->start,
- r->end - 1, r->name);
- r->start = start;
- r->end = end;
- r->overlap_ok = overlap_ok;
- if (name)
- strncpy(r->name, name, sizeof(r->name) - 1);
-}
-
-/*
- * A few early reservtations come here.
- *
- * The 'overlap_ok' in the name of this routine does -not- mean it
- * is ok for these reservations to overlap an earlier reservation.
- * Rather it means that it is ok for subsequent reservations to
- * overlap this one.
- *
- * Use this entry point to reserve early ranges when you are doing
- * so out of "Paranoia", reserving perhaps more memory than you need,
- * just in case, and don't mind a subsequent overlapping reservation
- * that is known to be needed.
- *
- * The drop_overlaps_that_are_ok() call here isn't really needed.
- * It would be needed if we had two colliding 'overlap_ok'
- * reservations, so that the second such would not panic on the
- * overlap with the first. We don't have any such as of this
- * writing, but might as well tolerate such if it happens in
- * the future.
- */
-void __init reserve_early_overlap_ok(u64 start, u64 end, char *name)
-{
- drop_overlaps_that_are_ok(start, end);
- __reserve_early(start, end, name, 1);
-}
-
-/*
- * Most early reservations come here.
- *
- * We first have drop_overlaps_that_are_ok() drop any pre-existing
- * 'overlap_ok' ranges, so that we can then reserve this memory
- * range without risk of panic'ing on an overlapping overlap_ok
- * early reservation.
- */
-void __init reserve_early(u64 start, u64 end, char *name)
-{
- if (start >= end)
- return;
-
- drop_overlaps_that_are_ok(start, end);
- __reserve_early(start, end, name, 0);
-}
-
-void __init free_early(u64 start, u64 end)
-{
- struct early_res *r;
- int i;
-
- i = find_overlapped_early(start, end);
- r = &early_res[i];
- if (i >= MAX_EARLY_RES || r->end != end || r->start != start)
- panic("free_early on not reserved area: %llx-%llx!",
- start, end - 1);
-
- drop_range(i);
-}
-
-void __init early_res_to_bootmem(u64 start, u64 end)
-{
- int i, count;
- u64 final_start, final_end;
-
- count = 0;
- for (i = 0; i < MAX_EARLY_RES && early_res[i].end; i++)
- count++;
-
- printk(KERN_INFO "(%d early reservations) ==> bootmem [%010llx - %010llx]\n",
- count, start, end);
- for (i = 0; i < count; i++) {
- struct early_res *r = &early_res[i];
- printk(KERN_INFO " #%d [%010llx - %010llx] %16s", i,
- r->start, r->end, r->name);
- final_start = max(start, r->start);
- final_end = min(end, r->end);
- if (final_start >= final_end) {
- printk(KERN_CONT "\n");
- continue;
- }
- printk(KERN_CONT " ==> [%010llx - %010llx]\n",
- final_start, final_end);
- reserve_bootmem_generic(final_start, final_end - final_start,
- BOOTMEM_DEFAULT);
- }
-}
-
-/* Check for already reserved areas */
-static inline int __init bad_addr(u64 *addrp, u64 size, u64 align)
-{
- int i;
- u64 addr = *addrp;
- int changed = 0;
- struct early_res *r;
-again:
- i = find_overlapped_early(addr, addr + size);
- r = &early_res[i];
- if (i < MAX_EARLY_RES && r->end) {
- *addrp = addr = round_up(r->end, align);
- changed = 1;
- goto again;
- }
- return changed;
-}
-
-/* Check for already reserved areas */
-static inline int __init bad_addr_size(u64 *addrp, u64 *sizep, u64 align)
-{
- int i;
- u64 addr = *addrp, last;
- u64 size = *sizep;
- int changed = 0;
-again:
- last = addr + size;
- for (i = 0; i < MAX_EARLY_RES && early_res[i].end; i++) {
- struct early_res *r = &early_res[i];
- if (last > r->start && addr < r->start) {
- size = r->start - addr;
- changed = 1;
- goto again;
- }
- if (last > r->end && addr < r->end) {
- addr = round_up(r->end, align);
- size = last - addr;
- changed = 1;
- goto again;
- }
- if (last <= r->end && addr >= r->start) {
- (*sizep)++;
- return 0;
- }
- }
- if (changed) {
- *addrp = addr;
- *sizep = size;
- }
- return changed;
-}
-
-/*
* Find a free area with specified alignment in a specific range.
*/
u64 __init find_e820_area(u64 start, u64 end, u64 size, u64 align)
@@ -1020,29 +730,36 @@
for (i = 0; i < e820.nr_map; i++) {
struct e820entry *ei = &e820.map[i];
- u64 addr, last;
- u64 ei_last;
+ u64 addr;
+ u64 ei_start, ei_last;
if (ei->type != E820_RAM)
continue;
- addr = round_up(ei->addr, align);
+
ei_last = ei->addr + ei->size;
- if (addr < start)
- addr = round_up(start, align);
- if (addr >= ei_last)
- continue;
- while (bad_addr(&addr, size, align) && addr+size <= ei_last)
- ;
- last = addr + size;
- if (last > ei_last)
- continue;
- if (last > end)
- continue;
- return addr;
+ ei_start = ei->addr;
+ addr = find_early_area(ei_start, ei_last, start, end,
+ size, align);
+
+ if (addr != -1ULL)
+ return addr;
}
return -1ULL;
}
+u64 __init find_fw_memmap_area(u64 start, u64 end, u64 size, u64 align)
+{
+ return find_e820_area(start, end, size, align);
+}
+
+u64 __init get_max_mapped(void)
+{
+ u64 end = max_pfn_mapped;
+
+ end <<= PAGE_SHIFT;
+
+ return end;
+}
/*
* Find next free range after *start
*/
@@ -1052,25 +769,19 @@
for (i = 0; i < e820.nr_map; i++) {
struct e820entry *ei = &e820.map[i];
- u64 addr, last;
- u64 ei_last;
+ u64 addr;
+ u64 ei_start, ei_last;
if (ei->type != E820_RAM)
continue;
- addr = round_up(ei->addr, align);
+
ei_last = ei->addr + ei->size;
- if (addr < start)
- addr = round_up(start, align);
- if (addr >= ei_last)
- continue;
- *sizep = ei_last - addr;
- while (bad_addr_size(&addr, sizep, align) &&
- addr + *sizep <= ei_last)
- ;
- last = addr + *sizep;
- if (last > ei_last)
- continue;
- return addr;
+ ei_start = ei->addr;
+ addr = find_early_area_size(ei_start, ei_last, start,
+ sizep, align);
+
+ if (addr != -1ULL)
+ return addr;
}
return -1ULL;
@@ -1429,6 +1140,8 @@
end = MAX_RESOURCE_SIZE;
if (start >= end)
continue;
+ printk(KERN_DEBUG "reserve RAM buffer: %016llx - %016llx ",
+ start, end);
reserve_region_with_split(&iomem_resource, start, end,
"RAM buffer");
}
diff --git a/arch/x86/kernel/head32.c b/arch/x86/kernel/head32.c
index 5051b94..adedeef 100644
--- a/arch/x86/kernel/head32.c
+++ b/arch/x86/kernel/head32.c
@@ -29,6 +29,16 @@
void __init i386_start_kernel(void)
{
+#ifdef CONFIG_X86_TRAMPOLINE
+ /*
+ * But first pinch a few for the stack/trampoline stuff
+ * FIXME: Don't need the extra page at 4K, but need to fix
+ * trampoline before removing it. (see the GDT stuff)
+ */
+ reserve_early_overlap_ok(PAGE_SIZE, PAGE_SIZE + PAGE_SIZE,
+ "EX TRAMPOLINE");
+#endif
+
reserve_early(__pa_symbol(&_text), __pa_symbol(&__bss_stop), "TEXT DATA BSS");
#ifdef CONFIG_BLK_DEV_INITRD
diff --git a/arch/x86/kernel/head_32.S b/arch/x86/kernel/head_32.S
index 7fd318b..37c3d4b 100644
--- a/arch/x86/kernel/head_32.S
+++ b/arch/x86/kernel/head_32.S
@@ -442,8 +442,8 @@
*/
cmpb $0,ready
jne 1f
- movl $per_cpu__gdt_page,%eax
- movl $per_cpu__stack_canary,%ecx
+ movl $gdt_page,%eax
+ movl $stack_canary,%ecx
movw %cx, 8 * GDT_ENTRY_STACK_CANARY + 2(%eax)
shrl $16, %ecx
movb %cl, 8 * GDT_ENTRY_STACK_CANARY + 4(%eax)
@@ -706,7 +706,7 @@
.word 0 # 32 bit align gdt_desc.address
ENTRY(early_gdt_descr)
.word GDT_ENTRIES*8-1
- .long per_cpu__gdt_page /* Overwritten for secondary CPUs */
+ .long gdt_page /* Overwritten for secondary CPUs */
/*
* The boot_gdt must mirror the equivalent in setup.S and is
diff --git a/arch/x86/kernel/i8259.c b/arch/x86/kernel/i8259.c
index df89102..8c93a84 100644
--- a/arch/x86/kernel/i8259.c
+++ b/arch/x86/kernel/i8259.c
@@ -32,7 +32,7 @@
*/
static int i8259A_auto_eoi;
-DEFINE_SPINLOCK(i8259A_lock);
+DEFINE_RAW_SPINLOCK(i8259A_lock);
static void mask_and_ack_8259A(unsigned int);
struct irq_chip i8259A_chip = {
@@ -68,13 +68,13 @@
unsigned int mask = 1 << irq;
unsigned long flags;
- spin_lock_irqsave(&i8259A_lock, flags);
+ raw_spin_lock_irqsave(&i8259A_lock, flags);
cached_irq_mask |= mask;
if (irq & 8)
outb(cached_slave_mask, PIC_SLAVE_IMR);
else
outb(cached_master_mask, PIC_MASTER_IMR);
- spin_unlock_irqrestore(&i8259A_lock, flags);
+ raw_spin_unlock_irqrestore(&i8259A_lock, flags);
}
void enable_8259A_irq(unsigned int irq)
@@ -82,13 +82,13 @@
unsigned int mask = ~(1 << irq);
unsigned long flags;
- spin_lock_irqsave(&i8259A_lock, flags);
+ raw_spin_lock_irqsave(&i8259A_lock, flags);
cached_irq_mask &= mask;
if (irq & 8)
outb(cached_slave_mask, PIC_SLAVE_IMR);
else
outb(cached_master_mask, PIC_MASTER_IMR);
- spin_unlock_irqrestore(&i8259A_lock, flags);
+ raw_spin_unlock_irqrestore(&i8259A_lock, flags);
}
int i8259A_irq_pending(unsigned int irq)
@@ -97,12 +97,12 @@
unsigned long flags;
int ret;
- spin_lock_irqsave(&i8259A_lock, flags);
+ raw_spin_lock_irqsave(&i8259A_lock, flags);
if (irq < 8)
ret = inb(PIC_MASTER_CMD) & mask;
else
ret = inb(PIC_SLAVE_CMD) & (mask >> 8);
- spin_unlock_irqrestore(&i8259A_lock, flags);
+ raw_spin_unlock_irqrestore(&i8259A_lock, flags);
return ret;
}
@@ -150,7 +150,7 @@
unsigned int irqmask = 1 << irq;
unsigned long flags;
- spin_lock_irqsave(&i8259A_lock, flags);
+ raw_spin_lock_irqsave(&i8259A_lock, flags);
/*
* Lightweight spurious IRQ detection. We do not want
* to overdo spurious IRQ handling - it's usually a sign
@@ -183,7 +183,7 @@
outb(cached_master_mask, PIC_MASTER_IMR);
outb(0x60+irq, PIC_MASTER_CMD); /* 'Specific EOI to master */
}
- spin_unlock_irqrestore(&i8259A_lock, flags);
+ raw_spin_unlock_irqrestore(&i8259A_lock, flags);
return;
spurious_8259A_irq:
@@ -285,24 +285,24 @@
{
unsigned long flags;
- spin_lock_irqsave(&i8259A_lock, flags);
+ raw_spin_lock_irqsave(&i8259A_lock, flags);
outb(0xff, PIC_MASTER_IMR); /* mask all of 8259A-1 */
outb(0xff, PIC_SLAVE_IMR); /* mask all of 8259A-2 */
- spin_unlock_irqrestore(&i8259A_lock, flags);
+ raw_spin_unlock_irqrestore(&i8259A_lock, flags);
}
void unmask_8259A(void)
{
unsigned long flags;
- spin_lock_irqsave(&i8259A_lock, flags);
+ raw_spin_lock_irqsave(&i8259A_lock, flags);
outb(cached_master_mask, PIC_MASTER_IMR); /* restore master IRQ mask */
outb(cached_slave_mask, PIC_SLAVE_IMR); /* restore slave IRQ mask */
- spin_unlock_irqrestore(&i8259A_lock, flags);
+ raw_spin_unlock_irqrestore(&i8259A_lock, flags);
}
void init_8259A(int auto_eoi)
@@ -311,7 +311,7 @@
i8259A_auto_eoi = auto_eoi;
- spin_lock_irqsave(&i8259A_lock, flags);
+ raw_spin_lock_irqsave(&i8259A_lock, flags);
outb(0xff, PIC_MASTER_IMR); /* mask all of 8259A-1 */
outb(0xff, PIC_SLAVE_IMR); /* mask all of 8259A-2 */
@@ -356,5 +356,5 @@
outb(cached_master_mask, PIC_MASTER_IMR); /* restore master IRQ mask */
outb(cached_slave_mask, PIC_SLAVE_IMR); /* restore slave IRQ mask */
- spin_unlock_irqrestore(&i8259A_lock, flags);
+ raw_spin_unlock_irqrestore(&i8259A_lock, flags);
}
diff --git a/arch/x86/kernel/irqinit.c b/arch/x86/kernel/irqinit.c
index d593222..fce55d5 100644
--- a/arch/x86/kernel/irqinit.c
+++ b/arch/x86/kernel/irqinit.c
@@ -84,24 +84,7 @@
};
DEFINE_PER_CPU(vector_irq_t, vector_irq) = {
- [0 ... IRQ0_VECTOR - 1] = -1,
- [IRQ0_VECTOR] = 0,
- [IRQ1_VECTOR] = 1,
- [IRQ2_VECTOR] = 2,
- [IRQ3_VECTOR] = 3,
- [IRQ4_VECTOR] = 4,
- [IRQ5_VECTOR] = 5,
- [IRQ6_VECTOR] = 6,
- [IRQ7_VECTOR] = 7,
- [IRQ8_VECTOR] = 8,
- [IRQ9_VECTOR] = 9,
- [IRQ10_VECTOR] = 10,
- [IRQ11_VECTOR] = 11,
- [IRQ12_VECTOR] = 12,
- [IRQ13_VECTOR] = 13,
- [IRQ14_VECTOR] = 14,
- [IRQ15_VECTOR] = 15,
- [IRQ15_VECTOR + 1 ... NR_VECTORS - 1] = -1
+ [0 ... NR_VECTORS - 1] = -1,
};
int vector_used_by_percpu_irq(unsigned int vector)
@@ -116,6 +99,9 @@
return 0;
}
+/* Number of legacy interrupts */
+int nr_legacy_irqs __read_mostly = NR_IRQS_LEGACY;
+
void __init init_ISA_irqs(void)
{
int i;
@@ -142,6 +128,19 @@
void __init init_IRQ(void)
{
+ int i;
+
+ /*
+	 * On cpu 0, assign IRQ0_VECTOR..IRQ15_VECTOR to IRQ 0..15.
+	 * If these IRQs are handled by legacy interrupt controllers like the
+	 * PIC, this configuration will likely stay static after boot. If
+	 * they are handled by more modern controllers like the IO-APIC,
+	 * this vector space can be freed and re-used dynamically as the
+	 * irqs migrate etc.
+ */
+ for (i = 0; i < nr_legacy_irqs; i++)
+ per_cpu(vector_irq, 0)[IRQ0_VECTOR + i] = i;
+
x86_init.irqs.intr_init();
}
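The irqinit.c hunk above trades the long per-vector initializer for a GCC range designator plus a runtime loop over nr_legacy_irqs. A minimal userspace sketch of the same idiom follows; NR_VECTORS, IRQ0_VECTOR and the legacy count are stand-in values, not the kernel's.

#include <stdio.h>

#define NR_VECTORS	32	/* stand-in; the real table is larger */
#define IRQ0_VECTOR	0x10	/* stand-in base vector */
#define NR_LEGACY	16

/* GCC range designator: every slot starts out unassigned (-1). */
static int vector_irq[NR_VECTORS] = { [0 ... NR_VECTORS - 1] = -1 };

int main(void)
{
	int i;

	/* Assign the legacy vectors at run time, as init_IRQ() now does on cpu 0. */
	for (i = 0; i < NR_LEGACY; i++)
		vector_irq[IRQ0_VECTOR + i] = i;

	for (i = 0; i < NR_VECTORS; i++)
		if (vector_irq[i] != -1)
			printf("vector %#x -> irq %d\n", i, vector_irq[i]);
	return 0;
}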
diff --git a/arch/x86/kernel/mmconf-fam10h_64.c b/arch/x86/kernel/mmconf-fam10h_64.c
index 712d15f..7182580 100644
--- a/arch/x86/kernel/mmconf-fam10h_64.c
+++ b/arch/x86/kernel/mmconf-fam10h_64.c
@@ -7,6 +7,8 @@
#include <linux/string.h>
#include <linux/pci.h>
#include <linux/dmi.h>
+#include <linux/range.h>
+
#include <asm/pci-direct.h>
#include <linux/sort.h>
#include <asm/io.h>
@@ -30,11 +32,6 @@
{ 0xff, 0, PCI_VENDOR_ID_AMD, 0x1200 },
};
-struct range {
- u64 start;
- u64 end;
-};
-
static int __cpuinit cmp_range(const void *x1, const void *x2)
{
const struct range *r1 = x1;
diff --git a/arch/x86/kernel/paravirt.c b/arch/x86/kernel/paravirt.c
index 1b1739d..1db183e 100644
--- a/arch/x86/kernel/paravirt.c
+++ b/arch/x86/kernel/paravirt.c
@@ -428,10 +428,6 @@
.ptep_modify_prot_start = __ptep_modify_prot_start,
.ptep_modify_prot_commit = __ptep_modify_prot_commit,
-#ifdef CONFIG_HIGHPTE
- .kmap_atomic_pte = kmap_atomic,
-#endif
-
#if PAGETABLE_LEVELS >= 3
#ifdef CONFIG_X86_PAE
.set_pte_atomic = native_set_pte_atomic,
diff --git a/arch/x86/kernel/pci-dma.c b/arch/x86/kernel/pci-dma.c
index 75e14e2..1aa966c 100644
--- a/arch/x86/kernel/pci-dma.c
+++ b/arch/x86/kernel/pci-dma.c
@@ -65,7 +65,7 @@
}
EXPORT_SYMBOL(dma_set_mask);
-#ifdef CONFIG_X86_64
+#if defined(CONFIG_X86_64) && !defined(CONFIG_NUMA)
static __initdata void *dma32_bootmem_ptr;
static unsigned long dma32_bootmem_size __initdata = (128ULL<<20);
@@ -116,14 +116,21 @@
dma32_bootmem_ptr = NULL;
dma32_bootmem_size = 0;
}
+#else
+void __init dma32_reserve_bootmem(void)
+{
+}
+static void __init dma32_free_bootmem(void)
+{
+}
+
#endif
void __init pci_iommu_alloc(void)
{
-#ifdef CONFIG_X86_64
/* free the range so iommu could get some range less than 4G */
dma32_free_bootmem();
-#endif
+
if (pci_swiotlb_detect())
goto out;
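The pci-dma.c hunk removes the #ifdef around the dma32_free_bootmem() call by compiling empty stubs when the dma32 bootmem path is not built. A tiny sketch of that idiom, with a made-up HAVE_DMA32_BOOTMEM switch standing in for the CONFIG_X86_64/CONFIG_NUMA test:

#include <stdio.h>

/* #define HAVE_DMA32_BOOTMEM */

#ifdef HAVE_DMA32_BOOTMEM
static void dma32_free_bootmem(void)
{
	printf("freeing the dma32 bootmem reservation\n");
}
#else
/* Empty stub, so callers need no #ifdef of their own. */
static void dma32_free_bootmem(void)
{
}
#endif

int main(void)
{
	dma32_free_bootmem();	/* always compiles; may be a no-op */
	return 0;
}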
diff --git a/arch/x86/kernel/reboot.c b/arch/x86/kernel/reboot.c
index 704bddc..8e1aac8 100644
--- a/arch/x86/kernel/reboot.c
+++ b/arch/x86/kernel/reboot.c
@@ -461,6 +461,14 @@
DMI_MATCH(DMI_PRODUCT_NAME, "Macmini3,1"),
},
},
+ { /* Handle problems with rebooting on the iMac9,1. */
+ .callback = set_pci_reboot,
+ .ident = "Apple iMac9,1",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Apple Inc."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "iMac9,1"),
+ },
+ },
{ }
};
diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c
index cb42109..5d7ba1a 100644
--- a/arch/x86/kernel/setup.c
+++ b/arch/x86/kernel/setup.c
@@ -969,16 +969,12 @@
#endif
initmem_init(0, max_pfn, acpi, k8);
-
-#ifdef CONFIG_X86_64
- /*
- * dma32_reserve_bootmem() allocates bootmem which may conflict
- * with the crashkernel command line, so do that after
- * reserve_crashkernel()
- */
- dma32_reserve_bootmem();
+#ifndef CONFIG_NO_BOOTMEM
+ early_res_to_bootmem(0, max_low_pfn<<PAGE_SHIFT);
#endif
+ dma32_reserve_bootmem();
+
reserve_ibft_region();
#ifdef CONFIG_KVM_CLOCK
diff --git a/arch/x86/kernel/setup_percpu.c b/arch/x86/kernel/setup_percpu.c
index 35abcb8..ef6370b 100644
--- a/arch/x86/kernel/setup_percpu.c
+++ b/arch/x86/kernel/setup_percpu.c
@@ -137,7 +137,13 @@
static void __init pcpu_fc_free(void *ptr, size_t size)
{
+#ifdef CONFIG_NO_BOOTMEM
+ u64 start = __pa(ptr);
+ u64 end = start + size;
+ free_early_partial(start, end);
+#else
free_bootmem(__pa(ptr), size);
+#endif
}
static int __init pcpu_cpu_distance(unsigned int from, unsigned int to)
diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
index 9b44011..a435c76 100644
--- a/arch/x86/kernel/smpboot.c
+++ b/arch/x86/kernel/smpboot.c
@@ -241,6 +241,11 @@
map_cpu_to_logical_apicid();
notify_cpu_starting(cpuid);
+
+ /*
+ * Need to setup vector mappings before we enable interrupts.
+ */
+ __setup_vector_irq(smp_processor_id());
/*
* Get our bogomips.
*
@@ -315,7 +320,6 @@
*/
ipi_call_lock();
lock_vector_lock();
- __setup_vector_irq(smp_processor_id());
set_cpu_online(smp_processor_id(), true);
unlock_vector_lock();
ipi_call_unlock();
@@ -1212,11 +1216,12 @@
total_cpus = max_t(int, possible, num_processors + disabled_cpus);
- if (possible > CONFIG_NR_CPUS) {
+ /* nr_cpu_ids could be reduced via nr_cpus= */
+ if (possible > nr_cpu_ids) {
printk(KERN_WARNING
"%d Processors exceeds NR_CPUS limit of %d\n",
- possible, CONFIG_NR_CPUS);
- possible = CONFIG_NR_CPUS;
+ possible, nr_cpu_ids);
+ possible = nr_cpu_ids;
}
printk(KERN_INFO "SMP: Allowing %d CPUs, %d hotplug CPUs\n",
diff --git a/arch/x86/kernel/time.c b/arch/x86/kernel/time.c
index be25734..fb5cc5e1 100644
--- a/arch/x86/kernel/time.c
+++ b/arch/x86/kernel/time.c
@@ -70,11 +70,11 @@
* manually to deassert NMI lines for the watchdog if run
* on an 82489DX-based system.
*/
- spin_lock(&i8259A_lock);
+ raw_spin_lock(&i8259A_lock);
outb(0x0c, PIC_MASTER_OCW3);
/* Ack the IRQ; AEOI will end it automatically. */
inb(PIC_MASTER_POLL);
- spin_unlock(&i8259A_lock);
+ raw_spin_unlock(&i8259A_lock);
}
global_clock_event->event_handler(global_clock_event);
diff --git a/arch/x86/kernel/visws_quirks.c b/arch/x86/kernel/visws_quirks.c
index 34a279a..ab38ce0 100644
--- a/arch/x86/kernel/visws_quirks.c
+++ b/arch/x86/kernel/visws_quirks.c
@@ -559,7 +559,7 @@
struct irq_desc *desc;
unsigned long flags;
- spin_lock_irqsave(&i8259A_lock, flags);
+ raw_spin_lock_irqsave(&i8259A_lock, flags);
/* Find out what's interrupting in the PIIX4 master 8259 */
outb(0x0c, 0x20); /* OCW3 Poll command */
@@ -596,7 +596,7 @@
outb(0x60 + realirq, 0x20);
}
- spin_unlock_irqrestore(&i8259A_lock, flags);
+ raw_spin_unlock_irqrestore(&i8259A_lock, flags);
desc = irq_to_desc(realirq);
@@ -614,7 +614,7 @@
return IRQ_HANDLED;
out_unlock:
- spin_unlock_irqrestore(&i8259A_lock, flags);
+ raw_spin_unlock_irqrestore(&i8259A_lock, flags);
return IRQ_NONE;
}
diff --git a/arch/x86/kernel/vmi_32.c b/arch/x86/kernel/vmi_32.c
index d430e4c..7dd599d 100644
--- a/arch/x86/kernel/vmi_32.c
+++ b/arch/x86/kernel/vmi_32.c
@@ -33,6 +33,7 @@
#include <asm/fixmap.h>
#include <asm/apicdef.h>
#include <asm/apic.h>
+#include <asm/pgalloc.h>
#include <asm/processor.h>
#include <asm/timer.h>
#include <asm/vmi_time.h>
@@ -266,30 +267,6 @@
{
}
-#ifdef CONFIG_HIGHPTE
-static void *vmi_kmap_atomic_pte(struct page *page, enum km_type type)
-{
- void *va = kmap_atomic(page, type);
-
- /*
- * Internally, the VMI ROM must map virtual addresses to physical
- * addresses for processing MMU updates. By the time MMU updates
- * are issued, this information is typically already lost.
- * Fortunately, the VMI provides a cache of mapping slots for active
- * page tables.
- *
- * We use slot zero for the linear mapping of physical memory, and
- * in HIGHPTE kernels, slot 1 and 2 for KM_PTE0 and KM_PTE1.
- *
- * args: SLOT VA COUNT PFN
- */
- BUG_ON(type != KM_PTE0 && type != KM_PTE1);
- vmi_ops.set_linear_mapping((type - KM_PTE0)+1, va, 1, page_to_pfn(page));
-
- return va;
-}
-#endif
-
static void vmi_allocate_pte(struct mm_struct *mm, unsigned long pfn)
{
vmi_ops.allocate_page(pfn, VMI_PAGE_L1, 0, 0, 0);
@@ -640,6 +617,12 @@
u64 reloc;
const struct vmi_relocation_info *rel = (struct vmi_relocation_info *)&reloc;
+ /*
+ * Prevent page tables from being allocated in highmem, even if
+ * CONFIG_HIGHPTE is enabled.
+ */
+ __userpte_alloc_gfp &= ~__GFP_HIGHMEM;
+
if (call_vrom_func(vmi_rom, vmi_init) != 0) {
printk(KERN_ERR "VMI ROM failed to initialize!");
return 0;
@@ -778,10 +761,6 @@
/* Set linear is needed in all cases */
vmi_ops.set_linear_mapping = vmi_get_function(VMI_CALL_SetLinearMapping);
-#ifdef CONFIG_HIGHPTE
- if (vmi_ops.set_linear_mapping)
- pv_mmu_ops.kmap_atomic_pte = vmi_kmap_atomic_pte;
-#endif
/*
* These MUST always be patched. Don't support indirect jumps
diff --git a/arch/x86/kernel/vmiclock_32.c b/arch/x86/kernel/vmiclock_32.c
index 74c92bb..2f1ca56 100644
--- a/arch/x86/kernel/vmiclock_32.c
+++ b/arch/x86/kernel/vmiclock_32.c
@@ -79,11 +79,7 @@
static inline unsigned int vmi_get_timer_vector(void)
{
-#ifdef CONFIG_X86_IO_APIC
- return FIRST_DEVICE_VECTOR;
-#else
- return FIRST_EXTERNAL_VECTOR;
-#endif
+ return IRQ0_VECTOR;
}
/** vmi clockchip */
diff --git a/arch/x86/kernel/vmlinux.lds.S b/arch/x86/kernel/vmlinux.lds.S
index f92a0da..44879df 100644
--- a/arch/x86/kernel/vmlinux.lds.S
+++ b/arch/x86/kernel/vmlinux.lds.S
@@ -341,7 +341,7 @@
* Per-cpu symbols which need to be offset from __per_cpu_load
* for the boot processor.
*/
-#define INIT_PER_CPU(x) init_per_cpu__##x = per_cpu__##x + __per_cpu_load
+#define INIT_PER_CPU(x) init_per_cpu__##x = x + __per_cpu_load
INIT_PER_CPU(gdt_page);
INIT_PER_CPU(irq_stack_union);
@@ -352,7 +352,7 @@
"kernel image bigger than KERNEL_IMAGE_SIZE");
#ifdef CONFIG_SMP
-. = ASSERT((per_cpu__irq_stack_union == 0),
+. = ASSERT((irq_stack_union == 0),
"irq_stack_union is not at start of per-cpu area");
#endif
diff --git a/arch/x86/mm/init_32.c b/arch/x86/mm/init_32.c
index 2226f2c..5cb3f0f 100644
--- a/arch/x86/mm/init_32.c
+++ b/arch/x86/mm/init_32.c
@@ -750,6 +750,7 @@
free_area_init_nodes(max_zone_pfns);
}
+#ifndef CONFIG_NO_BOOTMEM
static unsigned long __init setup_node_bootmem(int nodeid,
unsigned long start_pfn,
unsigned long end_pfn,
@@ -766,13 +767,14 @@
printk(KERN_INFO " node %d bootmap %08lx - %08lx\n",
nodeid, bootmap, bootmap + bootmap_size);
free_bootmem_with_active_regions(nodeid, end_pfn);
- early_res_to_bootmem(start_pfn<<PAGE_SHIFT, end_pfn<<PAGE_SHIFT);
return bootmap + bootmap_size;
}
+#endif
void __init setup_bootmem_allocator(void)
{
+#ifndef CONFIG_NO_BOOTMEM
int nodeid;
unsigned long bootmap_size, bootmap;
/*
@@ -784,11 +786,13 @@
if (bootmap == -1L)
panic("Cannot find bootmem map of size %ld\n", bootmap_size);
reserve_early(bootmap, bootmap + bootmap_size, "BOOTMAP");
+#endif
printk(KERN_INFO " mapped low ram: 0 - %08lx\n",
max_pfn_mapped<<PAGE_SHIFT);
printk(KERN_INFO " low ram: 0 - %08lx\n", max_low_pfn<<PAGE_SHIFT);
+#ifndef CONFIG_NO_BOOTMEM
for_each_online_node(nodeid) {
unsigned long start_pfn, end_pfn;
@@ -806,6 +810,7 @@
bootmap = setup_node_bootmem(nodeid, start_pfn, end_pfn,
bootmap);
}
+#endif
after_bootmem = 1;
}
diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c
index 69ddfbd..e9b040e 100644
--- a/arch/x86/mm/init_64.c
+++ b/arch/x86/mm/init_64.c
@@ -572,6 +572,7 @@
void __init initmem_init(unsigned long start_pfn, unsigned long end_pfn,
int acpi, int k8)
{
+#ifndef CONFIG_NO_BOOTMEM
unsigned long bootmap_size, bootmap;
bootmap_size = bootmem_bootmap_pages(end_pfn)<<PAGE_SHIFT;
@@ -579,13 +580,15 @@
PAGE_SIZE);
if (bootmap == -1L)
panic("Cannot find bootmem map of size %ld\n", bootmap_size);
+ reserve_early(bootmap, bootmap + bootmap_size, "BOOTMAP");
/* don't touch min_low_pfn */
bootmap_size = init_bootmem_node(NODE_DATA(0), bootmap >> PAGE_SHIFT,
0, end_pfn);
e820_register_active_regions(0, start_pfn, end_pfn);
free_bootmem_with_active_regions(0, end_pfn);
- early_res_to_bootmem(0, end_pfn<<PAGE_SHIFT);
- reserve_bootmem(bootmap, bootmap_size, BOOTMEM_DEFAULT);
+#else
+ e820_register_active_regions(0, start_pfn, end_pfn);
+#endif
}
#endif
@@ -974,7 +977,7 @@
if (pmd_none(*pmd)) {
pte_t entry;
- p = vmemmap_alloc_block(PMD_SIZE, node);
+ p = vmemmap_alloc_block_buf(PMD_SIZE, node);
if (!p)
return -ENOMEM;
diff --git a/arch/x86/mm/numa_32.c b/arch/x86/mm/numa_32.c
index b20760c..809baaa 100644
--- a/arch/x86/mm/numa_32.c
+++ b/arch/x86/mm/numa_32.c
@@ -418,7 +418,10 @@
for_each_online_node(nid) {
memset(NODE_DATA(nid), 0, sizeof(struct pglist_data));
+ NODE_DATA(nid)->node_id = nid;
+#ifndef CONFIG_NO_BOOTMEM
NODE_DATA(nid)->bdata = &bootmem_node_data[nid];
+#endif
}
setup_bootmem_allocator();
diff --git a/arch/x86/mm/numa_64.c b/arch/x86/mm/numa_64.c
index 3307ea8..8948f47 100644
--- a/arch/x86/mm/numa_64.c
+++ b/arch/x86/mm/numa_64.c
@@ -163,30 +163,48 @@
unsigned long end, unsigned long size,
unsigned long align)
{
- unsigned long mem = find_e820_area(start, end, size, align);
- void *ptr;
+ unsigned long mem;
+ /*
+ * Put the allocation as high as possible;
+ * NODE_DATA and whatever goes with it will be placed there.
+ */
+ if (start < (MAX_DMA_PFN<<PAGE_SHIFT))
+ start = MAX_DMA_PFN<<PAGE_SHIFT;
+ if (start < (MAX_DMA32_PFN<<PAGE_SHIFT) &&
+ end > (MAX_DMA32_PFN<<PAGE_SHIFT))
+ start = MAX_DMA32_PFN<<PAGE_SHIFT;
+ mem = find_e820_area(start, end, size, align);
if (mem != -1L)
return __va(mem);
- ptr = __alloc_bootmem_nopanic(size, align, __pa(MAX_DMA_ADDRESS));
- if (ptr == NULL) {
- printk(KERN_ERR "Cannot find %lu bytes in node %d\n",
+ /* extend the search scope */
+ end = max_pfn_mapped << PAGE_SHIFT;
+ if (end > (MAX_DMA32_PFN<<PAGE_SHIFT))
+ start = MAX_DMA32_PFN<<PAGE_SHIFT;
+ else
+ start = MAX_DMA_PFN<<PAGE_SHIFT;
+ mem = find_e820_area(start, end, size, align);
+ if (mem != -1L)
+ return __va(mem);
+
+ printk(KERN_ERR "Cannot find %lu bytes in node %d\n",
size, nodeid);
- return NULL;
- }
- return ptr;
+
+ return NULL;
}
/* Initialize bootmem allocator for a node */
void __init
setup_node_bootmem(int nodeid, unsigned long start, unsigned long end)
{
- unsigned long start_pfn, last_pfn, bootmap_pages, bootmap_size;
+ unsigned long start_pfn, last_pfn, nodedata_phys;
const int pgdat_size = roundup(sizeof(pg_data_t), PAGE_SIZE);
- unsigned long bootmap_start, nodedata_phys;
- void *bootmap;
int nid;
+#ifndef CONFIG_NO_BOOTMEM
+ unsigned long bootmap_start, bootmap_pages, bootmap_size;
+ void *bootmap;
+#endif
if (!end)
return;
@@ -200,7 +218,7 @@
start = roundup(start, ZONE_ALIGN);
- printk(KERN_INFO "Bootmem setup node %d %016lx-%016lx\n", nodeid,
+ printk(KERN_INFO "Initmem setup node %d %016lx-%016lx\n", nodeid,
start, end);
start_pfn = start >> PAGE_SHIFT;
@@ -211,14 +229,21 @@
if (node_data[nodeid] == NULL)
return;
nodedata_phys = __pa(node_data[nodeid]);
+ reserve_early(nodedata_phys, nodedata_phys + pgdat_size, "NODE_DATA");
printk(KERN_INFO " NODE_DATA [%016lx - %016lx]\n", nodedata_phys,
nodedata_phys + pgdat_size - 1);
+ nid = phys_to_nid(nodedata_phys);
+ if (nid != nodeid)
+ printk(KERN_INFO " NODE_DATA(%d) on node %d\n", nodeid, nid);
memset(NODE_DATA(nodeid), 0, sizeof(pg_data_t));
- NODE_DATA(nodeid)->bdata = &bootmem_node_data[nodeid];
+ NODE_DATA(nodeid)->node_id = nodeid;
NODE_DATA(nodeid)->node_start_pfn = start_pfn;
NODE_DATA(nodeid)->node_spanned_pages = last_pfn - start_pfn;
+#ifndef CONFIG_NO_BOOTMEM
+ NODE_DATA(nodeid)->bdata = &bootmem_node_data[nodeid];
+
/*
* Find a place for the bootmem map
* nodedata_phys could be on other nodes by alloc_bootmem,
@@ -227,11 +252,7 @@
* of alloc_bootmem, that could clash with reserved range
*/
bootmap_pages = bootmem_bootmap_pages(last_pfn - start_pfn);
- nid = phys_to_nid(nodedata_phys);
- if (nid == nodeid)
- bootmap_start = roundup(nodedata_phys + pgdat_size, PAGE_SIZE);
- else
- bootmap_start = roundup(start, PAGE_SIZE);
+ bootmap_start = roundup(nodedata_phys + pgdat_size, PAGE_SIZE);
/*
* SMP_CACHE_BYTES could be enough, but init_bootmem_node like
* to use that to align to PAGE_SIZE
@@ -239,18 +260,13 @@
bootmap = early_node_mem(nodeid, bootmap_start, end,
bootmap_pages<<PAGE_SHIFT, PAGE_SIZE);
if (bootmap == NULL) {
- if (nodedata_phys < start || nodedata_phys >= end) {
- /*
- * only need to free it if it is from other node
- * bootmem
- */
- if (nid != nodeid)
- free_bootmem(nodedata_phys, pgdat_size);
- }
+ free_early(nodedata_phys, nodedata_phys + pgdat_size);
node_data[nodeid] = NULL;
return;
}
bootmap_start = __pa(bootmap);
+ reserve_early(bootmap_start, bootmap_start+(bootmap_pages<<PAGE_SHIFT),
+ "BOOTMAP");
bootmap_size = init_bootmem_node(NODE_DATA(nodeid),
bootmap_start >> PAGE_SHIFT,
@@ -259,31 +275,12 @@
printk(KERN_INFO " bootmap [%016lx - %016lx] pages %lx\n",
bootmap_start, bootmap_start + bootmap_size - 1,
bootmap_pages);
-
- free_bootmem_with_active_regions(nodeid, end);
-
- /*
- * convert early reserve to bootmem reserve earlier
- * otherwise early_node_mem could use early reserved mem
- * on previous node
- */
- early_res_to_bootmem(start, end);
-
- /*
- * in some case early_node_mem could use alloc_bootmem
- * to get range on other node, don't reserve that again
- */
- if (nid != nodeid)
- printk(KERN_INFO " NODE_DATA(%d) on node %d\n", nodeid, nid);
- else
- reserve_bootmem_node(NODE_DATA(nodeid), nodedata_phys,
- pgdat_size, BOOTMEM_DEFAULT);
nid = phys_to_nid(bootmap_start);
if (nid != nodeid)
printk(KERN_INFO " bootmap(%d) on node %d\n", nodeid, nid);
- else
- reserve_bootmem_node(NODE_DATA(nodeid), bootmap_start,
- bootmap_pages<<PAGE_SHIFT, BOOTMEM_DEFAULT);
+
+ free_bootmem_with_active_regions(nodeid, end);
+#endif
node_set_online(nodeid);
}
@@ -709,6 +706,10 @@
for_each_online_node(i)
pages += free_all_bootmem_node(NODE_DATA(i));
+#ifdef CONFIG_NO_BOOTMEM
+ pages += free_all_memory_core_early(MAX_NUMNODES);
+#endif
+
return pages;
}
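The reworked early_node_mem() above prefers physical addresses above the DMA and DMA32 limits and only widens the window when that fails. A simplified userspace sketch of the same two-pass search; the free-range list and find_area() are illustrations, not the kernel's find_e820_area():

#include <stdint.h>
#include <stdio.h>

struct area { uint64_t start, end; };		/* free ranges, half-open [start, end) */

#define DMA32_LIMIT	(4ULL << 30)		/* 4 GiB, stand-in for MAX_DMA32_PFN */

/* Return the first address in [lo, hi) where size bytes fit, or -1 if none. */
static uint64_t find_area(const struct area *free, int n,
			  uint64_t lo, uint64_t hi, uint64_t size)
{
	int i;

	for (i = 0; i < n; i++) {
		uint64_t s = free[i].start > lo ? free[i].start : lo;
		uint64_t e = free[i].end < hi ? free[i].end : hi;

		if (e > s && e - s >= size)
			return s;
	}
	return (uint64_t)-1;
}

int main(void)
{
	struct area free[] = {
		{ 0x00100000ULL,  0x00200000ULL },	/* low memory */
		{ 0x100000000ULL, 0x140000000ULL },	/* above 4 GiB */
	};
	uint64_t size = 1 << 20, start = 0, end = 0x140000000ULL, lo, mem;

	/* First pass: push the lower bound above the DMA32 limit. */
	lo = start < DMA32_LIMIT ? DMA32_LIMIT : start;
	mem = find_area(free, 2, lo, end, size);

	/* Fallback: widen the window again if nothing high enough was free. */
	if (mem == (uint64_t)-1)
		mem = find_area(free, 2, start, end, size);

	printf("allocated at %#llx\n", (unsigned long long)mem);
	return 0;
}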
diff --git a/arch/x86/pci/Makefile b/arch/x86/pci/Makefile
index 39fba37..0b7d3e9 100644
--- a/arch/x86/pci/Makefile
+++ b/arch/x86/pci/Makefile
@@ -14,8 +14,7 @@
obj-$(CONFIG_X86_NUMAQ) += numaq_32.o
obj-y += common.o early.o
-obj-y += amd_bus.o
-obj-$(CONFIG_X86_64) += bus_numa.o
+obj-y += amd_bus.o bus_numa.o
ifeq ($(CONFIG_PCI_DEBUG),y)
EXTRA_CFLAGS += -DDEBUG
diff --git a/arch/x86/pci/amd_bus.c b/arch/x86/pci/amd_bus.c
index 95ecbd4..fc1e8fe 100644
--- a/arch/x86/pci/amd_bus.c
+++ b/arch/x86/pci/amd_bus.c
@@ -2,11 +2,11 @@
#include <linux/pci.h>
#include <linux/topology.h>
#include <linux/cpu.h>
+#include <linux/range.h>
+
#include <asm/pci_x86.h>
-#ifdef CONFIG_X86_64
#include <asm/pci-direct.h>
-#endif
#include "bus_numa.h"
@@ -15,60 +15,6 @@
* also get peer root bus resource for io,mmio
*/
-#ifdef CONFIG_X86_64
-
-#define RANGE_NUM 16
-
-struct res_range {
- size_t start;
- size_t end;
-};
-
-static void __init update_range(struct res_range *range, size_t start,
- size_t end)
-{
- int i;
- int j;
-
- for (j = 0; j < RANGE_NUM; j++) {
- if (!range[j].end)
- continue;
-
- if (start <= range[j].start && end >= range[j].end) {
- range[j].start = 0;
- range[j].end = 0;
- continue;
- }
-
- if (start <= range[j].start && end < range[j].end && range[j].start < end + 1) {
- range[j].start = end + 1;
- continue;
- }
-
-
- if (start > range[j].start && end >= range[j].end && range[j].end > start - 1) {
- range[j].end = start - 1;
- continue;
- }
-
- if (start > range[j].start && end < range[j].end) {
- /* find the new spare */
- for (i = 0; i < RANGE_NUM; i++) {
- if (range[i].end == 0)
- break;
- }
- if (i < RANGE_NUM) {
- range[i].end = range[j].end;
- range[i].start = end + 1;
- } else {
- printk(KERN_ERR "run of slot in ranges\n");
- }
- range[j].end = start - 1;
- continue;
- }
- }
-}
-
struct pci_hostbridge_probe {
u32 bus;
u32 slot;
@@ -111,6 +57,8 @@
fam10h_mmconf_end = base + (1ULL<<(segn_busn_bits + 20)) - 1;
}
+#define RANGE_NUM 16
+
/**
* early_fill_mp_bus_to_node()
* called before pcibios_scan_root and pci_scan_bus
@@ -130,16 +78,17 @@
struct pci_root_info *info;
u32 reg;
struct resource *res;
- size_t start;
- size_t end;
- struct res_range range[RANGE_NUM];
+ u64 start;
+ u64 end;
+ struct range range[RANGE_NUM];
u64 val;
u32 address;
+ bool found;
if (!early_pci_allowed())
return -1;
- found_all_numa_early = 0;
+ found = false;
for (i = 0; i < ARRAY_SIZE(pci_probes); i++) {
u32 id;
u16 device;
@@ -153,12 +102,12 @@
device = (id>>16) & 0xffff;
if (pci_probes[i].vendor == vendor &&
pci_probes[i].device == device) {
- found_all_numa_early = 1;
+ found = true;
break;
}
}
- if (!found_all_numa_early)
+ if (!found)
return 0;
pci_root_num = 0;
@@ -196,7 +145,7 @@
def_link = (reg >> 8) & 0x03;
memset(range, 0, sizeof(range));
- range[0].end = 0xffff;
+ add_range(range, RANGE_NUM, 0, 0, 0xffff + 1);
/* io port resource */
for (i = 0; i < 4; i++) {
reg = read_pci_config(bus, slot, 1, 0xc0 + (i << 3));
@@ -220,13 +169,13 @@
info = &pci_root_info[j];
printk(KERN_DEBUG "node %d link %d: io port [%llx, %llx]\n",
- node, link, (u64)start, (u64)end);
+ node, link, start, end);
/* kernel only handle 16 bit only */
if (end > 0xffff)
end = 0xffff;
update_res(info, start, end, IORESOURCE_IO, 1);
- update_range(range, start, end);
+ subtract_range(range, RANGE_NUM, start, end + 1);
}
/* add left over io port range to def node/link, [0, 0xffff] */
/* find the position */
@@ -241,29 +190,32 @@
if (!range[i].end)
continue;
- update_res(info, range[i].start, range[i].end,
+ update_res(info, range[i].start, range[i].end - 1,
IORESOURCE_IO, 1);
}
}
memset(range, 0, sizeof(range));
/* 0xfd00000000-0xffffffffff for HT */
- range[0].end = (0xfdULL<<32) - 1;
+ end = cap_resource((0xfdULL<<32) - 1);
+ end++;
+ add_range(range, RANGE_NUM, 0, 0, end);
/* need to take out [0, TOM) for RAM*/
address = MSR_K8_TOP_MEM1;
rdmsrl(address, val);
end = (val & 0xffffff800000ULL);
- printk(KERN_INFO "TOM: %016lx aka %ldM\n", end, end>>20);
+ printk(KERN_INFO "TOM: %016llx aka %lldM\n", end, end>>20);
if (end < (1ULL<<32))
- update_range(range, 0, end - 1);
+ subtract_range(range, RANGE_NUM, 0, end);
/* get mmconfig */
get_pci_mmcfg_amd_fam10h_range();
/* need to take out mmconf range */
if (fam10h_mmconf_end) {
printk(KERN_DEBUG "Fam 10h mmconf [%llx, %llx]\n", fam10h_mmconf_start, fam10h_mmconf_end);
- update_range(range, fam10h_mmconf_start, fam10h_mmconf_end);
+ subtract_range(range, RANGE_NUM, fam10h_mmconf_start,
+ fam10h_mmconf_end + 1);
}
/* mmio resource */
@@ -293,7 +245,7 @@
info = &pci_root_info[j];
printk(KERN_DEBUG "node %d link %d: mmio [%llx, %llx]",
- node, link, (u64)start, (u64)end);
+ node, link, start, end);
/*
* some sick allocation would have range overlap with fam10h
* mmconf range, so need to update start and end.
@@ -318,14 +270,15 @@
/* we got a hole */
endx = fam10h_mmconf_start - 1;
update_res(info, start, endx, IORESOURCE_MEM, 0);
- update_range(range, start, endx);
- printk(KERN_CONT " ==> [%llx, %llx]", (u64)start, endx);
+ subtract_range(range, RANGE_NUM, start,
+ endx + 1);
+ printk(KERN_CONT " ==> [%llx, %llx]", start, endx);
start = fam10h_mmconf_end + 1;
changed = 1;
}
if (changed) {
if (start <= end) {
- printk(KERN_CONT " %s [%llx, %llx]", endx?"and":"==>", (u64)start, (u64)end);
+ printk(KERN_CONT " %s [%llx, %llx]", endx ? "and" : "==>", start, end);
} else {
printk(KERN_CONT "%s\n", endx?"":" ==> none");
continue;
@@ -333,8 +286,9 @@
}
}
- update_res(info, start, end, IORESOURCE_MEM, 1);
- update_range(range, start, end);
+ update_res(info, cap_resource(start), cap_resource(end),
+ IORESOURCE_MEM, 1);
+ subtract_range(range, RANGE_NUM, start, end + 1);
printk(KERN_CONT "\n");
}
@@ -348,8 +302,8 @@
address = MSR_K8_TOP_MEM2;
rdmsrl(address, val);
end = (val & 0xffffff800000ULL);
- printk(KERN_INFO "TOM2: %016lx aka %ldM\n", end, end>>20);
- update_range(range, 1ULL<<32, end - 1);
+ printk(KERN_INFO "TOM2: %016llx aka %lldM\n", end, end>>20);
+ subtract_range(range, RANGE_NUM, 1ULL<<32, end);
}
/*
@@ -368,7 +322,8 @@
if (!range[i].end)
continue;
- update_res(info, range[i].start, range[i].end,
+ update_res(info, cap_resource(range[i].start),
+ cap_resource(range[i].end - 1),
IORESOURCE_MEM, 1);
}
}
@@ -384,24 +339,14 @@
info->bus_min, info->bus_max, info->node, info->link);
for (j = 0; j < res_num; j++) {
res = &info->res[j];
- printk(KERN_DEBUG "bus: %02x index %x %s: [%llx, %llx]\n",
- busnum, j,
- (res->flags & IORESOURCE_IO)?"io port":"mmio",
- res->start, res->end);
+ printk(KERN_DEBUG "bus: %02x index %x %pR\n",
+ busnum, j, res);
}
}
return 0;
}
-#else /* !CONFIG_X86_64 */
-
-static int __init early_fill_mp_bus_info(void) { return 0; }
-
-#endif /* !CONFIG_X86_64 */
-
-/* common 32/64 bit code */
-
#define ENABLE_CF8_EXT_CFG (1ULL << 46)
static void enable_pci_io_ecs(void *unused)
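amd_bus.c now builds its leftover-resource list with the shared struct range helpers from linux/range.h instead of a private res_range/update_range pair, and the callers switch from inclusive end values to exclusive ones (hence the end + 1). The userspace re-implementation below is only meant to illustrate the half-open [start, end) bookkeeping, including the split into a spare slot; it is not the kernel's subtract_range().

#include <stdio.h>
#include <stdint.h>

#define RANGE_NUM 16

struct range { uint64_t start, end; };	/* half-open: [start, end), end == 0 means unused */

/* Remove [start, end) from every tracked range; a hit in the middle splits it. */
static void subtract(struct range *r, int az, uint64_t start, uint64_t end)
{
	int i, j;

	for (j = 0; j < az; j++) {
		if (!r[j].end)
			continue;
		if (start <= r[j].start && end >= r[j].end) {
			r[j].start = r[j].end = 0;	/* fully covered: drop it */
		} else if (start <= r[j].start && end > r[j].start) {
			r[j].start = end;		/* trim the front */
		} else if (start < r[j].end && end >= r[j].end) {
			r[j].end = start;		/* trim the back */
		} else if (start > r[j].start && end < r[j].end) {
			for (i = 0; i < az; i++)	/* punch a hole: keep the tail */
				if (!r[i].end) {
					r[i].start = end;
					r[i].end = r[j].end;
					break;
				}
			r[j].end = start;
		}
	}
}

int main(void)
{
	struct range range[RANGE_NUM] = { { 0, 0xffff + 1 } };	/* io ports 0..0xffff */
	int i;

	/* One node/link owns ports 0x1000..0x1fff inclusive, so pass 0x1fff + 1. */
	subtract(range, RANGE_NUM, 0x1000, 0x1fff + 1);

	for (i = 0; i < RANGE_NUM; i++)
		if (range[i].end)
			printf("left over: [%#llx, %#llx)\n",
			       (unsigned long long)range[i].start,
			       (unsigned long long)range[i].end);
	return 0;
}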
diff --git a/arch/x86/pci/bus_numa.c b/arch/x86/pci/bus_numa.c
index 12d54ff..64a1228 100644
--- a/arch/x86/pci/bus_numa.c
+++ b/arch/x86/pci/bus_numa.c
@@ -1,11 +1,11 @@
#include <linux/init.h>
#include <linux/pci.h>
+#include <linux/range.h>
#include "bus_numa.h"
int pci_root_num;
struct pci_root_info pci_root_info[PCI_ROOT_NR];
-int found_all_numa_early;
void x86_pci_root_bus_res_quirks(struct pci_bus *b)
{
@@ -21,10 +21,6 @@
if (!pci_root_num)
return;
- /* for amd, if only one root bus, don't need to do anything */
- if (pci_root_num < 2 && found_all_numa_early)
- return;
-
for (i = 0; i < pci_root_num; i++) {
if (pci_root_info[i].bus_min == b->number)
break;
@@ -52,8 +48,8 @@
}
}
-void __devinit update_res(struct pci_root_info *info, size_t start,
- size_t end, unsigned long flags, int merge)
+void __devinit update_res(struct pci_root_info *info, resource_size_t start,
+ resource_size_t end, unsigned long flags, int merge)
{
int i;
struct resource *res;
@@ -61,25 +57,28 @@
if (start > end)
return;
+ if (start == MAX_RESOURCE)
+ return;
+
if (!merge)
goto addit;
/* try to merge it with old one */
for (i = 0; i < info->res_num; i++) {
- size_t final_start, final_end;
- size_t common_start, common_end;
+ resource_size_t final_start, final_end;
+ resource_size_t common_start, common_end;
res = &info->res[i];
if (res->flags != flags)
continue;
- common_start = max((size_t)res->start, start);
- common_end = min((size_t)res->end, end);
+ common_start = max(res->start, start);
+ common_end = min(res->end, end);
if (common_start > common_end + 1)
continue;
- final_start = min((size_t)res->start, start);
- final_end = max((size_t)res->end, end);
+ final_start = min(res->start, start);
+ final_end = max(res->end, end);
res->start = final_start;
res->end = final_end;
diff --git a/arch/x86/pci/bus_numa.h b/arch/x86/pci/bus_numa.h
index 731b64e..804a4b4 100644
--- a/arch/x86/pci/bus_numa.h
+++ b/arch/x86/pci/bus_numa.h
@@ -1,5 +1,5 @@
-#ifdef CONFIG_X86_64
-
+#ifndef __BUS_NUMA_H
+#define __BUS_NUMA_H
/*
* sub bus (transparent) will use entries from 3 to store extra from
* root, so need to make sure we have enough slots there.
@@ -19,8 +19,7 @@
#define PCI_ROOT_NR 4
extern int pci_root_num;
extern struct pci_root_info pci_root_info[PCI_ROOT_NR];
-extern int found_all_numa_early;
-extern void update_res(struct pci_root_info *info, size_t start,
- size_t end, unsigned long flags, int merge);
+extern void update_res(struct pci_root_info *info, resource_size_t start,
+ resource_size_t end, unsigned long flags, int merge);
#endif
diff --git a/arch/x86/pci/i386.c b/arch/x86/pci/i386.c
index 5a8fbf8..dece3eb 100644
--- a/arch/x86/pci/i386.c
+++ b/arch/x86/pci/i386.c
@@ -255,10 +255,6 @@
*/
fs_initcall(pcibios_assign_resources);
-void __weak x86_pci_root_bus_res_quirks(struct pci_bus *b)
-{
-}
-
/*
* If we set up a device for bus mastering, we need to check the latency
* timer as certain crappy BIOSes forget to set it properly.
diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c
index 36daccb..b607239 100644
--- a/arch/x86/xen/enlighten.c
+++ b/arch/x86/xen/enlighten.c
@@ -50,6 +50,7 @@
#include <asm/traps.h>
#include <asm/setup.h>
#include <asm/desc.h>
+#include <asm/pgalloc.h>
#include <asm/pgtable.h>
#include <asm/tlbflush.h>
#include <asm/reboot.h>
@@ -1094,6 +1095,12 @@
__supported_pte_mask |= _PAGE_IOMAP;
+ /*
+ * Prevent page tables from being allocated in highmem, even
+ * if CONFIG_HIGHPTE is enabled.
+ */
+ __userpte_alloc_gfp &= ~__GFP_HIGHMEM;
+
/* Work out if we support NX */
x86_configure_nx();
diff --git a/arch/x86/xen/mmu.c b/arch/x86/xen/mmu.c
index bf4cd6b..f9eb7de 100644
--- a/arch/x86/xen/mmu.c
+++ b/arch/x86/xen/mmu.c
@@ -1427,23 +1427,6 @@
#endif
}
-#ifdef CONFIG_HIGHPTE
-static void *xen_kmap_atomic_pte(struct page *page, enum km_type type)
-{
- pgprot_t prot = PAGE_KERNEL;
-
- if (PagePinned(page))
- prot = PAGE_KERNEL_RO;
-
- if (0 && PageHighMem(page))
- printk("mapping highpte %lx type %d prot %s\n",
- page_to_pfn(page), type,
- (unsigned long)pgprot_val(prot) & _PAGE_RW ? "WRITE" : "READ");
-
- return kmap_atomic_prot(page, type, prot);
-}
-#endif
-
#ifdef CONFIG_X86_32
static __init pte_t mask_rw_pte(pte_t *ptep, pte_t pte)
{
@@ -1902,10 +1885,6 @@
.alloc_pmd_clone = paravirt_nop,
.release_pmd = xen_release_pmd_init,
-#ifdef CONFIG_HIGHPTE
- .kmap_atomic_pte = xen_kmap_atomic_pte,
-#endif
-
#ifdef CONFIG_X86_64
.set_pte = xen_set_pte,
#else
diff --git a/arch/x86/xen/xen-asm_32.S b/arch/x86/xen/xen-asm_32.S
index 88e15de..22a2093 100644
--- a/arch/x86/xen/xen-asm_32.S
+++ b/arch/x86/xen/xen-asm_32.S
@@ -90,9 +90,9 @@
GET_THREAD_INFO(%eax)
movl TI_cpu(%eax), %eax
movl __per_cpu_offset(,%eax,4), %eax
- mov per_cpu__xen_vcpu(%eax), %eax
+ mov xen_vcpu(%eax), %eax
#else
- movl per_cpu__xen_vcpu, %eax
+ movl xen_vcpu, %eax
#endif
/* check IF state we're restoring */
diff --git a/crypto/cryptd.c b/crypto/cryptd.c
index 704c141..ef71318 100644
--- a/crypto/cryptd.c
+++ b/crypto/cryptd.c
@@ -31,7 +31,7 @@
};
struct cryptd_queue {
- struct cryptd_cpu_queue *cpu_queue;
+ struct cryptd_cpu_queue __percpu *cpu_queue;
};
struct cryptd_instance_ctx {
diff --git a/drivers/acpi/numa.c b/drivers/acpi/numa.c
index 7ad48df..b872546 100644
--- a/drivers/acpi/numa.c
+++ b/drivers/acpi/numa.c
@@ -279,9 +279,9 @@
/* SRAT: Static Resource Affinity Table */
if (!acpi_table_parse(ACPI_SIG_SRAT, acpi_parse_srat)) {
acpi_table_parse_srat(ACPI_SRAT_TYPE_X2APIC_CPU_AFFINITY,
- acpi_parse_x2apic_affinity, NR_CPUS);
+ acpi_parse_x2apic_affinity, nr_cpu_ids);
acpi_table_parse_srat(ACPI_SRAT_TYPE_CPU_AFFINITY,
- acpi_parse_processor_affinity, NR_CPUS);
+ acpi_parse_processor_affinity, nr_cpu_ids);
ret = acpi_table_parse_srat(ACPI_SRAT_TYPE_MEMORY_AFFINITY,
acpi_parse_memory_affinity,
NR_NODE_MEMBLKS);
diff --git a/drivers/acpi/processor_perflib.c b/drivers/acpi/processor_perflib.c
index a959f6a..d648a98 100644
--- a/drivers/acpi/processor_perflib.c
+++ b/drivers/acpi/processor_perflib.c
@@ -561,7 +561,7 @@
}
int acpi_processor_preregister_performance(
- struct acpi_processor_performance *performance)
+ struct acpi_processor_performance __percpu *performance)
{
int count, count_target;
int retval = 0;
diff --git a/drivers/char/cyclades.c b/drivers/char/cyclades.c
index 4254457..b861c08 100644
--- a/drivers/char/cyclades.c
+++ b/drivers/char/cyclades.c
@@ -158,13 +158,11 @@
#define NR_ISA_ADDRS ARRAY_SIZE(cy_isa_addresses)
-#ifdef MODULE
static long maddr[NR_CARDS];
static int irq[NR_CARDS];
module_param_array(maddr, long, NULL, 0);
module_param_array(irq, int, NULL, 0);
-#endif
#endif /* CONFIG_ISA */
@@ -598,12 +596,6 @@
save_car = readb(base_addr + (CyCAR << index));
cy_writeb(base_addr + (CyCAR << index), save_xir);
- /* validate the port# (as configured and open) */
- if (channel + chip * 4 >= cinfo->nports) {
- cy_writeb(base_addr + (CySRER << index),
- readb(base_addr + (CySRER << index)) & ~CyTxRdy);
- goto end;
- }
info = &cinfo->ports[channel + chip * 4];
tty = tty_port_tty_get(&info->port);
if (tty == NULL) {
@@ -3316,13 +3308,10 @@
unsigned short cy_isa_irq, nboard;
void __iomem *cy_isa_address;
unsigned short i, j, cy_isa_nchan;
-#ifdef MODULE
int isparam = 0;
-#endif
nboard = 0;
-#ifdef MODULE
/* Check for module parameters */
for (i = 0; i < NR_CARDS; i++) {
if (maddr[i] || i) {
@@ -3332,7 +3321,6 @@
if (!maddr[i])
break;
}
-#endif
/* scan the address table probing for Cyclom-Y/ISA boards */
for (i = 0; i < NR_ISA_ADDRS; i++) {
@@ -3353,11 +3341,10 @@
iounmap(cy_isa_address);
continue;
}
-#ifdef MODULE
+
if (isparam && i < NR_CARDS && irq[i])
cy_isa_irq = irq[i];
else
-#endif
/* find out the board's irq by probing */
cy_isa_irq = detect_isa_irq(cy_isa_address);
if (cy_isa_irq == 0) {
@@ -4208,3 +4195,4 @@
MODULE_LICENSE("GPL");
MODULE_VERSION(CY_VERSION);
MODULE_ALIAS_CHARDEV_MAJOR(CYCLADES_MAJOR);
+MODULE_FIRMWARE("cyzfirm.bin");
diff --git a/drivers/char/hvc_console.c b/drivers/char/hvc_console.c
index 4c3b59b..465185f 100644
--- a/drivers/char/hvc_console.c
+++ b/drivers/char/hvc_console.c
@@ -146,7 +146,7 @@
return;
/* This console adapter was removed so it is not usable. */
- if (vtermnos[index] < 0)
+ if (vtermnos[index] == -1)
return;
while (count > 0 || i > 0) {
diff --git a/drivers/char/ip2/ip2main.c b/drivers/char/ip2/ip2main.c
index 517271c..911e1da 100644
--- a/drivers/char/ip2/ip2main.c
+++ b/drivers/char/ip2/ip2main.c
@@ -208,6 +208,7 @@
static void ip2_init_board(int, const struct firmware *);
static unsigned short find_eisa_board(int);
+static int ip2_setup(char *str);
/***************/
/* Static Data */
@@ -263,7 +264,7 @@
/* Macros */
/**********/
-#if defined(MODULE) && defined(IP2DEBUG_OPEN)
+#ifdef IP2DEBUG_OPEN
#define DBG_CNT(s) printk(KERN_DEBUG "(%s): [%x] ttyc=%d, modc=%x -> %s\n", \
tty->name,(pCh->flags), \
tty->count,/*GET_USE_COUNT(module)*/0,s)
@@ -285,7 +286,10 @@
MODULE_DESCRIPTION("Computone IntelliPort Plus Driver");
MODULE_LICENSE("GPL");
+#define MAX_CMD_STR 50
+
static int poll_only;
+static char cmd[MAX_CMD_STR];
static int Eisa_irq;
static int Eisa_slot;
@@ -309,6 +313,8 @@
MODULE_PARM_DESC(io, "I/O ports for IntelliPort Cards");
module_param(poll_only, bool, 0);
MODULE_PARM_DESC(poll_only, "Do not use card interrupts");
+module_param_string(ip2, cmd, MAX_CMD_STR, 0);
+MODULE_PARM_DESC(ip2, "Contains module parameter passed with 'ip2='");
/* for sysfs class support */
static struct class *ip2_class;
@@ -487,7 +493,6 @@
return fw;
}
-#ifndef MODULE
/******************************************************************************
* ip2_setup:
* str: kernel command line string
@@ -531,7 +536,6 @@
return 1;
}
__setup("ip2=", ip2_setup);
-#endif /* !MODULE */
static int __init ip2_loadmain(void)
{
@@ -539,14 +543,20 @@
int err = 0;
i2eBordStrPtr pB = NULL;
int rc = -1;
- struct pci_dev *pdev = NULL;
const struct firmware *fw = NULL;
+ char *str;
+
+ str = cmd;
if (poll_only) {
/* Hard lock the interrupts to zero */
irq[0] = irq[1] = irq[2] = irq[3] = poll_only = 0;
}
+ /* Check module parameter with 'ip2=' has been passed or not */
+ if (!poll_only && (!strncmp(str, "ip2=", 4)))
+ ip2_setup(str);
+
ip2trace(ITRC_NO_PORT, ITRC_INIT, ITRC_ENTER, 0);
/* process command line arguments to modprobe or
@@ -612,6 +622,7 @@
case PCI:
#ifdef CONFIG_PCI
{
+ struct pci_dev *pdev = NULL;
u32 addr;
int status;
@@ -626,7 +637,7 @@
if (pci_enable_device(pdev)) {
dev_err(&pdev->dev, "can't enable device\n");
- break;
+ goto out;
}
ip2config.type[i] = PCI;
ip2config.pci_dev[i] = pci_dev_get(pdev);
@@ -638,6 +649,8 @@
dev_err(&pdev->dev, "I/O address error\n");
ip2config.irq[i] = pdev->irq;
+out:
+ pci_dev_put(pdev);
}
#else
printk(KERN_ERR "IP2: PCI card specified but PCI "
@@ -656,7 +669,6 @@
break;
} /* switch */
} /* for */
- pci_dev_put(pdev);
for (i = 0; i < IP2_MAX_BOARDS; ++i) {
if (ip2config.addr[i]) {
@@ -3197,3 +3209,5 @@
};
MODULE_DEVICE_TABLE(pci, ip2main_pci_tbl);
+
+MODULE_FIRMWARE("intelliport2.bin");
diff --git a/drivers/char/isicom.c b/drivers/char/isicom.c
index 300d5bd..be2e8f9 100644
--- a/drivers/char/isicom.c
+++ b/drivers/char/isicom.c
@@ -113,6 +113,8 @@
* 64-bit verification
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/module.h>
#include <linux/firmware.h>
#include <linux/kernel.h>
@@ -140,7 +142,6 @@
#define InterruptTheCard(base) outw(0, (base) + 0xc)
#define ClearInterrupt(base) inw((base) + 0x0a)
-#define pr_dbg(str...) pr_debug("ISICOM: " str)
#ifdef DEBUG
#define isicom_paranoia_check(a, b, c) __isicom_paranoia_check((a), (b), (c))
#else
@@ -249,8 +250,7 @@
spin_unlock_irqrestore(&card->card_lock, card->flags);
msleep(10);
}
- printk(KERN_WARNING "ISICOM: Failed to lock Card (0x%lx)\n",
- card->base);
+ pr_warning("Failed to lock Card (0x%lx)\n", card->base);
return 0; /* Failed to acquire the card! */
}
@@ -379,13 +379,13 @@
char *name, const char *routine)
{
if (!port) {
- printk(KERN_WARNING "ISICOM: Warning: bad isicom magic for "
- "dev %s in %s.\n", name, routine);
+ pr_warning("Warning: bad isicom magic for dev %s in %s.\n",
+ name, routine);
return 1;
}
if (port->magic != ISICOM_MAGIC) {
- printk(KERN_WARNING "ISICOM: Warning: NULL isicom port for "
- "dev %s in %s.\n", name, routine);
+ pr_warning("Warning: NULL isicom port for dev %s in %s.\n",
+ name, routine);
return 1;
}
@@ -450,8 +450,8 @@
if (!(inw(base + 0x02) & (1 << port->channel)))
continue;
- pr_dbg("txing %d bytes, port%d.\n", txcount,
- port->channel + 1);
+ pr_debug("txing %d bytes, port%d.\n",
+ txcount, port->channel + 1);
outw((port->channel << isi_card[card].shift_count) | txcount,
base);
residue = NO;
@@ -547,8 +547,8 @@
byte_count = header & 0xff;
if (channel + 1 > card->port_count) {
- printk(KERN_WARNING "ISICOM: isicom_interrupt(0x%lx): "
- "%d(channel) > port_count.\n", base, channel+1);
+ pr_warning("%s(0x%lx): %d(channel) > port_count.\n",
+ __func__, base, channel+1);
outw(0x0000, base+0x04); /* enable interrupts */
spin_unlock(&card->card_lock);
return IRQ_HANDLED;
@@ -582,14 +582,15 @@
if (port->status & ISI_DCD) {
if (!(header & ISI_DCD)) {
/* Carrier has been lost */
- pr_dbg("interrupt: DCD->low.\n"
- );
+ pr_debug("%s: DCD->low.\n",
+ __func__);
port->status &= ~ISI_DCD;
tty_hangup(tty);
}
} else if (header & ISI_DCD) {
/* Carrier has been detected */
- pr_dbg("interrupt: DCD->high.\n");
+ pr_debug("%s: DCD->high.\n",
+ __func__);
port->status |= ISI_DCD;
wake_up_interruptible(&port->port.open_wait);
}
@@ -641,17 +642,19 @@
break;
case 2: /* Statistics */
- pr_dbg("isicom_interrupt: stats!!!.\n");
+ pr_debug("%s: stats!!!\n", __func__);
break;
default:
- pr_dbg("Intr: Unknown code in status packet.\n");
+ pr_debug("%s: Unknown code in status packet.\n",
+ __func__);
break;
}
} else { /* Data Packet */
count = tty_prepare_flip_string(tty, &rp, byte_count & ~1);
- pr_dbg("Intr: Can rx %d of %d bytes.\n", count, byte_count);
+ pr_debug("%s: Can rx %d of %d bytes.\n",
+ __func__, count, byte_count);
word_count = count >> 1;
insw(base, rp, word_count);
byte_count -= (word_count << 1);
@@ -661,8 +664,8 @@
byte_count -= 2;
}
if (byte_count > 0) {
- pr_dbg("Intr(0x%lx:%d): Flip buffer overflow! dropping "
- "bytes...\n", base, channel + 1);
+ pr_debug("%s(0x%lx:%d): Flip buffer overflow! dropping bytes...\n",
+ __func__, base, channel + 1);
/* drain out unread xtra data */
while (byte_count > 0) {
inw(base);
@@ -888,8 +891,8 @@
struct isi_board *card = port->card;
if (--card->count < 0) {
- pr_dbg("isicom_shutdown_port: bad board(0x%lx) count %d.\n",
- card->base, card->count);
+ pr_debug("%s: bad board(0x%lx) count %d.\n",
+ __func__, card->base, card->count);
card->count = 0;
}
/* last port was closed, shutdown that board too */
@@ -1681,13 +1684,13 @@
retval = tty_register_driver(isicom_normal);
if (retval) {
- pr_dbg("Couldn't register the dialin driver\n");
+ pr_debug("Couldn't register the dialin driver\n");
goto err_puttty;
}
retval = pci_register_driver(&isicom_driver);
if (retval < 0) {
- printk(KERN_ERR "ISICOM: Unable to register pci driver.\n");
+ pr_err("Unable to register pci driver.\n");
goto err_unrtty;
}
@@ -1717,3 +1720,8 @@
MODULE_AUTHOR("MultiTech");
MODULE_DESCRIPTION("Driver for the ISI series of cards by MultiTech");
MODULE_LICENSE("GPL");
+MODULE_FIRMWARE("isi608.bin");
+MODULE_FIRMWARE("isi608em.bin");
+MODULE_FIRMWARE("isi616em.bin");
+MODULE_FIRMWARE("isi4608.bin");
+MODULE_FIRMWARE("isi4616.bin");
diff --git a/drivers/char/moxa.c b/drivers/char/moxa.c
index 63ee3bb..166495d 100644
--- a/drivers/char/moxa.c
+++ b/drivers/char/moxa.c
@@ -164,24 +164,25 @@
static unsigned int moxaLowWaterChk;
static DEFINE_MUTEX(moxa_openlock);
static DEFINE_SPINLOCK(moxa_lock);
-/* Variables for insmod */
-#ifdef MODULE
+
static unsigned long baseaddr[MAX_BOARDS];
static unsigned int type[MAX_BOARDS];
static unsigned int numports[MAX_BOARDS];
-#endif
MODULE_AUTHOR("William Chen");
MODULE_DESCRIPTION("MOXA Intellio Family Multiport Board Device Driver");
MODULE_LICENSE("GPL");
-#ifdef MODULE
+MODULE_FIRMWARE("c218tunx.cod");
+MODULE_FIRMWARE("cp204unx.cod");
+MODULE_FIRMWARE("c320tunx.cod");
+
module_param_array(type, uint, NULL, 0);
MODULE_PARM_DESC(type, "card type: C218=2, C320=4");
module_param_array(baseaddr, ulong, NULL, 0);
MODULE_PARM_DESC(baseaddr, "base address");
module_param_array(numports, uint, NULL, 0);
MODULE_PARM_DESC(numports, "numports (ignored for C218)");
-#endif
+
module_param(ttymajor, int, 0);
/*
@@ -1024,6 +1025,8 @@
{
unsigned int isabrds = 0;
int retval = 0;
+ struct moxa_board_conf *brd = moxa_boards;
+ unsigned int i;
printk(KERN_INFO "MOXA Intellio family driver version %s\n",
MOXA_VERSION);
@@ -1051,10 +1054,7 @@
}
/* Find the boards defined from module args. */
-#ifdef MODULE
- {
- struct moxa_board_conf *brd = moxa_boards;
- unsigned int i;
+
for (i = 0; i < MAX_BOARDS; i++) {
if (!baseaddr[i])
break;
@@ -1087,8 +1087,6 @@
isabrds++;
}
}
- }
-#endif
#ifdef CONFIG_PCI
retval = pci_register_driver(&moxa_pci_driver);
diff --git a/drivers/char/mxser.c b/drivers/char/mxser.c
index 3d92306..e0c5d2a 100644
--- a/drivers/char/mxser.c
+++ b/drivers/char/mxser.c
@@ -895,8 +895,7 @@
if (inb(info->ioaddr + UART_LSR) == 0xff) {
spin_unlock_irqrestore(&info->slock, flags);
if (capable(CAP_SYS_ADMIN)) {
- if (tty)
- set_bit(TTY_IO_ERROR, &tty->flags);
+ set_bit(TTY_IO_ERROR, &tty->flags);
return 0;
} else
return -ENODEV;
diff --git a/drivers/char/nozomi.c b/drivers/char/nozomi.c
index 2ad7d37..a3f32a1 100644
--- a/drivers/char/nozomi.c
+++ b/drivers/char/nozomi.c
@@ -136,10 +136,6 @@
#define RECEIVE_BUF_MAX 4
-/* Define all types of vendors and devices to support */
-#define VENDOR1 0x1931 /* Vendor Option */
-#define DEVICE1 0x000c /* HSDPA card */
-
#define R_IIR 0x0000 /* Interrupt Identity Register */
#define R_FCR 0x0000 /* Flow Control Register */
#define R_IER 0x0004 /* Interrupt Enable Register */
@@ -371,6 +367,8 @@
struct mutex tty_sem;
wait_queue_head_t tty_wait;
struct async_icount tty_icount;
+
+ struct nozomi *dc;
};
/* Private data one for each card in the system */
@@ -405,7 +403,7 @@
/* Global variables */
static const struct pci_device_id nozomi_pci_tbl[] __devinitconst = {
- {PCI_DEVICE(VENDOR1, DEVICE1)},
+ {PCI_DEVICE(0x1931, 0x000c)}, /* Nozomi HSDPA */
{},
};
@@ -414,6 +412,8 @@
static struct nozomi *ndevs[NOZOMI_MAX_CARDS];
static struct tty_driver *ntty_driver;
+static const struct tty_port_operations noz_tty_port_ops;
+
/*
* find card by tty_index
*/
@@ -853,8 +853,6 @@
goto put;
}
- tty_buffer_request_room(tty, size);
-
while (size > 0) {
read_mem32((u32 *) buf, addr + offset, RECEIVE_BUF_MAX);
@@ -1473,9 +1471,11 @@
for (i = 0; i < MAX_PORT; i++) {
struct device *tty_dev;
-
- mutex_init(&dc->port[i].tty_sem);
- tty_port_init(&dc->port[i].port);
+ struct port *port = &dc->port[i];
+ port->dc = dc;
+ mutex_init(&port->tty_sem);
+ tty_port_init(&port->port);
+ port->port.ops = &noz_tty_port_ops;
tty_dev = tty_register_device(ntty_driver, dc->index_start + i,
&pdev->dev);
@@ -1600,67 +1600,74 @@
* ----------------------------------------------------------------------------
*/
-/* Called when the userspace process opens the tty, /dev/noz*. */
-static int ntty_open(struct tty_struct *tty, struct file *file)
+static int ntty_install(struct tty_driver *driver, struct tty_struct *tty)
{
struct port *port = get_port_by_tty(tty);
struct nozomi *dc = get_dc_by_tty(tty);
- unsigned long flags;
-
+ int ret;
if (!port || !dc || dc->state != NOZOMI_STATE_READY)
return -ENODEV;
-
- if (mutex_lock_interruptible(&port->tty_sem))
- return -ERESTARTSYS;
-
- port->port.count++;
- dc->open_ttys++;
-
- /* Enable interrupt downlink for channel */
- if (port->port.count == 1) {
- tty->driver_data = port;
- tty_port_tty_set(&port->port, tty);
- DBG1("open: %d", port->token_dl);
- spin_lock_irqsave(&dc->spin_mutex, flags);
- dc->last_ier = dc->last_ier | port->token_dl;
- writew(dc->last_ier, dc->reg_ier);
- spin_unlock_irqrestore(&dc->spin_mutex, flags);
+ ret = tty_init_termios(tty);
+ if (ret == 0) {
+ tty_driver_kref_get(driver);
+ driver->ttys[tty->index] = tty;
}
- mutex_unlock(&port->tty_sem);
+ return ret;
+}
+
+static void ntty_cleanup(struct tty_struct *tty)
+{
+ tty->driver_data = NULL;
+}
+
+static int ntty_activate(struct tty_port *tport, struct tty_struct *tty)
+{
+ struct port *port = container_of(tport, struct port, port);
+ struct nozomi *dc = port->dc;
+ unsigned long flags;
+
+ DBG1("open: %d", port->token_dl);
+ spin_lock_irqsave(&dc->spin_mutex, flags);
+ dc->last_ier = dc->last_ier | port->token_dl;
+ writew(dc->last_ier, dc->reg_ier);
+ dc->open_ttys++;
+ spin_unlock_irqrestore(&dc->spin_mutex, flags);
+ printk("noz: activated %d: %p\n", tty->index, tport);
return 0;
}
-/* Called when the userspace process close the tty, /dev/noz*. Also
- called immediately if ntty_open fails in which case tty->driver_data
- will be NULL an we exit by the first return */
-
-static void ntty_close(struct tty_struct *tty, struct file *file)
+static int ntty_open(struct tty_struct *tty, struct file *filp)
{
- struct nozomi *dc = get_dc_by_tty(tty);
- struct port *nport = tty->driver_data;
- struct tty_port *port = &nport->port;
+ struct port *port = get_port_by_tty(tty);
+ return tty_port_open(&port->port, tty, filp);
+}
+
+static void ntty_shutdown(struct tty_port *tport)
+{
+ struct port *port = container_of(tport, struct port, port);
+ struct nozomi *dc = port->dc;
unsigned long flags;
- if (!dc || !nport)
- return;
-
- /* Users cannot interrupt a close */
- mutex_lock(&nport->tty_sem);
-
- WARN_ON(!port->count);
-
+ DBG1("close: %d", port->token_dl);
+ spin_lock_irqsave(&dc->spin_mutex, flags);
+ dc->last_ier &= ~(port->token_dl);
+ writew(dc->last_ier, dc->reg_ier);
dc->open_ttys--;
- port->count--;
+ spin_unlock_irqrestore(&dc->spin_mutex, flags);
+ printk("noz: shutdown %p\n", tport);
+}
- if (port->count == 0) {
- DBG1("close: %d", nport->token_dl);
- tty_port_tty_set(port, NULL);
- spin_lock_irqsave(&dc->spin_mutex, flags);
- dc->last_ier &= ~(nport->token_dl);
- writew(dc->last_ier, dc->reg_ier);
- spin_unlock_irqrestore(&dc->spin_mutex, flags);
- }
- mutex_unlock(&nport->tty_sem);
+static void ntty_close(struct tty_struct *tty, struct file *filp)
+{
+ struct port *port = tty->driver_data;
+ if (port)
+ tty_port_close(&port->port, tty, filp);
+}
+
+static void ntty_hangup(struct tty_struct *tty)
+{
+ struct port *port = tty->driver_data;
+ tty_port_hangup(&port->port);
}
/*
@@ -1680,15 +1687,7 @@
if (!dc || !port)
return -ENODEV;
- if (unlikely(!mutex_trylock(&port->tty_sem))) {
- /*
- * must test lock as tty layer wraps calls
- * to this function with BKL
- */
- dev_err(&dc->pdev->dev, "Would have deadlocked - "
- "return EAGAIN\n");
- return -EAGAIN;
- }
+ mutex_lock(&port->tty_sem);
if (unlikely(!port->port.count)) {
DBG1(" ");
@@ -1728,25 +1727,23 @@
* This method is called by the upper tty layer.
* #according to sources N_TTY.c it expects a value >= 0 and
* does not check for negative values.
+ *
+ * If the port is unplugged, report lots of room and let the bits
+ * dribble away so we don't block anything.
*/
static int ntty_write_room(struct tty_struct *tty)
{
struct port *port = tty->driver_data;
- int room = 0;
+ int room = 4096;
const struct nozomi *dc = get_dc_by_tty(tty);
- if (!dc || !port)
- return 0;
- if (!mutex_trylock(&port->tty_sem))
- return 0;
-
- if (!port->port.count)
- goto exit;
-
- room = port->fifo_ul.size - kfifo_len(&port->fifo_ul);
-
-exit:
- mutex_unlock(&port->tty_sem);
+ if (dc) {
+ mutex_lock(&port->tty_sem);
+ if (port->port.count)
+ room = port->fifo_ul.size -
+ kfifo_len(&port->fifo_ul);
+ mutex_unlock(&port->tty_sem);
+ }
return room;
}
@@ -1906,10 +1903,16 @@
return rval;
}
+static const struct tty_port_operations noz_tty_port_ops = {
+ .activate = ntty_activate,
+ .shutdown = ntty_shutdown,
+};
+
static const struct tty_operations tty_ops = {
.ioctl = ntty_ioctl,
.open = ntty_open,
.close = ntty_close,
+ .hangup = ntty_hangup,
.write = ntty_write,
.write_room = ntty_write_room,
.unthrottle = ntty_unthrottle,
@@ -1917,6 +1920,8 @@
.chars_in_buffer = ntty_chars_in_buffer,
.tiocmget = ntty_tiocmget,
.tiocmset = ntty_tiocmset,
+ .install = ntty_install,
+ .cleanup = ntty_cleanup,
};
/* Module initialization */
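The nozomi rework moves the first-open and last-close work into tty_port_operations ->activate and ->shutdown and lets tty_port_open()/tty_port_close() do the counting. The sketch below is a simplified userspace model of that split, not the real tty_port API.

#include <stdio.h>

struct port_ops {
	int  (*activate)(void *port);	/* called on the first open only */
	void (*shutdown)(void *port);	/* called on the last close only */
};

struct port {
	int count;
	const struct port_ops *ops;
};

static int port_open(struct port *p)
{
	if (p->count++ == 0 && p->ops->activate)
		return p->ops->activate(p);
	return 0;
}

static void port_close(struct port *p)
{
	if (p->count > 0 && --p->count == 0 && p->ops->shutdown)
		p->ops->shutdown(p);
}

/* Driver side: only the hardware enable/disable remains here. */
static int noz_activate(void *port)
{
	(void)port;
	printf("enable downlink interrupts\n");
	return 0;
}

static void noz_shutdown(void *port)
{
	(void)port;
	printf("disable downlink interrupts\n");
}

static const struct port_ops noz_ops = { noz_activate, noz_shutdown };

int main(void)
{
	struct port p = { 0, &noz_ops };

	port_open(&p);	/* first open -> activate */
	port_open(&p);	/* nested open -> nothing */
	port_close(&p);	/* still open -> nothing */
	port_close(&p);	/* last close -> shutdown */
	return 0;
}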
diff --git a/drivers/char/serial167.c b/drivers/char/serial167.c
index 452370a..986aa60 100644
--- a/drivers/char/serial167.c
+++ b/drivers/char/serial167.c
@@ -658,8 +658,7 @@
info->mon.char_max = char_count;
info->mon.char_last = char_count;
#endif
- len = tty_buffer_request_room(tty, char_count);
- while (len--) {
+ while (char_count--) {
data = base_addr[CyRDR];
tty_insert_flip_char(tty, data, TTY_NORMAL);
#ifdef CYCLOM_16Y_HACK
diff --git a/drivers/char/specialix.c b/drivers/char/specialix.c
index 268e17f..07ac14d 100644
--- a/drivers/char/specialix.c
+++ b/drivers/char/specialix.c
@@ -646,8 +646,6 @@
dprintk(SX_DEBUG_RX, "port: %p: count: %d\n", port, count);
port->hits[count > 8 ? 9 : count]++;
- tty_buffer_request_room(tty, count);
-
while (count--)
tty_insert_flip_char(tty, sx_in(bp, CD186x_RDR), TTY_NORMAL);
tty_flip_buffer_push(tty);
diff --git a/drivers/char/synclink.c b/drivers/char/synclink.c
index 4846b73..0658fc5 100644
--- a/drivers/char/synclink.c
+++ b/drivers/char/synclink.c
@@ -2031,7 +2031,7 @@
if (mgsl_paranoia_check(info, tty->name, "mgsl_put_char"))
return 0;
- if (!tty || !info->xmit_buf)
+ if (!info->xmit_buf)
return 0;
spin_lock_irqsave(&info->irq_spinlock, flags);
@@ -2121,7 +2121,7 @@
if (mgsl_paranoia_check(info, tty->name, "mgsl_write"))
goto cleanup;
- if (!tty || !info->xmit_buf)
+ if (!info->xmit_buf)
goto cleanup;
if ( info->params.mode == MGSL_MODE_HDLC ||
diff --git a/drivers/char/synclink_gt.c b/drivers/char/synclink_gt.c
index 8678f0c..4561ce2 100644
--- a/drivers/char/synclink_gt.c
+++ b/drivers/char/synclink_gt.c
@@ -468,7 +468,7 @@
static unsigned int tbuf_bytes(struct slgt_info *info);
static void reset_tbufs(struct slgt_info *info);
static void tdma_reset(struct slgt_info *info);
-static void tx_load(struct slgt_info *info, const char *buf, unsigned int count);
+static bool tx_load(struct slgt_info *info, const char *buf, unsigned int count);
static void get_signals(struct slgt_info *info);
static void set_signals(struct slgt_info *info);
@@ -813,59 +813,32 @@
int ret = 0;
struct slgt_info *info = tty->driver_data;
unsigned long flags;
- unsigned int bufs_needed;
if (sanity_check(info, tty->name, "write"))
- goto cleanup;
+ return -EIO;
+
DBGINFO(("%s write count=%d\n", info->device_name, count));
- if (!info->tx_buf)
- goto cleanup;
+ if (!info->tx_buf || (count > info->max_frame_size))
+ return -EIO;
- if (count > info->max_frame_size) {
- ret = -EIO;
- goto cleanup;
- }
+ if (!count || tty->stopped || tty->hw_stopped)
+ return 0;
- if (!count)
- goto cleanup;
+ spin_lock_irqsave(&info->lock, flags);
- if (!info->tx_active && info->tx_count) {
+ if (info->tx_count) {
/* send accumulated data from send_char() */
- tx_load(info, info->tx_buf, info->tx_count);
- goto start;
+ if (!tx_load(info, info->tx_buf, info->tx_count))
+ goto cleanup;
+ info->tx_count = 0;
}
- bufs_needed = (count/DMABUFSIZE);
- if (count % DMABUFSIZE)
- ++bufs_needed;
- if (bufs_needed > free_tbuf_count(info))
- goto cleanup;
- ret = info->tx_count = count;
- tx_load(info, buf, count);
- goto start;
-
-start:
- if (info->tx_count && !tty->stopped && !tty->hw_stopped) {
- spin_lock_irqsave(&info->lock,flags);
- if (!info->tx_active)
- tx_start(info);
- else if (!(rd_reg32(info, TDCSR) & BIT0)) {
- /* transmit still active but transmit DMA stopped */
- unsigned int i = info->tbuf_current;
- if (!i)
- i = info->tbuf_count;
- i--;
- /* if DMA buf unsent must try later after tx idle */
- if (desc_count(info->tbufs[i]))
- ret = 0;
- }
- if (ret > 0)
- update_tx_timer(info);
- spin_unlock_irqrestore(&info->lock,flags);
- }
+ if (tx_load(info, buf, count))
+ ret = count;
cleanup:
+ spin_unlock_irqrestore(&info->lock, flags);
DBGINFO(("%s write rc=%d\n", info->device_name, ret));
return ret;
}
@@ -882,7 +855,7 @@
if (!info->tx_buf)
return 0;
spin_lock_irqsave(&info->lock,flags);
- if (!info->tx_active && (info->tx_count < info->max_frame_size)) {
+ if (info->tx_count < info->max_frame_size) {
info->tx_buf[info->tx_count++] = ch;
ret = 1;
}
@@ -981,10 +954,8 @@
DBGINFO(("%s flush_chars start transmit\n", info->device_name));
spin_lock_irqsave(&info->lock,flags);
- if (!info->tx_active && info->tx_count) {
- tx_load(info, info->tx_buf,info->tx_count);
- tx_start(info);
- }
+ if (info->tx_count && tx_load(info, info->tx_buf, info->tx_count))
+ info->tx_count = 0;
spin_unlock_irqrestore(&info->lock,flags);
}
@@ -997,10 +968,9 @@
return;
DBGINFO(("%s flush_buffer\n", info->device_name));
- spin_lock_irqsave(&info->lock,flags);
- if (!info->tx_active)
- info->tx_count = 0;
- spin_unlock_irqrestore(&info->lock,flags);
+ spin_lock_irqsave(&info->lock, flags);
+ info->tx_count = 0;
+ spin_unlock_irqrestore(&info->lock, flags);
tty_wakeup(tty);
}
@@ -1033,12 +1003,10 @@
if (sanity_check(info, tty->name, "tx_release"))
return;
DBGINFO(("%s tx_release\n", info->device_name));
- spin_lock_irqsave(&info->lock,flags);
- if (!info->tx_active && info->tx_count) {
- tx_load(info, info->tx_buf, info->tx_count);
- tx_start(info);
- }
- spin_unlock_irqrestore(&info->lock,flags);
+ spin_lock_irqsave(&info->lock, flags);
+ if (info->tx_count && tx_load(info, info->tx_buf, info->tx_count))
+ info->tx_count = 0;
+ spin_unlock_irqrestore(&info->lock, flags);
}
/*
@@ -1506,27 +1474,25 @@
DBGINFO(("%s hdlc_xmit\n", dev->name));
+ if (!skb->len)
+ return NETDEV_TX_OK;
+
/* stop sending until this frame completes */
netif_stop_queue(dev);
- /* copy data to device buffers */
- info->tx_count = skb->len;
- tx_load(info, skb->data, skb->len);
-
/* update network statistics */
dev->stats.tx_packets++;
dev->stats.tx_bytes += skb->len;
- /* done with socket buffer, so free it */
- dev_kfree_skb(skb);
-
/* save start time for transmit timeout detection */
dev->trans_start = jiffies;
- spin_lock_irqsave(&info->lock,flags);
- tx_start(info);
- update_tx_timer(info);
- spin_unlock_irqrestore(&info->lock,flags);
+ spin_lock_irqsave(&info->lock, flags);
+ tx_load(info, skb->data, skb->len);
+ spin_unlock_irqrestore(&info->lock, flags);
+
+ /* done with socket buffer, so free it */
+ dev_kfree_skb(skb);
return NETDEV_TX_OK;
}
@@ -2180,7 +2146,7 @@
if (info->params.mode == MGSL_MODE_ASYNC) {
if (status & IRQ_TXIDLE) {
- if (info->tx_count)
+ if (info->tx_active)
isr_txeom(info, status);
}
if (info->rx_pio && (status & IRQ_RXDATA))
@@ -2276,13 +2242,42 @@
}
}
+/*
+ * return true if there are unsent tx DMA buffers, otherwise false
+ *
+ * if there are unsent buffers then info->tbuf_start
+ * is set to index of first unsent buffer
+ */
+static bool unsent_tbufs(struct slgt_info *info)
+{
+ unsigned int i = info->tbuf_current;
+ bool rc = false;
+
+ /*
+ * search backwards from last loaded buffer (precedes tbuf_current)
+ * for first unsent buffer (desc_count > 0)
+ */
+
+ do {
+ if (i)
+ i--;
+ else
+ i = info->tbuf_count - 1;
+ if (!desc_count(info->tbufs[i]))
+ break;
+ info->tbuf_start = i;
+ rc = true;
+ } while (i != info->tbuf_current);
+
+ return rc;
+}
+
static void isr_txeom(struct slgt_info *info, unsigned short status)
{
DBGISR(("%s txeom status=%04x\n", info->device_name, status));
slgt_irq_off(info, IRQ_TXDATA + IRQ_TXIDLE + IRQ_TXUNDER);
tdma_reset(info);
- reset_tbufs(info);
if (status & IRQ_TXUNDER) {
unsigned short val = rd_reg16(info, TCR);
wr_reg16(info, TCR, (unsigned short)(val | BIT2)); /* set reset bit */
@@ -2297,8 +2292,12 @@
info->icount.txok++;
}
+ if (unsent_tbufs(info)) {
+ tx_start(info);
+ update_tx_timer(info);
+ return;
+ }
info->tx_active = false;
- info->tx_count = 0;
del_timer(&info->tx_timer);
@@ -3949,7 +3948,7 @@
info->tx_enabled = true;
}
- if (info->tx_count) {
+ if (desc_count(info->tbufs[info->tbuf_start])) {
info->drop_rts_on_tx_done = false;
if (info->params.mode != MGSL_MODE_ASYNC) {
@@ -4772,25 +4771,36 @@
}
/*
- * load transmit DMA buffer(s) with data
+ * load data into transmit DMA buffer ring and start transmitter if needed
+ * return true if data accepted, otherwise false (buffers full)
*/
-static void tx_load(struct slgt_info *info, const char *buf, unsigned int size)
+static bool tx_load(struct slgt_info *info, const char *buf, unsigned int size)
{
unsigned short count;
unsigned int i;
struct slgt_desc *d;
- if (size == 0)
- return;
+ /* check required buffer space */
+ if (DIV_ROUND_UP(size, DMABUFSIZE) > free_tbuf_count(info))
+ return false;
DBGDATA(info, buf, size, "tx");
+ /*
+ * copy data to one or more DMA buffers in circular ring
+ * tbuf_start = first buffer for this data
+ * tbuf_current = next free buffer
+ *
+ * Copy all data before making data visible to DMA controller by
+ * setting descriptor count of the first buffer.
+ * This prevents an active DMA controller from reading the first DMA
+ * buffers of a frame and stopping before the final buffers are filled.
+ */
+
info->tbuf_start = i = info->tbuf_current;
while (size) {
d = &info->tbufs[i];
- if (++i == info->tbuf_count)
- i = 0;
count = (unsigned short)((size > DMABUFSIZE) ? DMABUFSIZE : size);
memcpy(d->buf, buf, count);
@@ -4808,11 +4818,27 @@
else
set_desc_eof(*d, 0);
- set_desc_count(*d, count);
+ /* set descriptor count for all but first buffer */
+ if (i != info->tbuf_start)
+ set_desc_count(*d, count);
d->buf_count = count;
+
+ if (++i == info->tbuf_count)
+ i = 0;
}
info->tbuf_current = i;
+
+ /* set first buffer count to make new data visible to DMA controller */
+ d = &info->tbufs[info->tbuf_start];
+ set_desc_count(*d, d->buf_count);
+
+ /* start transmitter if needed and update transmit timeout */
+ if (!info->tx_active)
+ tx_start(info);
+ update_tx_timer(info);
+
+ return true;
}
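/*
 * Editor's sketch (not part of this patch): with tx_load() now returning a
 * bool and starting the transmitter itself, a caller only needs to take the
 * device lock and check whether the ring accepted the data. The function
 * name and error handling below are illustrative only.
 */
static int example_send(struct slgt_info *info, const char *buf,
			unsigned int size)
{
	unsigned long flags;
	bool queued;

	spin_lock_irqsave(&info->lock, flags);
	/* copies into the DMA ring, starts tx if idle, updates tx timer */
	queued = tx_load(info, buf, size);
	spin_unlock_irqrestore(&info->lock, flags);

	return queued ? 0 : -EBUSY;	/* ring full, retry later */
}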
static int register_test(struct slgt_info *info)
@@ -4934,9 +4960,7 @@
spin_lock_irqsave(&info->lock,flags);
async_mode(info);
rx_start(info);
- info->tx_count = count;
tx_load(info, buf, count);
- tx_start(info);
spin_unlock_irqrestore(&info->lock, flags);
/* wait for receive complete */
diff --git a/drivers/char/tty_buffer.c b/drivers/char/tty_buffer.c
index 66fa4e1..af8d977 100644
--- a/drivers/char/tty_buffer.c
+++ b/drivers/char/tty_buffer.c
@@ -231,9 +231,10 @@
EXPORT_SYMBOL_GPL(tty_buffer_request_room);
/**
- * tty_insert_flip_string - Add characters to the tty buffer
+ * tty_insert_flip_string_fixed_flag - Add characters to the tty buffer
* @tty: tty structure
* @chars: characters
+ * @flag: flag value for each character
* @size: size
*
* Queue a series of bytes to the tty buffering. All the characters
@@ -242,18 +243,19 @@
* Locking: Called functions may take tty->buf.lock
*/
-int tty_insert_flip_string(struct tty_struct *tty, const unsigned char *chars,
- size_t size)
+int tty_insert_flip_string_fixed_flag(struct tty_struct *tty,
+ const unsigned char *chars, char flag, size_t size)
{
int copied = 0;
do {
- int space = tty_buffer_request_room(tty, size - copied);
+ int goal = min(size - copied, TTY_BUFFER_PAGE);
+ int space = tty_buffer_request_room(tty, goal);
struct tty_buffer *tb = tty->buf.tail;
/* If there is no space then tb may be NULL */
if (unlikely(space == 0))
break;
memcpy(tb->char_buf_ptr + tb->used, chars, space);
- memset(tb->flag_buf_ptr + tb->used, TTY_NORMAL, space);
+ memset(tb->flag_buf_ptr + tb->used, flag, space);
tb->used += space;
copied += space;
chars += space;
@@ -262,7 +264,7 @@
} while (unlikely(size > copied));
return copied;
}
-EXPORT_SYMBOL(tty_insert_flip_string);
+EXPORT_SYMBOL(tty_insert_flip_string_fixed_flag);
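/*
 * Editor's sketch (assumed, not shown in this hunk): existing callers of the
 * old tty_insert_flip_string() can be kept working by a thin wrapper that
 * passes TTY_NORMAL as the fixed flag.
 */
static inline int tty_insert_flip_string(struct tty_struct *tty,
					 const unsigned char *chars,
					 size_t size)
{
	return tty_insert_flip_string_fixed_flag(tty, chars, TTY_NORMAL, size);
}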
/**
* tty_insert_flip_string_flags - Add characters to the tty buffer
@@ -283,7 +285,8 @@
{
int copied = 0;
do {
- int space = tty_buffer_request_room(tty, size - copied);
+ int goal = min(size - copied, TTY_BUFFER_PAGE);
+ int space = tty_buffer_request_room(tty, goal);
struct tty_buffer *tb = tty->buf.tail;
/* If there is no space then tb may be NULL */
if (unlikely(space == 0))
diff --git a/drivers/char/tty_ldisc.c b/drivers/char/tty_ldisc.c
index 3f653f7..500e740 100644
--- a/drivers/char/tty_ldisc.c
+++ b/drivers/char/tty_ldisc.c
@@ -706,12 +706,13 @@
/**
* tty_ldisc_reinit - reinitialise the tty ldisc
* @tty: tty to reinit
+ * @ldisc: line discipline to reinitialize
*
- * Switch the tty back to N_TTY line discipline and leave the
- * ldisc state closed
+ * Switch the tty to a line discipline and leave the ldisc
+ * state closed
*/
-static void tty_ldisc_reinit(struct tty_struct *tty)
+static void tty_ldisc_reinit(struct tty_struct *tty, int ldisc)
{
struct tty_ldisc *ld;
@@ -721,10 +722,10 @@
/*
* Switch the line discipline back
*/
- ld = tty_ldisc_get(N_TTY);
+ ld = tty_ldisc_get(ldisc);
BUG_ON(IS_ERR(ld));
tty_ldisc_assign(tty, ld);
- tty_set_termios_ldisc(tty, N_TTY);
+ tty_set_termios_ldisc(tty, ldisc);
}
/**
@@ -745,6 +746,8 @@
void tty_ldisc_hangup(struct tty_struct *tty)
{
struct tty_ldisc *ld;
+ int reset = tty->driver->flags & TTY_DRIVER_RESET_TERMIOS;
+ int err = 0;
/*
* FIXME! What are the locking issues here? This may be overdoing
@@ -772,25 +775,32 @@
wake_up_interruptible_poll(&tty->read_wait, POLLIN);
/*
* Shutdown the current line discipline, and reset it to
- * N_TTY.
+ * N_TTY if need be.
+ *
+ * Avoid racing set_ldisc or tty_ldisc_release
*/
- if (tty->driver->flags & TTY_DRIVER_RESET_TERMIOS) {
- /* Avoid racing set_ldisc or tty_ldisc_release */
- mutex_lock(&tty->ldisc_mutex);
- tty_ldisc_halt(tty);
- if (tty->ldisc) { /* Not yet closed */
- /* Switch back to N_TTY */
- tty_ldisc_reinit(tty);
- /* At this point we have a closed ldisc and we want to
- reopen it. We could defer this to the next open but
- it means auditing a lot of other paths so this is
- a FIXME */
- WARN_ON(tty_ldisc_open(tty, tty->ldisc));
- tty_ldisc_enable(tty);
+ mutex_lock(&tty->ldisc_mutex);
+ tty_ldisc_halt(tty);
+ /* At this point we have a closed ldisc and we want to
+ reopen it. We could defer this to the next open but
+ it means auditing a lot of other paths so this is
+ a FIXME */
+ if (tty->ldisc) { /* Not yet closed */
+ if (reset == 0) {
+ tty_ldisc_reinit(tty, tty->termios->c_line);
+ err = tty_ldisc_open(tty, tty->ldisc);
}
- mutex_unlock(&tty->ldisc_mutex);
- tty_reset_termios(tty);
+ /* If the re-open fails or we reset then go to N_TTY. The
+ N_TTY open cannot fail */
+ if (reset || err) {
+ tty_ldisc_reinit(tty, N_TTY);
+ WARN_ON(tty_ldisc_open(tty, tty->ldisc));
+ }
+ tty_ldisc_enable(tty);
}
+ mutex_unlock(&tty->ldisc_mutex);
+ if (reset)
+ tty_reset_termios(tty);
}
/**
diff --git a/drivers/char/vt_ioctl.c b/drivers/char/vt_ioctl.c
index 6aa1028..87778dc 100644
--- a/drivers/char/vt_ioctl.c
+++ b/drivers/char/vt_ioctl.c
@@ -888,7 +888,7 @@
ret = -EFAULT;
goto out;
}
- if (tmp.mode != VT_AUTO && tmp.mode != VT_PROCESS) {
+ if (tmp.mode != VT_AUTO && tmp.mode != VT_PROCESS && tmp.mode != VT_PROCESS_AUTO) {
ret = -EINVAL;
goto out;
}
@@ -1622,7 +1622,7 @@
* telling it that it has acquired. Also check if it has died and
* clean up (similar to logic employed in change_console())
*/
- if (vc->vt_mode.mode == VT_PROCESS) {
+ if (vc->vt_mode.mode == VT_PROCESS || vc->vt_mode.mode == VT_PROCESS_AUTO) {
/*
* Send the signal as privileged - kill_pid() will
* tell us if the process has gone or something else
@@ -1682,7 +1682,7 @@
* vt to auto control.
*/
vc = vc_cons[fg_console].d;
- if (vc->vt_mode.mode == VT_PROCESS) {
+ if (vc->vt_mode.mode == VT_PROCESS || vc->vt_mode.mode == VT_PROCESS_AUTO) {
/*
* Send the signal as privileged - kill_pid() will
* tell us if the process has gone or something else
@@ -1693,27 +1693,28 @@
*/
vc->vt_newvt = new_vc->vc_num;
if (kill_pid(vc->vt_pid, vc->vt_mode.relsig, 1) == 0) {
+ if (vc->vt_mode.mode == VT_PROCESS)
+ /*
+ * It worked. Mark the vt to switch to and
+ * return. The process needs to send us a
+ * VT_RELDISP ioctl to complete the switch.
+ */
+ return;
+ } else {
/*
- * It worked. Mark the vt to switch to and
- * return. The process needs to send us a
- * VT_RELDISP ioctl to complete the switch.
+ * The controlling process has died, so we revert back to
+ * normal operation. In this case, we'll also change back
+ * to KD_TEXT mode. I'm not sure if this is strictly correct
+ * but it saves the agony when the X server dies and the screen
+ * remains blanked due to KD_GRAPHICS! It would be nice to do
+ * this outside of VT_PROCESS but there is no single process
+ * to account for and tracking tty count may be undesirable.
*/
- return;
+ reset_vc(vc);
}
/*
- * The controlling process has died, so we revert back to
- * normal operation. In this case, we'll also change back
- * to KD_TEXT mode. I'm not sure if this is strictly correct
- * but it saves the agony when the X server dies and the screen
- * remains blanked due to KD_GRAPHICS! It would be nice to do
- * this outside of VT_PROCESS but there is no single process
- * to account for and tracking tty count may be undesirable.
- */
- reset_vc(vc);
-
- /*
- * Fall through to normal (VT_AUTO) handling of the switch...
+ * Fall through to normal (VT_AUTO and VT_PROCESS_AUTO) handling of the switch...
*/
}
diff --git a/drivers/dma/dmaengine.c b/drivers/dma/dmaengine.c
index e7a3230..87399ca 100644
--- a/drivers/dma/dmaengine.c
+++ b/drivers/dma/dmaengine.c
@@ -284,7 +284,7 @@
/**
* channel_table - percpu lookup table for memory-to-memory offload providers
*/
-static struct dma_chan_tbl_ent *channel_table[DMA_TX_TYPE_END];
+static struct dma_chan_tbl_ent __percpu *channel_table[DMA_TX_TYPE_END];
static int __init dma_channel_table_init(void)
{
diff --git a/drivers/edac/amd64_edac.c b/drivers/edac/amd64_edac.c
index 3391e67..cf17dbb 100644
--- a/drivers/edac/amd64_edac.c
+++ b/drivers/edac/amd64_edac.c
@@ -13,7 +13,7 @@
static int ecc_enable_override;
module_param(ecc_enable_override, int, 0644);
-static struct msr *msrs;
+static struct msr __percpu *msrs;
/* Lookup table for all possible MC control instances */
struct amd64_pvt;
@@ -2553,14 +2553,14 @@
if (on) {
if (reg->l & K8_MSR_MCGCTL_NBE)
- pvt->flags.ecc_report = 1;
+ pvt->flags.nb_mce_enable = 1;
reg->l |= K8_MSR_MCGCTL_NBE;
} else {
/*
- * Turn off ECC reporting only when it was off before
+ * Turn off NB MCE reporting only when it was off before
*/
- if (!pvt->flags.ecc_report)
+ if (!pvt->flags.nb_mce_enable)
reg->l &= ~K8_MSR_MCGCTL_NBE;
}
}
@@ -2571,22 +2571,11 @@
return 0;
}
-/*
- * Only if 'ecc_enable_override' is set AND BIOS had ECC disabled, do "we"
- * enable it.
- */
static void amd64_enable_ecc_error_reporting(struct mem_ctl_info *mci)
{
struct amd64_pvt *pvt = mci->pvt_info;
u32 value, mask = K8_NBCTL_CECCEn | K8_NBCTL_UECCEn;
- if (!ecc_enable_override)
- return;
-
- amd64_printk(KERN_WARNING,
- "'ecc_enable_override' parameter is active, "
- "Enabling AMD ECC hardware now: CAUTION\n");
-
amd64_read_pci_cfg(pvt->misc_f3_ctl, K8_NBCTL, &value);
/* turn on UECCn and CECCEn bits */
@@ -2611,6 +2600,8 @@
"This node reports that DRAM ECC is "
"currently Disabled; ENABLING now\n");
+ pvt->flags.nb_ecc_prev = 0;
+
/* Attempt to turn on DRAM ECC Enable */
value |= K8_NBCFG_ECC_ENABLE;
pci_write_config_dword(pvt->misc_f3_ctl, K8_NBCFG, value);
@@ -2625,7 +2616,10 @@
amd64_printk(KERN_DEBUG,
"Hardware accepted DRAM ECC Enable\n");
}
+ } else {
+ pvt->flags.nb_ecc_prev = 1;
}
+
debugf0("NBCFG(2)= 0x%x CHIPKILL= %s ECC_ENABLE= %s\n", value,
(value & K8_NBCFG_CHIPKILL) ? "Enabled" : "Disabled",
(value & K8_NBCFG_ECC_ENABLE) ? "Enabled" : "Disabled");
@@ -2644,12 +2638,18 @@
value &= ~mask;
value |= pvt->old_nbctl;
- /* restore the NB Enable MCGCTL bit */
pci_write_config_dword(pvt->misc_f3_ctl, K8_NBCTL, value);
+ /* restore previous BIOS DRAM ECC "off" setting which we force-enabled */
+ if (!pvt->flags.nb_ecc_prev) {
+ amd64_read_pci_cfg(pvt->misc_f3_ctl, K8_NBCFG, &value);
+ value &= ~K8_NBCFG_ECC_ENABLE;
+ pci_write_config_dword(pvt->misc_f3_ctl, K8_NBCFG, value);
+ }
+
+ /* restore the NB Enable MCGCTL bit */
if (amd64_toggle_ecc_err_reporting(pvt, OFF))
- amd64_printk(KERN_WARNING, "Error restoring ECC reporting over "
- "MCGCTL!\n");
+ amd64_printk(KERN_WARNING, "Error restoring NB MCGCTL settings!\n");
}
/*
@@ -2690,8 +2690,9 @@
if (!ecc_enable_override) {
amd64_printk(KERN_NOTICE, "%s", ecc_msg);
return -ENODEV;
+ } else {
+ amd64_printk(KERN_WARNING, "Forcing ECC checking on!\n");
}
- ecc_enable_override = 0;
}
return 0;
diff --git a/drivers/edac/amd64_edac.h b/drivers/edac/amd64_edac.h
index 41bc561..0d4bf56 100644
--- a/drivers/edac/amd64_edac.h
+++ b/drivers/edac/amd64_edac.h
@@ -487,7 +487,8 @@
/* misc settings */
struct flags {
unsigned long cf8_extcfg:1;
- unsigned long ecc_report:1;
+ unsigned long nb_mce_enable:1;
+ unsigned long nb_ecc_prev:1;
} flags;
};
diff --git a/drivers/firewire/core-cdev.c b/drivers/firewire/core-cdev.c
index 4eeaed5..8be720b 100644
--- a/drivers/firewire/core-cdev.c
+++ b/drivers/firewire/core-cdev.c
@@ -25,6 +25,7 @@
#include <linux/firewire.h>
#include <linux/firewire-cdev.h>
#include <linux/idr.h>
+#include <linux/irqflags.h>
#include <linux/jiffies.h>
#include <linux/kernel.h>
#include <linux/kref.h>
@@ -32,7 +33,6 @@
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/poll.h>
-#include <linux/preempt.h>
#include <linux/sched.h>
#include <linux/spinlock.h>
#include <linux/string.h>
@@ -368,39 +368,56 @@
for_each_client(device, wake_up_client);
}
-static int ioctl_get_info(struct client *client, void *buffer)
+union ioctl_arg {
+ struct fw_cdev_get_info get_info;
+ struct fw_cdev_send_request send_request;
+ struct fw_cdev_allocate allocate;
+ struct fw_cdev_deallocate deallocate;
+ struct fw_cdev_send_response send_response;
+ struct fw_cdev_initiate_bus_reset initiate_bus_reset;
+ struct fw_cdev_add_descriptor add_descriptor;
+ struct fw_cdev_remove_descriptor remove_descriptor;
+ struct fw_cdev_create_iso_context create_iso_context;
+ struct fw_cdev_queue_iso queue_iso;
+ struct fw_cdev_start_iso start_iso;
+ struct fw_cdev_stop_iso stop_iso;
+ struct fw_cdev_get_cycle_timer get_cycle_timer;
+ struct fw_cdev_allocate_iso_resource allocate_iso_resource;
+ struct fw_cdev_send_stream_packet send_stream_packet;
+ struct fw_cdev_get_cycle_timer2 get_cycle_timer2;
+};
+
+static int ioctl_get_info(struct client *client, union ioctl_arg *arg)
{
- struct fw_cdev_get_info *get_info = buffer;
+ struct fw_cdev_get_info *a = &arg->get_info;
struct fw_cdev_event_bus_reset bus_reset;
unsigned long ret = 0;
- client->version = get_info->version;
- get_info->version = FW_CDEV_VERSION;
- get_info->card = client->device->card->index;
+ client->version = a->version;
+ a->version = FW_CDEV_VERSION;
+ a->card = client->device->card->index;
down_read(&fw_device_rwsem);
- if (get_info->rom != 0) {
- void __user *uptr = u64_to_uptr(get_info->rom);
- size_t want = get_info->rom_length;
+ if (a->rom != 0) {
+ size_t want = a->rom_length;
size_t have = client->device->config_rom_length * 4;
- ret = copy_to_user(uptr, client->device->config_rom,
- min(want, have));
+ ret = copy_to_user(u64_to_uptr(a->rom),
+ client->device->config_rom, min(want, have));
}
- get_info->rom_length = client->device->config_rom_length * 4;
+ a->rom_length = client->device->config_rom_length * 4;
up_read(&fw_device_rwsem);
if (ret != 0)
return -EFAULT;
- client->bus_reset_closure = get_info->bus_reset_closure;
- if (get_info->bus_reset != 0) {
- void __user *uptr = u64_to_uptr(get_info->bus_reset);
-
+ client->bus_reset_closure = a->bus_reset_closure;
+ if (a->bus_reset != 0) {
fill_bus_reset_event(&bus_reset, client);
- if (copy_to_user(uptr, &bus_reset, sizeof(bus_reset)))
+ if (copy_to_user(u64_to_uptr(a->bus_reset),
+ &bus_reset, sizeof(bus_reset)))
return -EFAULT;
}
@@ -571,11 +588,9 @@
return ret;
}
-static int ioctl_send_request(struct client *client, void *buffer)
+static int ioctl_send_request(struct client *client, union ioctl_arg *arg)
{
- struct fw_cdev_send_request *request = buffer;
-
- switch (request->tcode) {
+ switch (arg->send_request.tcode) {
case TCODE_WRITE_QUADLET_REQUEST:
case TCODE_WRITE_BLOCK_REQUEST:
case TCODE_READ_QUADLET_REQUEST:
@@ -592,7 +607,7 @@
return -EINVAL;
}
- return init_request(client, request, client->device->node_id,
+ return init_request(client, &arg->send_request, client->device->node_id,
client->device->max_speed);
}
@@ -683,9 +698,9 @@
kfree(r);
}
-static int ioctl_allocate(struct client *client, void *buffer)
+static int ioctl_allocate(struct client *client, union ioctl_arg *arg)
{
- struct fw_cdev_allocate *request = buffer;
+ struct fw_cdev_allocate *a = &arg->allocate;
struct address_handler_resource *r;
struct fw_address_region region;
int ret;
@@ -694,13 +709,13 @@
if (r == NULL)
return -ENOMEM;
- region.start = request->offset;
- region.end = request->offset + request->length;
- r->handler.length = request->length;
+ region.start = a->offset;
+ region.end = a->offset + a->length;
+ r->handler.length = a->length;
r->handler.address_callback = handle_request;
- r->handler.callback_data = r;
- r->closure = request->closure;
- r->client = client;
+ r->handler.callback_data = r;
+ r->closure = a->closure;
+ r->client = client;
ret = fw_core_add_address_handler(&r->handler, &region);
if (ret < 0) {
@@ -714,27 +729,25 @@
release_address_handler(client, &r->resource);
return ret;
}
- request->handle = r->resource.handle;
+ a->handle = r->resource.handle;
return 0;
}
-static int ioctl_deallocate(struct client *client, void *buffer)
+static int ioctl_deallocate(struct client *client, union ioctl_arg *arg)
{
- struct fw_cdev_deallocate *request = buffer;
-
- return release_client_resource(client, request->handle,
+ return release_client_resource(client, arg->deallocate.handle,
release_address_handler, NULL);
}
-static int ioctl_send_response(struct client *client, void *buffer)
+static int ioctl_send_response(struct client *client, union ioctl_arg *arg)
{
- struct fw_cdev_send_response *request = buffer;
+ struct fw_cdev_send_response *a = &arg->send_response;
struct client_resource *resource;
struct inbound_transaction_resource *r;
int ret = 0;
- if (release_client_resource(client, request->handle,
+ if (release_client_resource(client, a->handle,
release_request, &resource) < 0)
return -EINVAL;
@@ -743,28 +756,24 @@
if (is_fcp_request(r->request))
goto out;
- if (request->length < r->length)
- r->length = request->length;
- if (copy_from_user(r->data, u64_to_uptr(request->data), r->length)) {
+ if (a->length < r->length)
+ r->length = a->length;
+ if (copy_from_user(r->data, u64_to_uptr(a->data), r->length)) {
ret = -EFAULT;
kfree(r->request);
goto out;
}
- fw_send_response(client->device->card, r->request, request->rcode);
+ fw_send_response(client->device->card, r->request, a->rcode);
out:
kfree(r);
return ret;
}
-static int ioctl_initiate_bus_reset(struct client *client, void *buffer)
+static int ioctl_initiate_bus_reset(struct client *client, union ioctl_arg *arg)
{
- struct fw_cdev_initiate_bus_reset *request = buffer;
- int short_reset;
-
- short_reset = (request->type == FW_CDEV_SHORT_RESET);
-
- return fw_core_initiate_bus_reset(client->device->card, short_reset);
+ return fw_core_initiate_bus_reset(client->device->card,
+ arg->initiate_bus_reset.type == FW_CDEV_SHORT_RESET);
}
static void release_descriptor(struct client *client,
@@ -777,9 +786,9 @@
kfree(r);
}
-static int ioctl_add_descriptor(struct client *client, void *buffer)
+static int ioctl_add_descriptor(struct client *client, union ioctl_arg *arg)
{
- struct fw_cdev_add_descriptor *request = buffer;
+ struct fw_cdev_add_descriptor *a = &arg->add_descriptor;
struct descriptor_resource *r;
int ret;
@@ -787,22 +796,21 @@
if (!client->device->is_local)
return -ENOSYS;
- if (request->length > 256)
+ if (a->length > 256)
return -EINVAL;
- r = kmalloc(sizeof(*r) + request->length * 4, GFP_KERNEL);
+ r = kmalloc(sizeof(*r) + a->length * 4, GFP_KERNEL);
if (r == NULL)
return -ENOMEM;
- if (copy_from_user(r->data,
- u64_to_uptr(request->data), request->length * 4)) {
+ if (copy_from_user(r->data, u64_to_uptr(a->data), a->length * 4)) {
ret = -EFAULT;
goto failed;
}
- r->descriptor.length = request->length;
- r->descriptor.immediate = request->immediate;
- r->descriptor.key = request->key;
+ r->descriptor.length = a->length;
+ r->descriptor.immediate = a->immediate;
+ r->descriptor.key = a->key;
r->descriptor.data = r->data;
ret = fw_core_add_descriptor(&r->descriptor);
@@ -815,7 +823,7 @@
fw_core_remove_descriptor(&r->descriptor);
goto failed;
}
- request->handle = r->resource.handle;
+ a->handle = r->resource.handle;
return 0;
failed:
@@ -824,11 +832,9 @@
return ret;
}
-static int ioctl_remove_descriptor(struct client *client, void *buffer)
+static int ioctl_remove_descriptor(struct client *client, union ioctl_arg *arg)
{
- struct fw_cdev_remove_descriptor *request = buffer;
-
- return release_client_resource(client, request->handle,
+ return release_client_resource(client, arg->remove_descriptor.handle,
release_descriptor, NULL);
}
@@ -851,49 +857,44 @@
sizeof(e->interrupt) + header_length, NULL, 0);
}
-static int ioctl_create_iso_context(struct client *client, void *buffer)
+static int ioctl_create_iso_context(struct client *client, union ioctl_arg *arg)
{
- struct fw_cdev_create_iso_context *request = buffer;
+ struct fw_cdev_create_iso_context *a = &arg->create_iso_context;
struct fw_iso_context *context;
/* We only support one context at this time. */
if (client->iso_context != NULL)
return -EBUSY;
- if (request->channel > 63)
+ if (a->channel > 63)
return -EINVAL;
- switch (request->type) {
+ switch (a->type) {
case FW_ISO_CONTEXT_RECEIVE:
- if (request->header_size < 4 || (request->header_size & 3))
+ if (a->header_size < 4 || (a->header_size & 3))
return -EINVAL;
-
break;
case FW_ISO_CONTEXT_TRANSMIT:
- if (request->speed > SCODE_3200)
+ if (a->speed > SCODE_3200)
return -EINVAL;
-
break;
default:
return -EINVAL;
}
- context = fw_iso_context_create(client->device->card,
- request->type,
- request->channel,
- request->speed,
- request->header_size,
- iso_callback, client);
+ context = fw_iso_context_create(client->device->card, a->type,
+ a->channel, a->speed, a->header_size,
+ iso_callback, client);
if (IS_ERR(context))
return PTR_ERR(context);
- client->iso_closure = request->closure;
+ client->iso_closure = a->closure;
client->iso_context = context;
/* We only support one context at this time. */
- request->handle = 0;
+ a->handle = 0;
return 0;
}
@@ -906,9 +907,9 @@
#define GET_SY(v) (((v) >> 20) & 0x0f)
#define GET_HEADER_LENGTH(v) (((v) >> 24) & 0xff)
-static int ioctl_queue_iso(struct client *client, void *buffer)
+static int ioctl_queue_iso(struct client *client, union ioctl_arg *arg)
{
- struct fw_cdev_queue_iso *request = buffer;
+ struct fw_cdev_queue_iso *a = &arg->queue_iso;
struct fw_cdev_iso_packet __user *p, *end, *next;
struct fw_iso_context *ctx = client->iso_context;
unsigned long payload, buffer_end, header_length;
@@ -919,7 +920,7 @@
u8 header[256];
} u;
- if (ctx == NULL || request->handle != 0)
+ if (ctx == NULL || a->handle != 0)
return -EINVAL;
/*
@@ -929,23 +930,23 @@
* set them both to 0, which will still let packets with
* payload_length == 0 through. In other words, if no packets
* use the indirect payload, the iso buffer need not be mapped
- * and the request->data pointer is ignored.
+ * and the a->data pointer is ignored.
*/
- payload = (unsigned long)request->data - client->vm_start;
+ payload = (unsigned long)a->data - client->vm_start;
buffer_end = client->buffer.page_count << PAGE_SHIFT;
- if (request->data == 0 || client->buffer.pages == NULL ||
+ if (a->data == 0 || client->buffer.pages == NULL ||
payload >= buffer_end) {
payload = 0;
buffer_end = 0;
}
- p = (struct fw_cdev_iso_packet __user *)u64_to_uptr(request->packets);
+ p = (struct fw_cdev_iso_packet __user *)u64_to_uptr(a->packets);
- if (!access_ok(VERIFY_READ, p, request->size))
+ if (!access_ok(VERIFY_READ, p, a->size))
return -EFAULT;
- end = (void __user *)p + request->size;
+ end = (void __user *)p + a->size;
count = 0;
while (p < end) {
if (get_user(control, &p->control))
@@ -995,61 +996,78 @@
count++;
}
- request->size -= uptr_to_u64(p) - request->packets;
- request->packets = uptr_to_u64(p);
- request->data = client->vm_start + payload;
+ a->size -= uptr_to_u64(p) - a->packets;
+ a->packets = uptr_to_u64(p);
+ a->data = client->vm_start + payload;
return count;
}
-static int ioctl_start_iso(struct client *client, void *buffer)
+static int ioctl_start_iso(struct client *client, union ioctl_arg *arg)
{
- struct fw_cdev_start_iso *request = buffer;
+ struct fw_cdev_start_iso *a = &arg->start_iso;
- if (client->iso_context == NULL || request->handle != 0)
+ if (client->iso_context == NULL || a->handle != 0)
return -EINVAL;
- if (client->iso_context->type == FW_ISO_CONTEXT_RECEIVE) {
- if (request->tags == 0 || request->tags > 15)
- return -EINVAL;
+ if (client->iso_context->type == FW_ISO_CONTEXT_RECEIVE &&
+ (a->tags == 0 || a->tags > 15 || a->sync > 15))
+ return -EINVAL;
- if (request->sync > 15)
- return -EINVAL;
- }
-
- return fw_iso_context_start(client->iso_context, request->cycle,
- request->sync, request->tags);
+ return fw_iso_context_start(client->iso_context,
+ a->cycle, a->sync, a->tags);
}
-static int ioctl_stop_iso(struct client *client, void *buffer)
+static int ioctl_stop_iso(struct client *client, union ioctl_arg *arg)
{
- struct fw_cdev_stop_iso *request = buffer;
+ struct fw_cdev_stop_iso *a = &arg->stop_iso;
- if (client->iso_context == NULL || request->handle != 0)
+ if (client->iso_context == NULL || a->handle != 0)
return -EINVAL;
return fw_iso_context_stop(client->iso_context);
}
-static int ioctl_get_cycle_timer(struct client *client, void *buffer)
+static int ioctl_get_cycle_timer2(struct client *client, union ioctl_arg *arg)
{
- struct fw_cdev_get_cycle_timer *request = buffer;
+ struct fw_cdev_get_cycle_timer2 *a = &arg->get_cycle_timer2;
struct fw_card *card = client->device->card;
- unsigned long long bus_time;
- struct timeval tv;
- unsigned long flags;
+ struct timespec ts = {0, 0};
+ u32 cycle_time;
+ int ret = 0;
- preempt_disable();
- local_irq_save(flags);
+ local_irq_disable();
- bus_time = card->driver->get_bus_time(card);
- do_gettimeofday(&tv);
+ cycle_time = card->driver->get_cycle_time(card);
- local_irq_restore(flags);
- preempt_enable();
+ switch (a->clk_id) {
+ case CLOCK_REALTIME: getnstimeofday(&ts); break;
+ case CLOCK_MONOTONIC: do_posix_clock_monotonic_gettime(&ts); break;
+ case CLOCK_MONOTONIC_RAW: getrawmonotonic(&ts); break;
+ default:
+ ret = -EINVAL;
+ }
- request->local_time = tv.tv_sec * 1000000ULL + tv.tv_usec;
- request->cycle_timer = bus_time & 0xffffffff;
+ local_irq_enable();
+
+ a->tv_sec = ts.tv_sec;
+ a->tv_nsec = ts.tv_nsec;
+ a->cycle_timer = cycle_time;
+
+ return ret;
+}
+
+static int ioctl_get_cycle_timer(struct client *client, union ioctl_arg *arg)
+{
+ struct fw_cdev_get_cycle_timer *a = &arg->get_cycle_timer;
+ struct fw_cdev_get_cycle_timer2 ct2;
+
+ ct2.clk_id = CLOCK_REALTIME;
+ ioctl_get_cycle_timer2(client, (union ioctl_arg *)&ct2);
+
+ a->local_time = ct2.tv_sec * USEC_PER_SEC + ct2.tv_nsec / NSEC_PER_USEC;
+ a->cycle_timer = ct2.cycle_timer;
+
return 0;
}
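/*
 * Editor's sketch of a user-space caller (not part of this patch). It assumes
 * the matching request code is FW_CDEV_IOC_GET_CYCLE_TIMER2 in
 * <linux/firewire-cdev.h> and that fd is an open /dev/fw* descriptor; the
 * struct fields are the ones handled by ioctl_get_cycle_timer2() above.
 */
#include <sys/ioctl.h>
#include <time.h>
#include <linux/firewire-cdev.h>

static int read_cycle_timer(int fd, struct fw_cdev_get_cycle_timer2 *ct)
{
	ct->clk_id = CLOCK_MONOTONIC;	/* or CLOCK_REALTIME / CLOCK_MONOTONIC_RAW */
	if (ioctl(fd, FW_CDEV_IOC_GET_CYCLE_TIMER2, ct) < 0)
		return -1;
	/* ct->tv_sec/tv_nsec: system time sampled with interrupts off,
	 * ct->cycle_timer: 1394 isochronous cycle timer register value */
	return 0;
}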
@@ -1220,33 +1238,32 @@
return ret;
}
-static int ioctl_allocate_iso_resource(struct client *client, void *buffer)
+static int ioctl_allocate_iso_resource(struct client *client,
+ union ioctl_arg *arg)
{
- struct fw_cdev_allocate_iso_resource *request = buffer;
-
- return init_iso_resource(client, request, ISO_RES_ALLOC);
+ return init_iso_resource(client,
+ &arg->allocate_iso_resource, ISO_RES_ALLOC);
}
-static int ioctl_deallocate_iso_resource(struct client *client, void *buffer)
+static int ioctl_deallocate_iso_resource(struct client *client,
+ union ioctl_arg *arg)
{
- struct fw_cdev_deallocate *request = buffer;
-
- return release_client_resource(client, request->handle,
- release_iso_resource, NULL);
+ return release_client_resource(client,
+ arg->deallocate.handle, release_iso_resource, NULL);
}
-static int ioctl_allocate_iso_resource_once(struct client *client, void *buffer)
+static int ioctl_allocate_iso_resource_once(struct client *client,
+ union ioctl_arg *arg)
{
- struct fw_cdev_allocate_iso_resource *request = buffer;
-
- return init_iso_resource(client, request, ISO_RES_ALLOC_ONCE);
+ return init_iso_resource(client,
+ &arg->allocate_iso_resource, ISO_RES_ALLOC_ONCE);
}
-static int ioctl_deallocate_iso_resource_once(struct client *client, void *buffer)
+static int ioctl_deallocate_iso_resource_once(struct client *client,
+ union ioctl_arg *arg)
{
- struct fw_cdev_allocate_iso_resource *request = buffer;
-
- return init_iso_resource(client, request, ISO_RES_DEALLOC_ONCE);
+ return init_iso_resource(client,
+ &arg->allocate_iso_resource, ISO_RES_DEALLOC_ONCE);
}
/*
@@ -1254,16 +1271,17 @@
* limited by the device's link speed, the local node's link speed,
* and all PHY port speeds between the two links.
*/
-static int ioctl_get_speed(struct client *client, void *buffer)
+static int ioctl_get_speed(struct client *client, union ioctl_arg *arg)
{
return client->device->max_speed;
}
-static int ioctl_send_broadcast_request(struct client *client, void *buffer)
+static int ioctl_send_broadcast_request(struct client *client,
+ union ioctl_arg *arg)
{
- struct fw_cdev_send_request *request = buffer;
+ struct fw_cdev_send_request *a = &arg->send_request;
- switch (request->tcode) {
+ switch (a->tcode) {
case TCODE_WRITE_QUADLET_REQUEST:
case TCODE_WRITE_BLOCK_REQUEST:
break;
@@ -1272,36 +1290,36 @@
}
/* Security policy: Only allow accesses to Units Space. */
- if (request->offset < CSR_REGISTER_BASE + CSR_CONFIG_ROM_END)
+ if (a->offset < CSR_REGISTER_BASE + CSR_CONFIG_ROM_END)
return -EACCES;
- return init_request(client, request, LOCAL_BUS | 0x3f, SCODE_100);
+ return init_request(client, a, LOCAL_BUS | 0x3f, SCODE_100);
}
-static int ioctl_send_stream_packet(struct client *client, void *buffer)
+static int ioctl_send_stream_packet(struct client *client, union ioctl_arg *arg)
{
- struct fw_cdev_send_stream_packet *p = buffer;
+ struct fw_cdev_send_stream_packet *a = &arg->send_stream_packet;
struct fw_cdev_send_request request;
int dest;
- if (p->speed > client->device->card->link_speed ||
- p->length > 1024 << p->speed)
+ if (a->speed > client->device->card->link_speed ||
+ a->length > 1024 << a->speed)
return -EIO;
- if (p->tag > 3 || p->channel > 63 || p->sy > 15)
+ if (a->tag > 3 || a->channel > 63 || a->sy > 15)
return -EINVAL;
- dest = fw_stream_packet_destination_id(p->tag, p->channel, p->sy);
+ dest = fw_stream_packet_destination_id(a->tag, a->channel, a->sy);
request.tcode = TCODE_STREAM_DATA;
- request.length = p->length;
- request.closure = p->closure;
- request.data = p->data;
- request.generation = p->generation;
+ request.length = a->length;
+ request.closure = a->closure;
+ request.data = a->data;
+ request.generation = a->generation;
- return init_request(client, &request, dest, p->speed);
+ return init_request(client, &request, dest, a->speed);
}
-static int (* const ioctl_handlers[])(struct client *client, void *buffer) = {
+static int (* const ioctl_handlers[])(struct client *, union ioctl_arg *) = {
ioctl_get_info,
ioctl_send_request,
ioctl_allocate,
@@ -1322,47 +1340,35 @@
ioctl_get_speed,
ioctl_send_broadcast_request,
ioctl_send_stream_packet,
+ ioctl_get_cycle_timer2,
};
static int dispatch_ioctl(struct client *client,
unsigned int cmd, void __user *arg)
{
- char buffer[sizeof(union {
- struct fw_cdev_get_info _00;
- struct fw_cdev_send_request _01;
- struct fw_cdev_allocate _02;
- struct fw_cdev_deallocate _03;
- struct fw_cdev_send_response _04;
- struct fw_cdev_initiate_bus_reset _05;
- struct fw_cdev_add_descriptor _06;
- struct fw_cdev_remove_descriptor _07;
- struct fw_cdev_create_iso_context _08;
- struct fw_cdev_queue_iso _09;
- struct fw_cdev_start_iso _0a;
- struct fw_cdev_stop_iso _0b;
- struct fw_cdev_get_cycle_timer _0c;
- struct fw_cdev_allocate_iso_resource _0d;
- struct fw_cdev_send_stream_packet _13;
- })];
+ union ioctl_arg buffer;
int ret;
+ if (fw_device_is_shutdown(client->device))
+ return -ENODEV;
+
if (_IOC_TYPE(cmd) != '#' ||
_IOC_NR(cmd) >= ARRAY_SIZE(ioctl_handlers))
return -EINVAL;
if (_IOC_DIR(cmd) & _IOC_WRITE) {
if (_IOC_SIZE(cmd) > sizeof(buffer) ||
- copy_from_user(buffer, arg, _IOC_SIZE(cmd)))
+ copy_from_user(&buffer, arg, _IOC_SIZE(cmd)))
return -EFAULT;
}
- ret = ioctl_handlers[_IOC_NR(cmd)](client, buffer);
+ ret = ioctl_handlers[_IOC_NR(cmd)](client, &buffer);
if (ret < 0)
return ret;
if (_IOC_DIR(cmd) & _IOC_READ) {
if (_IOC_SIZE(cmd) > sizeof(buffer) ||
- copy_to_user(arg, buffer, _IOC_SIZE(cmd)))
+ copy_to_user(arg, &buffer, _IOC_SIZE(cmd)))
return -EFAULT;
}
@@ -1372,24 +1378,14 @@
static long fw_device_op_ioctl(struct file *file,
unsigned int cmd, unsigned long arg)
{
- struct client *client = file->private_data;
-
- if (fw_device_is_shutdown(client->device))
- return -ENODEV;
-
- return dispatch_ioctl(client, cmd, (void __user *) arg);
+ return dispatch_ioctl(file->private_data, cmd, (void __user *)arg);
}
#ifdef CONFIG_COMPAT
static long fw_device_op_compat_ioctl(struct file *file,
unsigned int cmd, unsigned long arg)
{
- struct client *client = file->private_data;
-
- if (fw_device_is_shutdown(client->device))
- return -ENODEV;
-
- return dispatch_ioctl(client, cmd, compat_ptr(arg));
+ return dispatch_ioctl(file->private_data, cmd, compat_ptr(arg));
}
#endif
diff --git a/drivers/firewire/core-device.c b/drivers/firewire/core-device.c
index 9d0dfcb..014cabd 100644
--- a/drivers/firewire/core-device.c
+++ b/drivers/firewire/core-device.c
@@ -18,6 +18,7 @@
* Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
*/
+#include <linux/bug.h>
#include <linux/ctype.h>
#include <linux/delay.h>
#include <linux/device.h>
@@ -43,7 +44,7 @@
#include "core.h"
-void fw_csr_iterator_init(struct fw_csr_iterator *ci, u32 * p)
+void fw_csr_iterator_init(struct fw_csr_iterator *ci, const u32 *p)
{
ci->p = p + 1;
ci->end = ci->p + (p[0] >> 16);
@@ -59,9 +60,76 @@
}
EXPORT_SYMBOL(fw_csr_iterator_next);
+static const u32 *search_leaf(const u32 *directory, int search_key)
+{
+ struct fw_csr_iterator ci;
+ int last_key = 0, key, value;
+
+ fw_csr_iterator_init(&ci, directory);
+ while (fw_csr_iterator_next(&ci, &key, &value)) {
+ if (last_key == search_key &&
+ key == (CSR_DESCRIPTOR | CSR_LEAF))
+ return ci.p - 1 + value;
+
+ last_key = key;
+ }
+
+ return NULL;
+}
+
+static int textual_leaf_to_string(const u32 *block, char *buf, size_t size)
+{
+ unsigned int quadlets, i;
+ char c;
+
+ if (!size || !buf)
+ return -EINVAL;
+
+ quadlets = min(block[0] >> 16, 256U);
+ if (quadlets < 2)
+ return -ENODATA;
+
+ if (block[1] != 0 || block[2] != 0)
+ /* unknown language/character set */
+ return -ENODATA;
+
+ block += 3;
+ quadlets -= 2;
+ for (i = 0; i < quadlets * 4 && i < size - 1; i++) {
+ c = block[i / 4] >> (24 - 8 * (i % 4));
+ if (c == '\0')
+ break;
+ buf[i] = c;
+ }
+ buf[i] = '\0';
+
+ return i;
+}
+
+/**
+ * fw_csr_string - reads a string from the configuration ROM
+ * @directory: e.g. root directory or unit directory
+ * @key: the key of the preceding directory entry
+ * @buf: where to put the string
+ * @size: size of @buf, in bytes
+ *
+ * The string is taken from a minimal ASCII text descriptor leaf after
+ * the immediate entry with @key. The string is zero-terminated.
+ * Returns strlen(buf) or a negative error code.
+ */
+int fw_csr_string(const u32 *directory, int key, char *buf, size_t size)
+{
+ const u32 *leaf = search_leaf(directory, key);
+ if (!leaf)
+ return -ENOENT;
+
+ return textual_leaf_to_string(leaf, buf, size);
+}
+EXPORT_SYMBOL(fw_csr_string);
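/*
 * Editor's sketch of a caller (hypothetical): read the model name leaf of a
 * unit directory. CSR_MODEL and struct fw_unit come from <linux/firewire.h>.
 */
static void example_show_model(struct fw_unit *unit)
{
	char model[32];

	if (fw_csr_string(unit->directory, CSR_MODEL, model, sizeof(model)) >= 0)
		pr_info("model name: %s\n", model);
}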
+
static bool is_fw_unit(struct device *dev);
-static int match_unit_directory(u32 *directory, u32 match_flags,
+static int match_unit_directory(const u32 *directory, u32 match_flags,
const struct ieee1394_device_id *id)
{
struct fw_csr_iterator ci;
@@ -195,7 +263,7 @@
struct config_rom_attribute *attr =
container_of(dattr, struct config_rom_attribute, attr);
struct fw_csr_iterator ci;
- u32 *dir;
+ const u32 *dir;
int key, value, ret = -ENOENT;
down_read(&fw_device_rwsem);
@@ -226,10 +294,10 @@
{
struct config_rom_attribute *attr =
container_of(dattr, struct config_rom_attribute, attr);
- struct fw_csr_iterator ci;
- u32 *dir, *block = NULL, *p, *end;
- int length, key, value, last_key = 0, ret = -ENOENT;
- char *b;
+ const u32 *dir;
+ size_t bufsize;
+ char dummy_buf[2];
+ int ret;
down_read(&fw_device_rwsem);
@@ -238,40 +306,23 @@
else
dir = fw_device(dev)->config_rom + 5;
- fw_csr_iterator_init(&ci, dir);
- while (fw_csr_iterator_next(&ci, &key, &value)) {
- if (attr->key == last_key &&
- key == (CSR_DESCRIPTOR | CSR_LEAF))
- block = ci.p - 1 + value;
- last_key = key;
+ if (buf) {
+ bufsize = PAGE_SIZE - 1;
+ } else {
+ buf = dummy_buf;
+ bufsize = 1;
}
- if (block == NULL)
- goto out;
+ ret = fw_csr_string(dir, attr->key, buf, bufsize);
- length = min(block[0] >> 16, 256U);
- if (length < 3)
- goto out;
-
- if (block[1] != 0 || block[2] != 0)
- /* Unknown encoding. */
- goto out;
-
- if (buf == NULL) {
- ret = length * 4;
- goto out;
+ if (ret >= 0) {
+ /* Strip trailing whitespace and add newline. */
+ while (ret > 0 && isspace(buf[ret - 1]))
+ ret--;
+ strcpy(buf + ret, "\n");
+ ret++;
}
- b = buf;
- end = &block[length + 1];
- for (p = &block[3]; p < end; p++, b += 4)
- * (u32 *) b = (__force u32) __cpu_to_be32(*p);
-
- /* Strip trailing whitespace and add newline. */
- while (b--, (isspace(*b) || *b == '\0') && b > buf);
- strcpy(b + 1, "\n");
- ret = b + 2 - buf;
- out:
up_read(&fw_device_rwsem);
return ret;
@@ -371,7 +422,7 @@
return ret;
}
-static int units_sprintf(char *buf, u32 *directory)
+static int units_sprintf(char *buf, const u32 *directory)
{
struct fw_csr_iterator ci;
int key, value;
@@ -441,28 +492,29 @@
return rcode;
}
-#define READ_BIB_ROM_SIZE 256
-#define READ_BIB_STACK_SIZE 16
+#define MAX_CONFIG_ROM_SIZE 256
/*
* Read the bus info block, perform a speed probe, and read all of the rest of
* the config ROM. We do all this with a cached bus generation. If the bus
- * generation changes under us, read_bus_info_block will fail and get retried.
+ * generation changes under us, read_config_rom will fail and get retried.
* It's better to start all over in this case because the node from which we
* are reading the ROM may have changed the ROM during the reset.
*/
-static int read_bus_info_block(struct fw_device *device, int generation)
+static int read_config_rom(struct fw_device *device, int generation)
{
- u32 *rom, *stack, *old_rom, *new_rom;
+ const u32 *old_rom, *new_rom;
+ u32 *rom, *stack;
u32 sp, key;
int i, end, length, ret = -1;
- rom = kmalloc(sizeof(*rom) * READ_BIB_ROM_SIZE +
- sizeof(*stack) * READ_BIB_STACK_SIZE, GFP_KERNEL);
+ rom = kmalloc(sizeof(*rom) * MAX_CONFIG_ROM_SIZE +
+ sizeof(*stack) * MAX_CONFIG_ROM_SIZE, GFP_KERNEL);
if (rom == NULL)
return -ENOMEM;
- stack = &rom[READ_BIB_ROM_SIZE];
+ stack = &rom[MAX_CONFIG_ROM_SIZE];
+ memset(rom, 0, sizeof(*rom) * MAX_CONFIG_ROM_SIZE);
device->max_speed = SCODE_100;
@@ -529,40 +581,54 @@
*/
key = stack[--sp];
i = key & 0xffffff;
- if (i >= READ_BIB_ROM_SIZE)
- /*
- * The reference points outside the standard
- * config rom area, something's fishy.
- */
+ if (WARN_ON(i >= MAX_CONFIG_ROM_SIZE))
goto out;
/* Read header quadlet for the block to get the length. */
if (read_rom(device, generation, i, &rom[i]) != RCODE_COMPLETE)
goto out;
end = i + (rom[i] >> 16) + 1;
- i++;
- if (end > READ_BIB_ROM_SIZE)
+ if (end > MAX_CONFIG_ROM_SIZE) {
/*
- * This block extends outside standard config
- * area (and the array we're reading it
- * into). That's broken, so ignore this
- * device.
+ * This block extends outside the config ROM which is
+ * a firmware bug. Ignore this whole block, i.e.
+ * simply set a fake block length of 0.
*/
- goto out;
+ fw_error("skipped invalid ROM block %x at %llx\n",
+ rom[i],
+ i * 4 | CSR_REGISTER_BASE | CSR_CONFIG_ROM);
+ rom[i] = 0;
+ end = i;
+ }
+ i++;
/*
* Now read in the block. If this is a directory
* block, check the entries as we read them to see if
* it references another block, and push it in that case.
*/
- while (i < end) {
+ for (; i < end; i++) {
if (read_rom(device, generation, i, &rom[i]) !=
RCODE_COMPLETE)
goto out;
- if ((key >> 30) == 3 && (rom[i] >> 30) > 1 &&
- sp < READ_BIB_STACK_SIZE)
- stack[sp++] = i + rom[i];
- i++;
+
+ if ((key >> 30) != 3 || (rom[i] >> 30) < 2)
+ continue;
+ /*
+ * Offset points outside the ROM. May be a firmware
+ * bug or an Extended ROM entry (IEEE 1212-2001 clause
+ * 7.7.18). Simply overwrite this pointer here by a
+ * fake immediate entry so that later iterators over
+ * the ROM don't have to check offsets all the time.
+ */
+ if (i + (rom[i] & 0xffffff) >= MAX_CONFIG_ROM_SIZE) {
+ fw_error("skipped unsupported ROM entry %x at %llx\n",
+ rom[i],
+ i * 4 | CSR_REGISTER_BASE | CSR_CONFIG_ROM);
+ rom[i] = 0;
+ continue;
+ }
+ stack[sp++] = i + rom[i];
}
if (length < i)
length = i;
@@ -905,7 +971,7 @@
* device.
*/
- if (read_bus_info_block(device, device->generation) < 0) {
+ if (read_config_rom(device, device->generation) < 0) {
if (device->config_rom_retries < MAX_RETRIES &&
atomic_read(&device->state) == FW_DEVICE_INITIALIZING) {
device->config_rom_retries++;
@@ -1022,7 +1088,7 @@
};
/* Reread and compare bus info block and header of root directory */
-static int reread_bus_info_block(struct fw_device *device, int generation)
+static int reread_config_rom(struct fw_device *device, int generation)
{
u32 q;
int i;
@@ -1048,7 +1114,7 @@
struct fw_card *card = device->card;
int node_id = device->node_id;
- switch (reread_bus_info_block(device, device->generation)) {
+ switch (reread_config_rom(device, device->generation)) {
case REREAD_BIB_ERROR:
if (device->config_rom_retries < MAX_RETRIES / 2 &&
atomic_read(&device->state) == FW_DEVICE_INITIALIZING) {
@@ -1082,7 +1148,7 @@
*/
device_for_each_child(&device->device, NULL, shutdown_unit);
- if (read_bus_info_block(device, device->generation) < 0) {
+ if (read_config_rom(device, device->generation) < 0) {
if (device->config_rom_retries < MAX_RETRIES &&
atomic_read(&device->state) == FW_DEVICE_INITIALIZING) {
device->config_rom_retries++;
diff --git a/drivers/firewire/core-transaction.c b/drivers/firewire/core-transaction.c
index 495849e..673b03f 100644
--- a/drivers/firewire/core-transaction.c
+++ b/drivers/firewire/core-transaction.c
@@ -921,23 +921,15 @@
void *payload, size_t length, void *callback_data)
{
int reg = offset & ~CSR_REGISTER_BASE;
- unsigned long long bus_time;
__be32 *data = payload;
int rcode = RCODE_COMPLETE;
switch (reg) {
case CSR_CYCLE_TIME:
- case CSR_BUS_TIME:
- if (!TCODE_IS_READ_REQUEST(tcode) || length != 4) {
- rcode = RCODE_TYPE_ERROR;
- break;
- }
-
- bus_time = card->driver->get_bus_time(card);
- if (reg == CSR_CYCLE_TIME)
- *data = cpu_to_be32(bus_time);
+ if (TCODE_IS_READ_REQUEST(tcode) && length == 4)
+ *data = cpu_to_be32(card->driver->get_cycle_time(card));
else
- *data = cpu_to_be32(bus_time >> 25);
+ rcode = RCODE_TYPE_ERROR;
break;
case CSR_BROADCAST_CHANNEL:
@@ -968,6 +960,9 @@
case CSR_BUSY_TIMEOUT:
/* FIXME: Implement this. */
+ case CSR_BUS_TIME:
+ /* Useless without initialization by the bus manager. */
+
default:
rcode = RCODE_ADDRESS_ERROR;
break;
diff --git a/drivers/firewire/core.h b/drivers/firewire/core.h
index ed3b1a7..fb03213 100644
--- a/drivers/firewire/core.h
+++ b/drivers/firewire/core.h
@@ -70,7 +70,7 @@
int (*enable_phys_dma)(struct fw_card *card,
int node_id, int generation);
- u64 (*get_bus_time)(struct fw_card *card);
+ u32 (*get_cycle_time)(struct fw_card *card);
struct fw_iso_context *
(*allocate_iso_context)(struct fw_card *card,
diff --git a/drivers/firewire/ohci.c b/drivers/firewire/ohci.c
index 43ebf33..75dc698 100644
--- a/drivers/firewire/ohci.c
+++ b/drivers/firewire/ohci.c
@@ -38,7 +38,6 @@
#include <linux/spinlock.h>
#include <linux/string.h>
-#include <asm/atomic.h>
#include <asm/byteorder.h>
#include <asm/page.h>
#include <asm/system.h>
@@ -73,20 +72,6 @@
__le16 transfer_status;
} __attribute__((aligned(16)));
-struct db_descriptor {
- __le16 first_size;
- __le16 control;
- __le16 second_req_count;
- __le16 first_req_count;
- __le32 branch_address;
- __le16 second_res_count;
- __le16 first_res_count;
- __le32 reserved0;
- __le32 first_buffer;
- __le32 second_buffer;
- __le32 reserved1;
-} __attribute__((aligned(16)));
-
#define CONTROL_SET(regs) (regs)
#define CONTROL_CLEAR(regs) ((regs) + 4)
#define COMMAND_PTR(regs) ((regs) + 12)
@@ -181,31 +166,16 @@
struct fw_card card;
__iomem char *registers;
- dma_addr_t self_id_bus;
- __le32 *self_id_cpu;
- struct tasklet_struct bus_reset_tasklet;
int node_id;
int generation;
int request_generation; /* for timestamping incoming requests */
- atomic_t bus_seconds;
-
- bool use_dualbuffer;
- bool old_uninorth;
- bool bus_reset_packet_quirk;
+ unsigned quirks;
/*
* Spinlock for accessing fw_ohci data. Never call out of
* this driver with this lock held.
*/
spinlock_t lock;
- u32 self_id_buffer[512];
-
- /* Config rom buffers */
- __be32 *config_rom;
- dma_addr_t config_rom_bus;
- __be32 *next_config_rom;
- dma_addr_t next_config_rom_bus;
- __be32 next_header;
struct ar_context ar_request_ctx;
struct ar_context ar_response_ctx;
@@ -217,6 +187,18 @@
u64 ir_context_channels;
u32 ir_context_mask;
struct iso_context *ir_context_list;
+
+ __be32 *config_rom;
+ dma_addr_t config_rom_bus;
+ __be32 *next_config_rom;
+ dma_addr_t next_config_rom_bus;
+ __be32 next_header;
+
+ __le32 *self_id_cpu;
+ dma_addr_t self_id_bus;
+ struct tasklet_struct bus_reset_tasklet;
+
+ u32 self_id_buffer[512];
};
static inline struct fw_ohci *fw_ohci(struct fw_card *card)
@@ -249,6 +231,30 @@
static char ohci_driver_name[] = KBUILD_MODNAME;
+#define QUIRK_CYCLE_TIMER 1
+#define QUIRK_RESET_PACKET 2
+#define QUIRK_BE_HEADERS 4
+
+/* In case of multiple matches in ohci_quirks[], only the first one is used. */
+static const struct {
+ unsigned short vendor, device, flags;
+} ohci_quirks[] = {
+ {PCI_VENDOR_ID_TI, PCI_ANY_ID, QUIRK_RESET_PACKET},
+ {PCI_VENDOR_ID_AL, PCI_ANY_ID, QUIRK_CYCLE_TIMER},
+ {PCI_VENDOR_ID_NEC, PCI_ANY_ID, QUIRK_CYCLE_TIMER},
+ {PCI_VENDOR_ID_VIA, PCI_ANY_ID, QUIRK_CYCLE_TIMER},
+ {PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_UNI_N_FW, QUIRK_BE_HEADERS},
+};
+
+/* This overrides anything that was found in ohci_quirks[]. */
+static int param_quirks;
+module_param_named(quirks, param_quirks, int, 0644);
+MODULE_PARM_DESC(quirks, "Chip quirks (default = 0"
+ ", nonatomic cycle timer = " __stringify(QUIRK_CYCLE_TIMER)
+ ", reset packet generation = " __stringify(QUIRK_RESET_PACKET)
+ ", AR/selfID endianess = " __stringify(QUIRK_BE_HEADERS)
+ ")");
+
#ifdef CONFIG_FIREWIRE_OHCI_DEBUG
#define OHCI_PARAM_DEBUG_AT_AR 1
@@ -275,7 +281,7 @@
!(evt & OHCI1394_busReset))
return;
- fw_notify("IRQ %08x%s%s%s%s%s%s%s%s%s%s%s%s%s%s\n", evt,
+ fw_notify("IRQ %08x%s%s%s%s%s%s%s%s%s%s%s%s%s\n", evt,
evt & OHCI1394_selfIDComplete ? " selfID" : "",
evt & OHCI1394_RQPkt ? " AR_req" : "",
evt & OHCI1394_RSPkt ? " AR_resp" : "",
@@ -285,7 +291,6 @@
evt & OHCI1394_isochTx ? " IT" : "",
evt & OHCI1394_postedWriteErr ? " postedWriteErr" : "",
evt & OHCI1394_cycleTooLong ? " cycleTooLong" : "",
- evt & OHCI1394_cycle64Seconds ? " cycle64Seconds" : "",
evt & OHCI1394_cycleInconsistent ? " cycleInconsistent" : "",
evt & OHCI1394_regAccessFail ? " regAccessFail" : "",
evt & OHCI1394_busReset ? " busReset" : "",
@@ -293,8 +298,7 @@
OHCI1394_RSPkt | OHCI1394_reqTxComplete |
OHCI1394_respTxComplete | OHCI1394_isochRx |
OHCI1394_isochTx | OHCI1394_postedWriteErr |
- OHCI1394_cycleTooLong | OHCI1394_cycle64Seconds |
- OHCI1394_cycleInconsistent |
+ OHCI1394_cycleTooLong | OHCI1394_cycleInconsistent |
OHCI1394_regAccessFail | OHCI1394_busReset)
? " ?" : "");
}
@@ -524,7 +528,7 @@
#if defined(CONFIG_PPC_PMAC) && defined(CONFIG_PPC32)
#define cond_le32_to_cpu(v) \
- (ohci->old_uninorth ? (__force __u32)(v) : le32_to_cpu(v))
+ (ohci->quirks & QUIRK_BE_HEADERS ? (__force __u32)(v) : le32_to_cpu(v))
#else
#define cond_le32_to_cpu(v) le32_to_cpu(v)
#endif
@@ -605,7 +609,7 @@
* at a slightly incorrect time (in bus_reset_tasklet).
*/
if (evt == OHCI1394_evt_bus_reset) {
- if (!ohci->bus_reset_packet_quirk)
+ if (!(ohci->quirks & QUIRK_RESET_PACKET))
ohci->request_generation = (p.header[2] >> 16) & 0xff;
} else if (ctx == &ohci->ar_request_ctx) {
fw_core_handle_request(&ohci->card, &p);
@@ -1329,7 +1333,7 @@
context_stop(&ohci->at_response_ctx);
reg_write(ohci, OHCI1394_IntEventClear, OHCI1394_busReset);
- if (ohci->bus_reset_packet_quirk)
+ if (ohci->quirks & QUIRK_RESET_PACKET)
ohci->request_generation = generation;
/*
@@ -1384,7 +1388,7 @@
static irqreturn_t irq_handler(int irq, void *data)
{
struct fw_ohci *ohci = data;
- u32 event, iso_event, cycle_time;
+ u32 event, iso_event;
int i;
event = reg_read(ohci, OHCI1394_IntEventClear);
@@ -1454,12 +1458,6 @@
fw_notify("isochronous cycle inconsistent\n");
}
- if (event & OHCI1394_cycle64Seconds) {
- cycle_time = reg_read(ohci, OHCI1394_IsochronousCycleTimer);
- if ((cycle_time & 0x80000000) == 0)
- atomic_inc(&ohci->bus_seconds);
- }
-
return IRQ_HANDLED;
}
@@ -1553,8 +1551,7 @@
OHCI1394_reqTxComplete | OHCI1394_respTxComplete |
OHCI1394_isochRx | OHCI1394_isochTx |
OHCI1394_postedWriteErr | OHCI1394_cycleTooLong |
- OHCI1394_cycleInconsistent |
- OHCI1394_cycle64Seconds | OHCI1394_regAccessFail |
+ OHCI1394_cycleInconsistent | OHCI1394_regAccessFail |
OHCI1394_masterIntEnable);
if (param_debug & OHCI_PARAM_DEBUG_BUSRESETS)
reg_write(ohci, OHCI1394_IntMaskSet, OHCI1394_busReset);
@@ -1794,16 +1791,61 @@
#endif /* CONFIG_FIREWIRE_OHCI_REMOTE_DMA */
}
-static u64 ohci_get_bus_time(struct fw_card *card)
+static u32 cycle_timer_ticks(u32 cycle_timer)
+{
+ u32 ticks;
+
+ ticks = cycle_timer & 0xfff;
+ ticks += 3072 * ((cycle_timer >> 12) & 0x1fff);
+ ticks += (3072 * 8000) * (cycle_timer >> 25);
+
+ return ticks;
+}
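/*
 * Editor's worked example for cycle_timer_ticks(): the register packs
 * cycleSeconds (bits 31-25), cycleCount (bits 24-12, 0..7999) and
 * cycleOffset (bits 11-0, 0..3071). For cycle_timer = 0x02345678:
 *	cycleOffset  = 0x678              =     1656 ticks
 *	cycleCount   = 0x345 * 3072       =  2571264 ticks
 *	cycleSeconds = 1     * 3072 * 8000 = 24576000 ticks
 *	total                             = 27148920 ticks of the 24.576 MHz clock
 */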
+
+/*
+ * Some controllers exhibit one or more of the following bugs when updating the
+ * iso cycle timer register:
+ * - When the lowest six bits are wrapping around to zero, a read that happens
+ * at the same time will return garbage in the lowest ten bits.
+ * - When the cycleOffset field wraps around to zero, the cycleCount field is
+ * not incremented for about 60 ns.
+ * - Occasionally, the entire register reads zero.
+ *
+ * To catch these, we read the register three times and ensure that the
+ * difference between each two consecutive reads is approximately the same, i.e.
+ * less than twice the other. Furthermore, any negative difference indicates an
+ * error. (A PCI read should take at least 20 ticks of the 24.576 MHz timer to
+ * execute, so we have enough precision to compute the ratio of the differences.)
+ */
+static u32 ohci_get_cycle_time(struct fw_card *card)
{
struct fw_ohci *ohci = fw_ohci(card);
- u32 cycle_time;
- u64 bus_time;
+ u32 c0, c1, c2;
+ u32 t0, t1, t2;
+ s32 diff01, diff12;
+ int i;
- cycle_time = reg_read(ohci, OHCI1394_IsochronousCycleTimer);
- bus_time = ((u64)atomic_read(&ohci->bus_seconds) << 32) | cycle_time;
+ c2 = reg_read(ohci, OHCI1394_IsochronousCycleTimer);
- return bus_time;
+ if (ohci->quirks & QUIRK_CYCLE_TIMER) {
+ i = 0;
+ c1 = c2;
+ c2 = reg_read(ohci, OHCI1394_IsochronousCycleTimer);
+ do {
+ c0 = c1;
+ c1 = c2;
+ c2 = reg_read(ohci, OHCI1394_IsochronousCycleTimer);
+ t0 = cycle_timer_ticks(c0);
+ t1 = cycle_timer_ticks(c1);
+ t2 = cycle_timer_ticks(c2);
+ diff01 = t1 - t0;
+ diff12 = t2 - t1;
+ } while ((diff01 <= 0 || diff12 <= 0 ||
+ diff01 / diff12 >= 2 || diff12 / diff01 >= 2)
+ && i++ < 20);
+ }
+
+ return c2;
}
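/*
 * Editor's illustration of the acceptance test above: with t0, t1, t2
 * converted to ticks, diff01 = t1 - t0 and diff12 = t2 - t1. Two clean
 * back-to-back reads, say diff01 = 40 and diff12 = 55, pass because both are
 * positive and the integer ratios 40/55 = 0 and 55/40 = 1 stay below 2. A
 * glitched read that yields diff01 = 40 but diff12 = 400 fails the test
 * (400/40 = 10 >= 2), so the loop rereads; it gives up after about 20
 * retries and returns the most recent value. A register that momentarily
 * reads zero shows up as a negative difference and is rejected the same way.
 */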
static void copy_iso_headers(struct iso_context *ctx, void *p)
@@ -1828,52 +1870,6 @@
ctx->header_length += ctx->base.header_size;
}
-static int handle_ir_dualbuffer_packet(struct context *context,
- struct descriptor *d,
- struct descriptor *last)
-{
- struct iso_context *ctx =
- container_of(context, struct iso_context, context);
- struct db_descriptor *db = (struct db_descriptor *) d;
- __le32 *ir_header;
- size_t header_length;
- void *p, *end;
-
- if (db->first_res_count != 0 && db->second_res_count != 0) {
- if (ctx->excess_bytes <= le16_to_cpu(db->second_req_count)) {
- /* This descriptor isn't done yet, stop iteration. */
- return 0;
- }
- ctx->excess_bytes -= le16_to_cpu(db->second_req_count);
- }
-
- header_length = le16_to_cpu(db->first_req_count) -
- le16_to_cpu(db->first_res_count);
-
- p = db + 1;
- end = p + header_length;
- while (p < end) {
- copy_iso_headers(ctx, p);
- ctx->excess_bytes +=
- (le32_to_cpu(*(__le32 *)(p + 4)) >> 16) & 0xffff;
- p += max(ctx->base.header_size, (size_t)8);
- }
-
- ctx->excess_bytes -= le16_to_cpu(db->second_req_count) -
- le16_to_cpu(db->second_res_count);
-
- if (le16_to_cpu(db->control) & DESCRIPTOR_IRQ_ALWAYS) {
- ir_header = (__le32 *) (db + 1);
- ctx->base.callback(&ctx->base,
- le32_to_cpu(ir_header[0]) & 0xffff,
- ctx->header_length, ctx->header,
- ctx->base.callback_data);
- ctx->header_length = 0;
- }
-
- return 1;
-}
-
static int handle_ir_packet_per_buffer(struct context *context,
struct descriptor *d,
struct descriptor *last)
@@ -1960,10 +1956,7 @@
channels = &ohci->ir_context_channels;
mask = &ohci->ir_context_mask;
list = ohci->ir_context_list;
- if (ohci->use_dualbuffer)
- callback = handle_ir_dualbuffer_packet;
- else
- callback = handle_ir_packet_per_buffer;
+ callback = handle_ir_packet_per_buffer;
}
spin_lock_irqsave(&ohci->lock, flags);
@@ -2026,8 +2019,6 @@
} else {
index = ctx - ohci->ir_context_list;
control = IR_CONTEXT_ISOCH_HEADER;
- if (ohci->use_dualbuffer)
- control |= IR_CONTEXT_DUAL_BUFFER_MODE;
match = (tags << 28) | (sync << 8) | ctx->base.channel;
if (cycle >= 0) {
match |= (cycle & 0x07fff) << 12;
@@ -2188,92 +2179,6 @@
return 0;
}
-static int ohci_queue_iso_receive_dualbuffer(struct fw_iso_context *base,
- struct fw_iso_packet *packet,
- struct fw_iso_buffer *buffer,
- unsigned long payload)
-{
- struct iso_context *ctx = container_of(base, struct iso_context, base);
- struct db_descriptor *db = NULL;
- struct descriptor *d;
- struct fw_iso_packet *p;
- dma_addr_t d_bus, page_bus;
- u32 z, header_z, length, rest;
- int page, offset, packet_count, header_size;
-
- /*
- * FIXME: Cycle lost behavior should be configurable: lose
- * packet, retransmit or terminate..
- */
-
- p = packet;
- z = 2;
-
- /*
- * The OHCI controller puts the isochronous header and trailer in the
- * buffer, so we need at least 8 bytes.
- */
- packet_count = p->header_length / ctx->base.header_size;
- header_size = packet_count * max(ctx->base.header_size, (size_t)8);
-
- /* Get header size in number of descriptors. */
- header_z = DIV_ROUND_UP(header_size, sizeof(*d));
- page = payload >> PAGE_SHIFT;
- offset = payload & ~PAGE_MASK;
- rest = p->payload_length;
- /*
- * The controllers I've tested have not worked correctly when
- * second_req_count is zero. Rather than do something we know won't
- * work, return an error
- */
- if (rest == 0)
- return -EINVAL;
-
- while (rest > 0) {
- d = context_get_descriptors(&ctx->context,
- z + header_z, &d_bus);
- if (d == NULL)
- return -ENOMEM;
-
- db = (struct db_descriptor *) d;
- db->control = cpu_to_le16(DESCRIPTOR_STATUS |
- DESCRIPTOR_BRANCH_ALWAYS);
- db->first_size =
- cpu_to_le16(max(ctx->base.header_size, (size_t)8));
- if (p->skip && rest == p->payload_length) {
- db->control |= cpu_to_le16(DESCRIPTOR_WAIT);
- db->first_req_count = db->first_size;
- } else {
- db->first_req_count = cpu_to_le16(header_size);
- }
- db->first_res_count = db->first_req_count;
- db->first_buffer = cpu_to_le32(d_bus + sizeof(*db));
-
- if (p->skip && rest == p->payload_length)
- length = 4;
- else if (offset + rest < PAGE_SIZE)
- length = rest;
- else
- length = PAGE_SIZE - offset;
-
- db->second_req_count = cpu_to_le16(length);
- db->second_res_count = db->second_req_count;
- page_bus = page_private(buffer->pages[page]);
- db->second_buffer = cpu_to_le32(page_bus + offset);
-
- if (p->interrupt && length == rest)
- db->control |= cpu_to_le16(DESCRIPTOR_IRQ_ALWAYS);
-
- context_append(&ctx->context, d, z, header_z);
- offset = (offset + length) & ~PAGE_MASK;
- rest -= length;
- if (offset == 0)
- page++;
- }
-
- return 0;
-}
-
static int ohci_queue_iso_receive_packet_per_buffer(struct fw_iso_context *base,
struct fw_iso_packet *packet,
struct fw_iso_buffer *buffer,
@@ -2364,9 +2269,6 @@
spin_lock_irqsave(&ctx->context.ohci->lock, flags);
if (base->type == FW_ISO_CONTEXT_TRANSMIT)
ret = ohci_queue_iso_transmit(base, packet, buffer, payload);
- else if (ctx->context.ohci->use_dualbuffer)
- ret = ohci_queue_iso_receive_dualbuffer(base, packet,
- buffer, payload);
else
ret = ohci_queue_iso_receive_packet_per_buffer(base, packet,
buffer, payload);
@@ -2383,7 +2285,7 @@
.send_response = ohci_send_response,
.cancel_packet = ohci_cancel_packet,
.enable_phys_dma = ohci_enable_phys_dma,
- .get_bus_time = ohci_get_bus_time,
+ .get_cycle_time = ohci_get_cycle_time,
.allocate_iso_context = ohci_allocate_iso_context,
.free_iso_context = ohci_free_iso_context,
@@ -2421,17 +2323,13 @@
#define ohci_pmac_off(dev)
#endif /* CONFIG_PPC_PMAC */
-#define PCI_VENDOR_ID_AGERE PCI_VENDOR_ID_ATT
-#define PCI_DEVICE_ID_AGERE_FW643 0x5901
-#define PCI_DEVICE_ID_TI_TSB43AB23 0x8024
-
static int __devinit pci_probe(struct pci_dev *dev,
const struct pci_device_id *ent)
{
struct fw_ohci *ohci;
u32 bus_options, max_receive, link_speed, version;
u64 guid;
- int err;
+ int i, err, n_ir, n_it;
size_t size;
ohci = kzalloc(sizeof(*ohci), GFP_KERNEL);
@@ -2472,36 +2370,15 @@
goto fail_iomem;
}
- version = reg_read(ohci, OHCI1394_Version) & 0x00ff00ff;
-#if 0
- /* FIXME: make it a context option or remove dual-buffer mode */
- ohci->use_dualbuffer = version >= OHCI_VERSION_1_1;
-#endif
-
- /* dual-buffer mode is broken if more than one IR context is active */
- if (dev->vendor == PCI_VENDOR_ID_AGERE &&
- dev->device == PCI_DEVICE_ID_AGERE_FW643)
- ohci->use_dualbuffer = false;
-
- /* dual-buffer mode is broken */
- if (dev->vendor == PCI_VENDOR_ID_RICOH &&
- dev->device == PCI_DEVICE_ID_RICOH_R5C832)
- ohci->use_dualbuffer = false;
-
-/* x86-32 currently doesn't use highmem for dma_alloc_coherent */
-#if !defined(CONFIG_X86_32)
- /* dual-buffer mode is broken with descriptor addresses above 2G */
- if (dev->vendor == PCI_VENDOR_ID_TI &&
- (dev->device == PCI_DEVICE_ID_TI_TSB43AB22 ||
- dev->device == PCI_DEVICE_ID_TI_TSB43AB23))
- ohci->use_dualbuffer = false;
-#endif
-
-#if defined(CONFIG_PPC_PMAC) && defined(CONFIG_PPC32)
- ohci->old_uninorth = dev->vendor == PCI_VENDOR_ID_APPLE &&
- dev->device == PCI_DEVICE_ID_APPLE_UNI_N_FW;
-#endif
- ohci->bus_reset_packet_quirk = dev->vendor == PCI_VENDOR_ID_TI;
+ for (i = 0; i < ARRAY_SIZE(ohci_quirks); i++)
+ if (ohci_quirks[i].vendor == dev->vendor &&
+ (ohci_quirks[i].device == dev->device ||
+ ohci_quirks[i].device == (unsigned short)PCI_ANY_ID)) {
+ ohci->quirks = ohci_quirks[i].flags;
+ break;
+ }
+ if (param_quirks)
+ ohci->quirks = param_quirks;
ar_context_init(&ohci->ar_request_ctx, ohci,
OHCI1394_AsReqRcvContextControlSet);
@@ -2516,17 +2393,19 @@
OHCI1394_AsRspTrContextControlSet, handle_at_packet);
reg_write(ohci, OHCI1394_IsoRecvIntMaskSet, ~0);
- ohci->it_context_mask = reg_read(ohci, OHCI1394_IsoRecvIntMaskSet);
+ ohci->ir_context_channels = ~0ULL;
+ ohci->ir_context_mask = reg_read(ohci, OHCI1394_IsoRecvIntMaskSet);
reg_write(ohci, OHCI1394_IsoRecvIntMaskClear, ~0);
- size = sizeof(struct iso_context) * hweight32(ohci->it_context_mask);
- ohci->it_context_list = kzalloc(size, GFP_KERNEL);
+ n_ir = hweight32(ohci->ir_context_mask);
+ size = sizeof(struct iso_context) * n_ir;
+ ohci->ir_context_list = kzalloc(size, GFP_KERNEL);
reg_write(ohci, OHCI1394_IsoXmitIntMaskSet, ~0);
- ohci->ir_context_channels = ~0ULL;
- ohci->ir_context_mask = reg_read(ohci, OHCI1394_IsoXmitIntMaskSet);
+ ohci->it_context_mask = reg_read(ohci, OHCI1394_IsoXmitIntMaskSet);
reg_write(ohci, OHCI1394_IsoXmitIntMaskClear, ~0);
- size = sizeof(struct iso_context) * hweight32(ohci->ir_context_mask);
- ohci->ir_context_list = kzalloc(size, GFP_KERNEL);
+ n_it = hweight32(ohci->it_context_mask);
+ size = sizeof(struct iso_context) * n_it;
+ ohci->it_context_list = kzalloc(size, GFP_KERNEL);
if (ohci->it_context_list == NULL || ohci->ir_context_list == NULL) {
err = -ENOMEM;
@@ -2553,8 +2432,11 @@
if (err)
goto fail_self_id;
- fw_notify("Added fw-ohci device %s, OHCI version %x.%x\n",
- dev_name(&dev->dev), version >> 16, version & 0xff);
+ version = reg_read(ohci, OHCI1394_Version) & 0x00ff00ff;
+ fw_notify("Added fw-ohci device %s, OHCI v%x.%x, "
+ "%d IR + %d IT contexts, quirks 0x%x\n",
+ dev_name(&dev->dev), version >> 16, version & 0xff,
+ n_ir, n_it, ohci->quirks);
return 0;
@@ -2662,7 +2544,7 @@
}
#endif
-static struct pci_device_id pci_table[] = {
+static const struct pci_device_id pci_table[] = {
{ PCI_DEVICE_CLASS(PCI_CLASS_SERIAL_FIREWIRE_OHCI, ~0) },
{ }
};
diff --git a/drivers/firewire/sbp2.c b/drivers/firewire/sbp2.c
index 70fef40..ca264f2 100644
--- a/drivers/firewire/sbp2.c
+++ b/drivers/firewire/sbp2.c
@@ -1014,7 +1014,8 @@
return 0;
}
-static int sbp2_scan_logical_unit_dir(struct sbp2_target *tgt, u32 *directory)
+static int sbp2_scan_logical_unit_dir(struct sbp2_target *tgt,
+ const u32 *directory)
{
struct fw_csr_iterator ci;
int key, value;
@@ -1027,7 +1028,7 @@
return 0;
}
-static int sbp2_scan_unit_dir(struct sbp2_target *tgt, u32 *directory,
+static int sbp2_scan_unit_dir(struct sbp2_target *tgt, const u32 *directory,
u32 *model, u32 *firmware_revision)
{
struct fw_csr_iterator ci;
diff --git a/drivers/hid/usbhid/hiddev.c b/drivers/hid/usbhid/hiddev.c
index 867e084..433602a 100644
--- a/drivers/hid/usbhid/hiddev.c
+++ b/drivers/hid/usbhid/hiddev.c
@@ -265,9 +265,10 @@
static int hiddev_open(struct inode *inode, struct file *file)
{
struct hiddev_list *list;
- int res;
+ int res, i;
- int i = iminor(inode) - HIDDEV_MINOR_BASE;
+ lock_kernel();
+ i = iminor(inode) - HIDDEV_MINOR_BASE;
if (i >= HIDDEV_MINORS || i < 0 || !hiddev_table[i])
return -ENODEV;
@@ -313,10 +314,12 @@
usbhid_open(hid);
}
+ unlock_kernel();
return 0;
bail:
file->private_data = NULL;
kfree(list);
+ unlock_kernel();
return res;
}
diff --git a/drivers/i2c/Kconfig b/drivers/i2c/Kconfig
index 8d8a00e..02ce9cf 100644
--- a/drivers/i2c/Kconfig
+++ b/drivers/i2c/Kconfig
@@ -61,6 +61,16 @@
In doubt, say Y.
+config I2C_SMBUS
+ tristate "SMBus-specific protocols" if !I2C_HELPER_AUTO
+ help
+ Say Y here if you want support for SMBus extensions to the I2C
+ specification. At the moment, the only supported extension is
+ the SMBus alert protocol.
+
+ This support is also available as a module. If so, the module
+ will be called i2c-smbus.
+
source drivers/i2c/algos/Kconfig
source drivers/i2c/busses/Kconfig
source drivers/i2c/chips/Kconfig
diff --git a/drivers/i2c/Makefile b/drivers/i2c/Makefile
index ba26e6c..acd0250 100644
--- a/drivers/i2c/Makefile
+++ b/drivers/i2c/Makefile
@@ -4,6 +4,7 @@
obj-$(CONFIG_I2C_BOARDINFO) += i2c-boardinfo.o
obj-$(CONFIG_I2C) += i2c-core.o
+obj-$(CONFIG_I2C_SMBUS) += i2c-smbus.o
obj-$(CONFIG_I2C_CHARDEV) += i2c-dev.o
obj-y += busses/ chips/ algos/
diff --git a/drivers/i2c/algos/i2c-algo-pca.c b/drivers/i2c/algos/i2c-algo-pca.c
index 78d42aa..dcdaf8e 100644
--- a/drivers/i2c/algos/i2c-algo-pca.c
+++ b/drivers/i2c/algos/i2c-algo-pca.c
@@ -453,8 +453,6 @@
*/
int raise_fall_time;
- struct i2c_algo_pca_data *pca_data = adap->algo_data;
-
/* Ignore the reset function from the module,
* we can use the parallel bus reset
*/
diff --git a/drivers/i2c/busses/Kconfig b/drivers/i2c/busses/Kconfig
index 737f052..4cc3807 100644
--- a/drivers/i2c/busses/Kconfig
+++ b/drivers/i2c/busses/Kconfig
@@ -77,7 +77,7 @@
will be called i2c-amd8111.
config I2C_I801
- tristate "Intel 82801 (ICH)"
+ tristate "Intel 82801 (ICH/PCH)"
depends on PCI
help
If you say yes to this option, support will be included for the Intel
@@ -97,7 +97,8 @@
ICH9
Tolapai
ICH10
- PCH
+ 3400/5 Series (PCH)
+ Cougar Point (PCH)
This driver can also be built as a module. If so, the module
will be called i2c-i801.
@@ -580,6 +581,7 @@
tristate "Parallel port adapter"
depends on PARPORT
select I2C_ALGOBIT
+ select I2C_SMBUS
help
This supports parallel port I2C adapters such as the ones made by
Philips or Velleman, Analog Devices evaluation boards, and more.
@@ -603,6 +605,7 @@
config I2C_PARPORT_LIGHT
tristate "Parallel port adapter (light)"
select I2C_ALGOBIT
+ select I2C_SMBUS
help
This supports parallel port I2C adapters such as the ones made by
Philips or Velleman, Analog Devices evaluation boards, and more.
diff --git a/drivers/i2c/busses/i2c-ali1535.c b/drivers/i2c/busses/i2c-ali1535.c
index 8de7d7b..bd8f1e4 100644
--- a/drivers/i2c/busses/i2c-ali1535.c
+++ b/drivers/i2c/busses/i2c-ali1535.c
@@ -480,7 +480,7 @@
.algo = &smbus_algorithm,
};
-static struct pci_device_id ali1535_ids[] = {
+static const struct pci_device_id ali1535_ids[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_AL, PCI_DEVICE_ID_AL_M7101) },
{ },
};
diff --git a/drivers/i2c/busses/i2c-ali1563.c b/drivers/i2c/busses/i2c-ali1563.c
index 4687af4..a409cfc 100644
--- a/drivers/i2c/busses/i2c-ali1563.c
+++ b/drivers/i2c/busses/i2c-ali1563.c
@@ -417,7 +417,7 @@
ali1563_shutdown(dev);
}
-static struct pci_device_id __devinitdata ali1563_id_table[] = {
+static const struct pci_device_id ali1563_id_table[] __devinitconst = {
{ PCI_DEVICE(PCI_VENDOR_ID_AL, PCI_DEVICE_ID_AL_M1563) },
{},
};
diff --git a/drivers/i2c/busses/i2c-ali15x3.c b/drivers/i2c/busses/i2c-ali15x3.c
index e7e3205..659f63f 100644
--- a/drivers/i2c/busses/i2c-ali15x3.c
+++ b/drivers/i2c/busses/i2c-ali15x3.c
@@ -477,7 +477,7 @@
.algo = &smbus_algorithm,
};
-static struct pci_device_id ali15x3_ids[] = {
+static const struct pci_device_id ali15x3_ids[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_AL, PCI_DEVICE_ID_AL_M7101) },
{ 0, }
};
diff --git a/drivers/i2c/busses/i2c-amd756.c b/drivers/i2c/busses/i2c-amd756.c
index 8f0b90e..c5a9fa4 100644
--- a/drivers/i2c/busses/i2c-amd756.c
+++ b/drivers/i2c/busses/i2c-amd756.c
@@ -308,7 +308,7 @@
"nVidia nForce", "AMD8111",
};
-static struct pci_device_id amd756_ids[] = {
+static const struct pci_device_id amd756_ids[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_VIPER_740B),
.driver_data = AMD756 },
{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_VIPER_7413),
diff --git a/drivers/i2c/busses/i2c-amd8111.c b/drivers/i2c/busses/i2c-amd8111.c
index 5b4ad86..d0dc970 100644
--- a/drivers/i2c/busses/i2c-amd8111.c
+++ b/drivers/i2c/busses/i2c-amd8111.c
@@ -351,7 +351,7 @@
};
-static struct pci_device_id amd8111_ids[] = {
+static const struct pci_device_id amd8111_ids[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8111_SMBUS2) },
{ 0, }
};
diff --git a/drivers/i2c/busses/i2c-hydra.c b/drivers/i2c/busses/i2c-hydra.c
index bec9b84..c767295 100644
--- a/drivers/i2c/busses/i2c-hydra.c
+++ b/drivers/i2c/busses/i2c-hydra.c
@@ -105,7 +105,7 @@
.algo_data = &hydra_bit_data,
};
-static struct pci_device_id hydra_ids[] = {
+static const struct pci_device_id hydra_ids[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_HYDRA) },
{ 0, }
};
diff --git a/drivers/i2c/busses/i2c-i801.c b/drivers/i2c/busses/i2c-i801.c
index df6ab553..9da5b05 100644
--- a/drivers/i2c/busses/i2c-i801.c
+++ b/drivers/i2c/busses/i2c-i801.c
@@ -41,7 +41,8 @@
Tolapai 0x5032 32 hard yes yes yes
ICH10 0x3a30 32 hard yes yes yes
ICH10 0x3a60 32 hard yes yes yes
- PCH 0x3b30 32 hard yes yes yes
+ 3400/5 Series (PCH) 0x3b30 32 hard yes yes yes
+ Cougar Point (PCH) 0x1c22 32 hard yes yes yes
Features supported by this driver:
Software PEC no
@@ -561,7 +562,7 @@
.algo = &smbus_algorithm,
};
-static struct pci_device_id i801_ids[] = {
+static const struct pci_device_id i801_ids[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AA_3) },
{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AB_3) },
{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_2) },
@@ -578,6 +579,7 @@
{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH10_4) },
{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH10_5) },
{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PCH_SMBUS) },
+ { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_CPT_SMBUS) },
{ 0, }
};
@@ -707,6 +709,7 @@
case PCI_DEVICE_ID_INTEL_ICH10_4:
case PCI_DEVICE_ID_INTEL_ICH10_5:
case PCI_DEVICE_ID_INTEL_PCH_SMBUS:
+ case PCI_DEVICE_ID_INTEL_CPT_SMBUS:
i801_features |= FEATURE_I2C_BLOCK_READ;
/* fall through */
case PCI_DEVICE_ID_INTEL_82801DB_3:
diff --git a/drivers/i2c/busses/i2c-isch.c b/drivers/i2c/busses/i2c-isch.c
index dba6eb0..69c22f7 100644
--- a/drivers/i2c/busses/i2c-isch.c
+++ b/drivers/i2c/busses/i2c-isch.c
@@ -256,7 +256,7 @@
.algo = &smbus_algorithm,
};
-static struct pci_device_id sch_ids[] = {
+static const struct pci_device_id sch_ids[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_SCH_LPC) },
{ 0, }
};
diff --git a/drivers/i2c/busses/i2c-nforce2.c b/drivers/i2c/busses/i2c-nforce2.c
index ec11d1c..4a70058 100644
--- a/drivers/i2c/busses/i2c-nforce2.c
+++ b/drivers/i2c/busses/i2c-nforce2.c
@@ -308,7 +308,7 @@
};
-static struct pci_device_id nforce2_ids[] = {
+static const struct pci_device_id nforce2_ids[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE2_SMBUS) },
{ PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE2S_SMBUS) },
{ PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE3_SMBUS) },
diff --git a/drivers/i2c/busses/i2c-parport-light.c b/drivers/i2c/busses/i2c-parport-light.c
index 322c569..5f41ec0 100644
--- a/drivers/i2c/busses/i2c-parport-light.c
+++ b/drivers/i2c/busses/i2c-parport-light.c
@@ -1,7 +1,7 @@
/* ------------------------------------------------------------------------ *
* i2c-parport-light.c I2C bus over parallel port *
* ------------------------------------------------------------------------ *
- Copyright (C) 2003-2007 Jean Delvare <khali@linux-fr.org>
+ Copyright (C) 2003-2010 Jean Delvare <khali@linux-fr.org>
Based on older i2c-velleman.c driver
Copyright (C) 1995-2000 Simon G. Vogl
@@ -27,10 +27,12 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
+#include <linux/delay.h>
#include <linux/platform_device.h>
#include <linux/ioport.h>
#include <linux/i2c.h>
#include <linux/i2c-algo-bit.h>
+#include <linux/i2c-smbus.h>
#include <asm/io.h>
#include "i2c-parport.h"
@@ -43,6 +45,10 @@
module_param(base, ushort, 0);
MODULE_PARM_DESC(base, "Base I/O address");
+static int irq;
+module_param(irq, int, 0);
+MODULE_PARM_DESC(irq, "IRQ (optional)");
+
/* ----- Low-level parallel port access ----------------------------------- */
static inline void port_write(unsigned char p, unsigned char d)
@@ -119,6 +125,16 @@
.name = "Parallel port adapter (light)",
};
+/* SMBus alert support */
+static struct i2c_smbus_alert_setup alert_data = {
+ .alert_edge_triggered = 1,
+};
+static struct i2c_client *ara;
+static struct lineop parport_ctrl_irq = {
+ .val = (1 << 4),
+ .port = CTRL,
+};
+
static int __devinit i2c_parport_probe(struct platform_device *pdev)
{
int err;
@@ -127,18 +143,39 @@
parport_setsda(NULL, 1);
parport_setscl(NULL, 1);
/* Other init if needed (power on...) */
- if (adapter_parm[type].init.val)
+ if (adapter_parm[type].init.val) {
line_set(1, &adapter_parm[type].init);
+ /* Give powered devices some time to settle */
+ msleep(100);
+ }
parport_adapter.dev.parent = &pdev->dev;
err = i2c_bit_add_bus(&parport_adapter);
- if (err)
+ if (err) {
dev_err(&pdev->dev, "Unable to register with I2C\n");
- return err;
+ return err;
+ }
+
+ /* Setup SMBus alert if supported */
+ if (adapter_parm[type].smbus_alert && irq) {
+ alert_data.irq = irq;
+ ara = i2c_setup_smbus_alert(&parport_adapter, &alert_data);
+ if (ara)
+ line_set(1, &parport_ctrl_irq);
+ else
+ dev_warn(&pdev->dev, "Failed to register ARA client\n");
+ }
+
+ return 0;
}
static int __devexit i2c_parport_remove(struct platform_device *pdev)
{
+ if (ara) {
+ line_set(0, &parport_ctrl_irq);
+ i2c_unregister_device(ara);
+ ara = NULL;
+ }
i2c_del_adapter(&parport_adapter);
/* Un-init if needed (power off...) */
@@ -205,6 +242,9 @@
if (!request_region(base, 3, DRVNAME))
return -EBUSY;
+ if (irq != 0)
+ pr_info(DRVNAME ": using irq %d\n", irq);
+
if (!adapter_parm[type].getscl.val)
parport_algo_data.getscl = NULL;
diff --git a/drivers/i2c/busses/i2c-parport.c b/drivers/i2c/busses/i2c-parport.c
index 0d89986..220fca7 100644
--- a/drivers/i2c/busses/i2c-parport.c
+++ b/drivers/i2c/busses/i2c-parport.c
@@ -1,7 +1,7 @@
/* ------------------------------------------------------------------------ *
* i2c-parport.c I2C bus over parallel port *
* ------------------------------------------------------------------------ *
- Copyright (C) 2003-2007 Jean Delvare <khali@linux-fr.org>
+ Copyright (C) 2003-2010 Jean Delvare <khali@linux-fr.org>
Based on older i2c-philips-par.c driver
Copyright (C) 1995-2000 Simon G. Vogl
@@ -27,9 +27,11 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
+#include <linux/delay.h>
#include <linux/parport.h>
#include <linux/i2c.h>
#include <linux/i2c-algo-bit.h>
+#include <linux/i2c-smbus.h>
#include "i2c-parport.h"
/* ----- Device list ------------------------------------------------------ */
@@ -38,6 +40,8 @@
struct pardevice *pdev;
struct i2c_adapter adapter;
struct i2c_algo_bit_data algo_data;
+ struct i2c_smbus_alert_setup alert_data;
+ struct i2c_client *ara;
struct i2c_par *next;
};
@@ -143,6 +147,19 @@
/* ----- I2c and parallel port call-back functions and structures --------- */
+void i2c_parport_irq(void *data)
+{
+ struct i2c_par *adapter = data;
+ struct i2c_client *ara = adapter->ara;
+
+ if (ara) {
+ dev_dbg(&ara->dev, "SMBus alert received\n");
+ i2c_handle_smbus_alert(ara);
+ } else
+ dev_dbg(&adapter->adapter.dev,
+ "SMBus alert received but no ARA client!\n");
+}
+
static void i2c_parport_attach (struct parport *port)
{
struct i2c_par *adapter;
@@ -154,8 +171,9 @@
}
pr_debug("i2c-parport: attaching to %s\n", port->name);
+ parport_disable_irq(port);
adapter->pdev = parport_register_device(port, "i2c-parport",
- NULL, NULL, NULL, PARPORT_FLAG_EXCL, NULL);
+ NULL, NULL, i2c_parport_irq, PARPORT_FLAG_EXCL, adapter);
if (!adapter->pdev) {
printk(KERN_ERR "i2c-parport: Unable to register with parport\n");
goto ERROR0;
@@ -185,14 +203,29 @@
parport_setsda(port, 1);
parport_setscl(port, 1);
/* Other init if needed (power on...) */
- if (adapter_parm[type].init.val)
+ if (adapter_parm[type].init.val) {
line_set(port, 1, &adapter_parm[type].init);
+ /* Give powered devices some time to settle */
+ msleep(100);
+ }
if (i2c_bit_add_bus(&adapter->adapter) < 0) {
printk(KERN_ERR "i2c-parport: Unable to register with I2C\n");
goto ERROR1;
}
+ /* Setup SMBus alert if supported */
+ if (adapter_parm[type].smbus_alert) {
+ adapter->alert_data.alert_edge_triggered = 1;
+ adapter->ara = i2c_setup_smbus_alert(&adapter->adapter,
+ &adapter->alert_data);
+ if (adapter->ara)
+ parport_enable_irq(port);
+ else
+ printk(KERN_WARNING "i2c-parport: Failed to register "
+ "ARA client\n");
+ }
+
/* Add the new adapter to the list */
adapter->next = adapter_list;
adapter_list = adapter;
@@ -213,6 +246,10 @@
for (prev = NULL, adapter = adapter_list; adapter;
prev = adapter, adapter = adapter->next) {
if (adapter->pdev->port == port) {
+ if (adapter->ara) {
+ parport_disable_irq(port);
+ i2c_unregister_device(adapter->ara);
+ }
i2c_del_adapter(&adapter->adapter);
/* Un-init if needed (power off...) */
diff --git a/drivers/i2c/busses/i2c-parport.h b/drivers/i2c/busses/i2c-parport.h
index ed69d84..a9f6681 100644
--- a/drivers/i2c/busses/i2c-parport.h
+++ b/drivers/i2c/busses/i2c-parport.h
@@ -1,7 +1,7 @@
/* ------------------------------------------------------------------------ *
* i2c-parport.h I2C bus over parallel port *
* ------------------------------------------------------------------------ *
- Copyright (C) 2003-2004 Jean Delvare <khali@linux-fr.org>
+ Copyright (C) 2003-2010 Jean Delvare <khali@linux-fr.org>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
@@ -38,6 +38,7 @@
struct lineop getsda;
struct lineop getscl;
struct lineop init;
+ unsigned int smbus_alert:1;
};
static struct adapter_parm adapter_parm[] = {
@@ -73,6 +74,7 @@
.setscl = { 0x01, DATA, 1 },
.getsda = { 0x10, STAT, 1 },
.init = { 0xf0, DATA, 0 },
+ .smbus_alert = 1,
},
/* type 5: ADM1025, ADM1030 and ADM1031 evaluation boards */
{
diff --git a/drivers/i2c/busses/i2c-pasemi.c b/drivers/i2c/busses/i2c-pasemi.c
index adf0fbb..0d20ff4 100644
--- a/drivers/i2c/busses/i2c-pasemi.c
+++ b/drivers/i2c/busses/i2c-pasemi.c
@@ -400,7 +400,7 @@
kfree(smbus);
}
-static struct pci_device_id pasemi_smb_ids[] = {
+static const struct pci_device_id pasemi_smb_ids[] = {
{ PCI_DEVICE(0x1959, 0xa003) },
{ 0, }
};
diff --git a/drivers/i2c/busses/i2c-piix4.c b/drivers/i2c/busses/i2c-piix4.c
index e56e4b6..ee9da6f 100644
--- a/drivers/i2c/busses/i2c-piix4.c
+++ b/drivers/i2c/busses/i2c-piix4.c
@@ -472,7 +472,7 @@
.algo = &smbus_algorithm,
};
-static struct pci_device_id piix4_ids[] = {
+static const struct pci_device_id piix4_ids[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82371AB_3) },
{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82443MX_3) },
{ PCI_DEVICE(PCI_VENDOR_ID_EFAR, PCI_DEVICE_ID_EFAR_SLC90E66_3) },
diff --git a/drivers/i2c/busses/i2c-sis5595.c b/drivers/i2c/busses/i2c-sis5595.c
index 844569f..55a7137 100644
--- a/drivers/i2c/busses/i2c-sis5595.c
+++ b/drivers/i2c/busses/i2c-sis5595.c
@@ -369,7 +369,7 @@
.algo = &smbus_algorithm,
};
-static struct pci_device_id sis5595_ids[] __devinitdata = {
+static const struct pci_device_id sis5595_ids[] __devinitconst = {
{ PCI_DEVICE(PCI_VENDOR_ID_SI, PCI_DEVICE_ID_SI_503) },
{ 0, }
};
diff --git a/drivers/i2c/busses/i2c-sis630.c b/drivers/i2c/busses/i2c-sis630.c
index 68cff7a..2309c7f 100644
--- a/drivers/i2c/busses/i2c-sis630.c
+++ b/drivers/i2c/busses/i2c-sis630.c
@@ -468,7 +468,7 @@
.algo = &smbus_algorithm,
};
-static struct pci_device_id sis630_ids[] __devinitdata = {
+static const struct pci_device_id sis630_ids[] __devinitconst = {
{ PCI_DEVICE(PCI_VENDOR_ID_SI, PCI_DEVICE_ID_SI_503) },
{ PCI_DEVICE(PCI_VENDOR_ID_SI, PCI_DEVICE_ID_SI_LPC) },
{ 0, }
diff --git a/drivers/i2c/busses/i2c-sis96x.c b/drivers/i2c/busses/i2c-sis96x.c
index 1649963..d43d8f8 100644
--- a/drivers/i2c/busses/i2c-sis96x.c
+++ b/drivers/i2c/busses/i2c-sis96x.c
@@ -245,7 +245,7 @@
.algo = &smbus_algorithm,
};
-static struct pci_device_id sis96x_ids[] = {
+static const struct pci_device_id sis96x_ids[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_SI, PCI_DEVICE_ID_SI_SMBUS) },
{ 0, }
};
diff --git a/drivers/i2c/busses/i2c-tiny-usb.c b/drivers/i2c/busses/i2c-tiny-usb.c
index e29b6d5..b5b1bbf 100644
--- a/drivers/i2c/busses/i2c-tiny-usb.c
+++ b/drivers/i2c/busses/i2c-tiny-usb.c
@@ -31,11 +31,13 @@
#define CMD_I2C_IO_BEGIN (1<<0)
#define CMD_I2C_IO_END (1<<1)
-/* i2c bit delay, default is 10us -> 100kHz */
+/* i2c bit delay, default is 10us -> 100kHz max
+ (in practice, due to additional delays in the i2c bitbanging
+ code this results in an i2c clock of about 50kHz) */
static unsigned short delay = 10;
module_param(delay, ushort, 0);
-MODULE_PARM_DESC(delay, "bit delay in microseconds, "
- "e.g. 10 for 100kHz (default is 100kHz)");
+MODULE_PARM_DESC(delay, "bit delay in microseconds "
+ "(default is 10us for 100kHz max)");
static int usb_read(struct i2c_adapter *adapter, int cmd,
int value, int index, void *data, int len);
@@ -137,7 +139,7 @@
* Future Technology Devices International Ltd., later a pair was
* bought from EZPrototypes
*/
-static struct usb_device_id i2c_tiny_usb_table [] = {
+static const struct usb_device_id i2c_tiny_usb_table[] = {
{ USB_DEVICE(0x0403, 0xc631) }, /* FTDI */
{ USB_DEVICE(0x1c40, 0x0534) }, /* EZPrototypes */
{ } /* Terminating entry */
diff --git a/drivers/i2c/busses/i2c-via.c b/drivers/i2c/busses/i2c-via.c
index 8b24f19..de78283 100644
--- a/drivers/i2c/busses/i2c-via.c
+++ b/drivers/i2c/busses/i2c-via.c
@@ -89,7 +89,7 @@
};
-static struct pci_device_id vt586b_ids[] __devinitdata = {
+static const struct pci_device_id vt586b_ids[] __devinitconst = {
{ PCI_DEVICE(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_82C586_3) },
{ 0, }
};
diff --git a/drivers/i2c/busses/i2c-viapro.c b/drivers/i2c/busses/i2c-viapro.c
index a84a909..d57292e 100644
--- a/drivers/i2c/busses/i2c-viapro.c
+++ b/drivers/i2c/busses/i2c-viapro.c
@@ -444,7 +444,7 @@
return error;
}
-static struct pci_device_id vt596_ids[] = {
+static const struct pci_device_id vt596_ids[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_82C596_3),
.driver_data = SMBBA1 },
{ PCI_DEVICE(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_82C596B_3),
diff --git a/drivers/i2c/i2c-core.c b/drivers/i2c/i2c-core.c
index 10be7b5..3202a86 100644
--- a/drivers/i2c/i2c-core.c
+++ b/drivers/i2c/i2c-core.c
@@ -34,6 +34,7 @@
#include <linux/hardirq.h>
#include <linux/irqflags.h>
#include <linux/rwsem.h>
+#include <linux/pm_runtime.h>
#include <asm/uaccess.h>
#include "i2c-core.h"
@@ -184,6 +185,52 @@
#define i2c_device_pm_resume NULL
#endif
+#ifdef CONFIG_PM_RUNTIME
+static int i2c_device_runtime_suspend(struct device *dev)
+{
+ const struct dev_pm_ops *pm;
+
+ if (!dev->driver)
+ return 0;
+ pm = dev->driver->pm;
+ if (!pm || !pm->runtime_suspend)
+ return 0;
+ return pm->runtime_suspend(dev);
+}
+
+static int i2c_device_runtime_resume(struct device *dev)
+{
+ const struct dev_pm_ops *pm;
+
+ if (!dev->driver)
+ return 0;
+ pm = dev->driver->pm;
+ if (!pm || !pm->runtime_resume)
+ return 0;
+ return pm->runtime_resume(dev);
+}
+
+static int i2c_device_runtime_idle(struct device *dev)
+{
+ const struct dev_pm_ops *pm = NULL;
+ int ret;
+
+ if (dev->driver)
+ pm = dev->driver->pm;
+ if (pm && pm->runtime_idle) {
+ ret = pm->runtime_idle(dev);
+ if (ret)
+ return ret;
+ }
+
+ return pm_runtime_suspend(dev);
+}
+#else
+#define i2c_device_runtime_suspend NULL
+#define i2c_device_runtime_resume NULL
+#define i2c_device_runtime_idle NULL
+#endif
+
static int i2c_device_suspend(struct device *dev, pm_message_t mesg)
{
struct i2c_client *client = i2c_verify_client(dev);
@@ -251,6 +298,9 @@
static const struct dev_pm_ops i2c_device_pm_ops = {
.suspend = i2c_device_pm_suspend,
.resume = i2c_device_pm_resume,
+ .runtime_suspend = i2c_device_runtime_suspend,
+ .runtime_resume = i2c_device_runtime_resume,
+ .runtime_idle = i2c_device_runtime_idle,
};
struct bus_type i2c_bus_type = {
@@ -1133,7 +1183,7 @@
* i2c_master_send - issue a single I2C message in master transmit mode
* @client: Handle to slave device
* @buf: Data that will be written to the slave
- * @count: How many bytes to write
+ * @count: How many bytes to write, must be less than 64k since msg.len is u16
*
* Returns negative errno, or else the number of bytes written.
*/
@@ -1160,7 +1210,7 @@
* i2c_master_recv - issue a single I2C message in master receive mode
* @client: Handle to slave device
* @buf: Where to store data read from slave
- * @count: How many bytes to read
+ * @count: How many bytes to read, must be less than 64k since msg.len is u16
*
* Returns negative errno, or else the number of bytes read.
*/
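The runtime PM hooks added to i2c-core above are pure plumbing: they only forward to whatever dev_pm_ops a client driver installs through its driver.pm pointer. Below is a minimal sketch of such a client driver; the "foo_sensor" name and the empty callbacks are illustrative assumptions, only the dev_pm_ops wiring reflects what the bus code above expects.

#include <linux/i2c.h>
#include <linux/pm_runtime.h>

/* Hypothetical client driver: the bus-level i2c_device_runtime_* callbacks
 * above reach these functions through dev->driver->pm. */
static int foo_sensor_runtime_suspend(struct device *dev)
{
	struct i2c_client *client = to_i2c_client(dev);

	/* put the chip into its low-power state here */
	dev_dbg(&client->dev, "runtime suspend\n");
	return 0;
}

static int foo_sensor_runtime_resume(struct device *dev)
{
	struct i2c_client *client = to_i2c_client(dev);

	/* bring the chip back to full operation here */
	dev_dbg(&client->dev, "runtime resume\n");
	return 0;
}

static const struct dev_pm_ops foo_sensor_pm_ops = {
	.runtime_suspend = foo_sensor_runtime_suspend,
	.runtime_resume	 = foo_sensor_runtime_resume,
};

static struct i2c_driver foo_sensor_driver = {
	.driver = {
		.name	= "foo_sensor",
		.pm	= &foo_sensor_pm_ops,
	},
	/* .probe, .remove and .id_table omitted from this sketch */
};

The driver's probe routine would then typically call pm_runtime_enable(&client->dev), so that the runtime_idle path added above can suspend the device once it is unused.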
diff --git a/drivers/i2c/i2c-smbus.c b/drivers/i2c/i2c-smbus.c
new file mode 100644
index 0000000..4212782
--- /dev/null
+++ b/drivers/i2c/i2c-smbus.c
@@ -0,0 +1,263 @@
+/*
+ * i2c-smbus.c - SMBus extensions to the I2C protocol
+ *
+ * Copyright (C) 2008 David Brownell
+ * Copyright (C) 2010 Jean Delvare <khali@linux-fr.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/device.h>
+#include <linux/semaphore.h>
+#include <linux/interrupt.h>
+#include <linux/workqueue.h>
+#include <linux/i2c.h>
+#include <linux/i2c-smbus.h>
+
+struct i2c_smbus_alert {
+ unsigned int alert_edge_triggered:1;
+ int irq;
+ struct work_struct alert;
+ struct i2c_client *ara; /* Alert response address */
+};
+
+struct alert_data {
+ unsigned short addr;
+ u8 flag:1;
+};
+
+/* If this is the alerting device, notify its driver */
+static int smbus_do_alert(struct device *dev, void *addrp)
+{
+ struct i2c_client *client = i2c_verify_client(dev);
+ struct alert_data *data = addrp;
+
+ if (!client || client->addr != data->addr)
+ return 0;
+ if (client->flags & I2C_CLIENT_TEN)
+ return 0;
+
+ /*
+ * Drivers should either disable alerts, or provide at least
+ * a minimal handler. Lock so client->driver won't change.
+ */
+ down(&dev->sem);
+ if (client->driver) {
+ if (client->driver->alert)
+ client->driver->alert(client, data->flag);
+ else
+ dev_warn(&client->dev, "no driver alert()!\n");
+ } else
+ dev_dbg(&client->dev, "alert with no driver\n");
+ up(&dev->sem);
+
+ /* Stop iterating after we find the device */
+ return -EBUSY;
+}
+
+/*
+ * The alert IRQ handler needs to hand work off to a task which can issue
+ * SMBus calls, because those sleeping calls can't be made in IRQ context.
+ */
+static void smbus_alert(struct work_struct *work)
+{
+ struct i2c_smbus_alert *alert;
+ struct i2c_client *ara;
+ unsigned short prev_addr = 0; /* Not a valid address */
+
+ alert = container_of(work, struct i2c_smbus_alert, alert);
+ ara = alert->ara;
+
+ for (;;) {
+ s32 status;
+ struct alert_data data;
+
+ /*
+ * Devices with pending alerts reply in address order, low
+ * to high, because of slave transmit arbitration. After
+ * responding, an SMBus device stops asserting SMBALERT#.
+ *
+ * Note that SMBus 2.0 reserves 10-bit addresses for future
+ * use. We neither handle them, nor try to use PEC here.
+ */
+ status = i2c_smbus_read_byte(ara);
+ if (status < 0)
+ break;
+
+ data.flag = status & 1;
+ data.addr = status >> 1;
+
+ if (data.addr == prev_addr) {
+ dev_warn(&ara->dev, "Duplicate SMBALERT# from dev "
+ "0x%02x, skipping\n", data.addr);
+ break;
+ }
+ dev_dbg(&ara->dev, "SMBALERT# from dev 0x%02x, flag %d\n",
+ data.addr, data.flag);
+
+ /* Notify driver for the device which issued the alert */
+ device_for_each_child(&ara->adapter->dev, &data,
+ smbus_do_alert);
+ prev_addr = data.addr;
+ }
+
+ /* We handled all alerts; re-enable level-triggered IRQs */
+ if (!alert->alert_edge_triggered)
+ enable_irq(alert->irq);
+}
+
+static irqreturn_t smbalert_irq(int irq, void *d)
+{
+ struct i2c_smbus_alert *alert = d;
+
+ /* Disable level-triggered IRQs until we handle them */
+ if (!alert->alert_edge_triggered)
+ disable_irq_nosync(irq);
+
+ schedule_work(&alert->alert);
+ return IRQ_HANDLED;
+}
+
+/* Setup SMBALERT# infrastructure */
+static int smbalert_probe(struct i2c_client *ara,
+ const struct i2c_device_id *id)
+{
+ struct i2c_smbus_alert_setup *setup = ara->dev.platform_data;
+ struct i2c_smbus_alert *alert;
+ struct i2c_adapter *adapter = ara->adapter;
+ int res;
+
+ alert = kzalloc(sizeof(struct i2c_smbus_alert), GFP_KERNEL);
+ if (!alert)
+ return -ENOMEM;
+
+ alert->alert_edge_triggered = setup->alert_edge_triggered;
+ alert->irq = setup->irq;
+ INIT_WORK(&alert->alert, smbus_alert);
+ alert->ara = ara;
+
+ if (setup->irq > 0) {
+ res = devm_request_irq(&ara->dev, setup->irq, smbalert_irq,
+ 0, "smbus_alert", alert);
+ if (res) {
+ kfree(alert);
+ return res;
+ }
+ }
+
+ i2c_set_clientdata(ara, alert);
+ dev_info(&adapter->dev, "supports SMBALERT#, %s trigger\n",
+ setup->alert_edge_triggered ? "edge" : "level");
+
+ return 0;
+}
+
+/* IRQ resource is managed so it is freed automatically */
+static int smbalert_remove(struct i2c_client *ara)
+{
+ struct i2c_smbus_alert *alert = i2c_get_clientdata(ara);
+
+ cancel_work_sync(&alert->alert);
+
+ i2c_set_clientdata(ara, NULL);
+ kfree(alert);
+ return 0;
+}
+
+static const struct i2c_device_id smbalert_ids[] = {
+ { "smbus_alert", 0 },
+ { /* LIST END */ }
+};
+MODULE_DEVICE_TABLE(i2c, smbalert_ids);
+
+static struct i2c_driver smbalert_driver = {
+ .driver = {
+ .name = "smbus_alert",
+ },
+ .probe = smbalert_probe,
+ .remove = smbalert_remove,
+ .id_table = smbalert_ids,
+};
+
+/**
+ * i2c_setup_smbus_alert - Setup SMBus alert support
+ * @adapter: the target adapter
+ * @setup: setup data for the SMBus alert handler
+ * Context: can sleep
+ *
+ * Setup handling of the SMBus alert protocol on a given I2C bus segment.
+ *
+ * Handling can be done either through our IRQ handler, or by the
+ * adapter (from its handler, periodic polling, or whatever).
+ *
+ * NOTE that if we manage the IRQ, we *MUST* know if it's level or
+ * edge triggered in order to hand it to the workqueue correctly.
+ * If triggering the alert seems to wedge the system, you probably
+ * should have said it's level triggered.
+ *
+ * This returns the ara client, which should be saved for later use with
+ * i2c_handle_smbus_alert() and ultimately i2c_unregister_device(); or NULL
+ * to indicate an error.
+ */
+struct i2c_client *i2c_setup_smbus_alert(struct i2c_adapter *adapter,
+ struct i2c_smbus_alert_setup *setup)
+{
+ struct i2c_board_info ara_board_info = {
+ I2C_BOARD_INFO("smbus_alert", 0x0c),
+ .platform_data = setup,
+ };
+
+ return i2c_new_device(adapter, &ara_board_info);
+}
+EXPORT_SYMBOL_GPL(i2c_setup_smbus_alert);
+
+/**
+ * i2c_handle_smbus_alert - Handle an SMBus alert
+ * @ara: the ARA client on the relevant adapter
+ * Context: can't sleep
+ *
+ * Helper function to be called from an I2C bus driver's interrupt
+ * handler. It will schedule the alert work, in turn calling the
+ * corresponding I2C device driver's alert function.
+ *
+ * It is assumed that ara is a valid i2c client previously returned by
+ * i2c_setup_smbus_alert().
+ */
+int i2c_handle_smbus_alert(struct i2c_client *ara)
+{
+ struct i2c_smbus_alert *alert = i2c_get_clientdata(ara);
+
+ return schedule_work(&alert->alert);
+}
+EXPORT_SYMBOL_GPL(i2c_handle_smbus_alert);
+
+static int __init i2c_smbus_init(void)
+{
+ return i2c_add_driver(&smbalert_driver);
+}
+
+static void __exit i2c_smbus_exit(void)
+{
+ i2c_del_driver(&smbalert_driver);
+}
+
+module_init(i2c_smbus_init);
+module_exit(i2c_smbus_exit);
+
+MODULE_AUTHOR("Jean Delvare <khali@linux-fr.org>");
+MODULE_DESCRIPTION("SMBus protocol extensions support");
+MODULE_LICENSE("GPL");
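Taken together, the two exported helpers leave only a small amount of glue to write. A rough sketch of how an adapter driver and a client driver would plug into this follows; every "foo_" identifier is invented for illustration, while i2c_setup_smbus_alert(), i2c_handle_smbus_alert(), struct i2c_smbus_alert_setup and the driver .alert callback invoked by smbus_do_alert() come from the code above.

#include <linux/i2c.h>
#include <linux/i2c-smbus.h>

static struct i2c_client *foo_ara;

/* Adapter side: register the ARA client once the adapter is up.  Leaving
 * .irq at 0 means the adapter driver reports alerts itself rather than
 * having i2c-smbus request the interrupt. */
static int foo_adapter_setup_alert(struct i2c_adapter *adap)
{
	static struct i2c_smbus_alert_setup setup = {
		.alert_edge_triggered	= 1,
		.irq			= 0,
	};

	foo_ara = i2c_setup_smbus_alert(adap, &setup);
	return foo_ara ? 0 : -ENODEV;
}

/* Called from the adapter's own interrupt handler when SMBALERT# fires;
 * i2c_handle_smbus_alert() only schedules work, so IRQ context is fine. */
static void foo_adapter_alert_seen(void)
{
	if (foo_ara)
		i2c_handle_smbus_alert(foo_ara);
}

static void foo_adapter_teardown_alert(void)
{
	if (foo_ara) {
		i2c_unregister_device(foo_ara);
		foo_ara = NULL;
	}
}

/* Client side: smbus_do_alert() calls this for the device whose address
 * was read back from the ARA, with the status byte's low bit as flag. */
static void foo_chip_alert(struct i2c_client *client, unsigned int flag)
{
	dev_warn(&client->dev, "SMBALERT# received, flag %u\n", flag);
	/* read and clear the chip's status registers here */
}

static struct i2c_driver foo_chip_driver = {
	.driver	= {
		.name	= "foo_chip",
	},
	.alert	= foo_chip_alert,
	/* .probe, .remove and .id_table omitted from this sketch */
};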
diff --git a/drivers/infiniband/Kconfig b/drivers/infiniband/Kconfig
index dd0db67..975adce 100644
--- a/drivers/infiniband/Kconfig
+++ b/drivers/infiniband/Kconfig
@@ -20,6 +20,7 @@
config INFINIBAND_USER_ACCESS
tristate "InfiniBand userspace access (verbs and CM)"
+ select ANON_INODES
---help---
Userspace InfiniBand access support. This enables the
kernel side of userspace verbs and the userspace
diff --git a/drivers/infiniband/core/ucm.c b/drivers/infiniband/core/ucm.c
index f504c9b..1b09b73 100644
--- a/drivers/infiniband/core/ucm.c
+++ b/drivers/infiniband/core/ucm.c
@@ -1215,15 +1215,18 @@
ucm_dev = container_of(dev, struct ib_ucm_device, dev);
cdev_del(&ucm_dev->cdev);
- clear_bit(ucm_dev->devnum, dev_map);
+ if (ucm_dev->devnum < IB_UCM_MAX_DEVICES)
+ clear_bit(ucm_dev->devnum, dev_map);
+ else
+ clear_bit(ucm_dev->devnum - IB_UCM_MAX_DEVICES, dev_map);
kfree(ucm_dev);
}
static const struct file_operations ucm_fops = {
- .owner = THIS_MODULE,
- .open = ib_ucm_open,
+ .owner = THIS_MODULE,
+ .open = ib_ucm_open,
.release = ib_ucm_close,
- .write = ib_ucm_write,
+ .write = ib_ucm_write,
.poll = ib_ucm_poll,
};
@@ -1237,8 +1240,32 @@
}
static DEVICE_ATTR(ibdev, S_IRUGO, show_ibdev, NULL);
+static dev_t overflow_maj;
+static DECLARE_BITMAP(overflow_map, IB_UCM_MAX_DEVICES);
+static int find_overflow_devnum(void)
+{
+ int ret;
+
+ if (!overflow_maj) {
+ ret = alloc_chrdev_region(&overflow_maj, 0, IB_UCM_MAX_DEVICES,
+ "infiniband_cm");
+ if (ret) {
+ printk(KERN_ERR "ucm: couldn't register dynamic device number\n");
+ return ret;
+ }
+ }
+
+ ret = find_first_zero_bit(overflow_map, IB_UCM_MAX_DEVICES);
+ if (ret >= IB_UCM_MAX_DEVICES)
+ return -1;
+
+ return ret;
+}
+
static void ib_ucm_add_one(struct ib_device *device)
{
+ int devnum;
+ dev_t base;
struct ib_ucm_device *ucm_dev;
if (!device->alloc_ucontext ||
@@ -1251,16 +1278,25 @@
ucm_dev->ib_dev = device;
- ucm_dev->devnum = find_first_zero_bit(dev_map, IB_UCM_MAX_DEVICES);
- if (ucm_dev->devnum >= IB_UCM_MAX_DEVICES)
- goto err;
+ devnum = find_first_zero_bit(dev_map, IB_UCM_MAX_DEVICES);
+ if (devnum >= IB_UCM_MAX_DEVICES) {
+ devnum = find_overflow_devnum();
+ if (devnum < 0)
+ goto err;
- set_bit(ucm_dev->devnum, dev_map);
+ ucm_dev->devnum = devnum + IB_UCM_MAX_DEVICES;
+ base = devnum + overflow_maj;
+ set_bit(devnum, overflow_map);
+ } else {
+ ucm_dev->devnum = devnum;
+ base = devnum + IB_UCM_BASE_DEV;
+ set_bit(devnum, dev_map);
+ }
cdev_init(&ucm_dev->cdev, &ucm_fops);
ucm_dev->cdev.owner = THIS_MODULE;
kobject_set_name(&ucm_dev->cdev.kobj, "ucm%d", ucm_dev->devnum);
- if (cdev_add(&ucm_dev->cdev, IB_UCM_BASE_DEV + ucm_dev->devnum, 1))
+ if (cdev_add(&ucm_dev->cdev, base, 1))
goto err;
ucm_dev->dev.class = &cm_class;
@@ -1281,7 +1317,10 @@
device_unregister(&ucm_dev->dev);
err_cdev:
cdev_del(&ucm_dev->cdev);
- clear_bit(ucm_dev->devnum, dev_map);
+ if (ucm_dev->devnum < IB_UCM_MAX_DEVICES)
+ clear_bit(devnum, dev_map);
+ else
+ clear_bit(devnum, overflow_map);
err:
kfree(ucm_dev);
return;
@@ -1340,6 +1379,8 @@
ib_unregister_client(&ucm_client);
class_remove_file(&cm_class, &class_attr_abi_version);
unregister_chrdev_region(IB_UCM_BASE_DEV, IB_UCM_MAX_DEVICES);
+ if (overflow_maj)
+ unregister_chrdev_region(overflow_maj, IB_UCM_MAX_DEVICES);
idr_destroy(&ctx_id_table);
}
diff --git a/drivers/infiniband/core/ud_header.c b/drivers/infiniband/core/ud_header.c
index 8ec7876..650b501 100644
--- a/drivers/infiniband/core/ud_header.c
+++ b/drivers/infiniband/core/ud_header.c
@@ -181,6 +181,7 @@
* ib_ud_header_init - Initialize UD header structure
* @payload_bytes:Length of packet payload
* @grh_present:GRH flag (if non-zero, GRH will be included)
+ * @immediate_present: specify if immediate data should be used
* @header:Structure to initialize
*
* ib_ud_header_init() initializes the lrh.link_version, lrh.link_next_header,
@@ -191,21 +192,13 @@
*/
void ib_ud_header_init(int payload_bytes,
int grh_present,
+ int immediate_present,
struct ib_ud_header *header)
{
- int header_len;
u16 packet_length;
memset(header, 0, sizeof *header);
- header_len =
- IB_LRH_BYTES +
- IB_BTH_BYTES +
- IB_DETH_BYTES;
- if (grh_present) {
- header_len += IB_GRH_BYTES;
- }
-
header->lrh.link_version = 0;
header->lrh.link_next_header =
grh_present ? IB_LNH_IBA_GLOBAL : IB_LNH_IBA_LOCAL;
@@ -231,7 +224,8 @@
header->lrh.packet_length = cpu_to_be16(packet_length);
- if (header->immediate_present)
+ header->immediate_present = immediate_present;
+ if (immediate_present)
header->bth.opcode = IB_OPCODE_UD_SEND_ONLY_WITH_IMMEDIATE;
else
header->bth.opcode = IB_OPCODE_UD_SEND_ONLY;
diff --git a/drivers/infiniband/core/umem.c b/drivers/infiniband/core/umem.c
index 6f7c096..4f906f0 100644
--- a/drivers/infiniband/core/umem.c
+++ b/drivers/infiniband/core/umem.c
@@ -136,7 +136,7 @@
down_write(¤t->mm->mmap_sem);
locked = npages + current->mm->locked_vm;
- lock_limit = current->signal->rlim[RLIMIT_MEMLOCK].rlim_cur >> PAGE_SHIFT;
+ lock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
if ((locked > lock_limit) && !capable(CAP_IPC_LOCK)) {
ret = -ENOMEM;
diff --git a/drivers/infiniband/core/user_mad.c b/drivers/infiniband/core/user_mad.c
index 7de0296..02d360c 100644
--- a/drivers/infiniband/core/user_mad.c
+++ b/drivers/infiniband/core/user_mad.c
@@ -65,12 +65,9 @@
};
/*
- * Our lifetime rules for these structs are the following: each time a
- * device special file is opened, we look up the corresponding struct
- * ib_umad_port by minor in the umad_port[] table while holding the
- * port_lock. If this lookup succeeds, we take a reference on the
- * ib_umad_port's struct ib_umad_device while still holding the
- * port_lock; if the lookup fails, we fail the open(). We drop these
+ * Our lifetime rules for these structs are the following: each time a
+ * device special file is opened, we take a reference on the
+ * ib_umad_port's struct ib_umad_device. We drop these
* references in the corresponding close().
*
* In addition to references coming from open character devices, there
@@ -78,19 +75,14 @@
* module's reference taken when allocating the ib_umad_device in
* ib_umad_add_one().
*
- * When destroying an ib_umad_device, we clear all of its
- * ib_umad_ports from umad_port[] while holding port_lock before
- * dropping the module's reference to the ib_umad_device. This is
- * always safe because any open() calls will either succeed and obtain
- * a reference before we clear the umad_port[] entries, or fail after
- * we clear the umad_port[] entries.
+ * When destroying an ib_umad_device, we drop the module's reference.
*/
struct ib_umad_port {
- struct cdev *cdev;
+ struct cdev cdev;
struct device *dev;
- struct cdev *sm_cdev;
+ struct cdev sm_cdev;
struct device *sm_dev;
struct semaphore sm_sem;
@@ -136,7 +128,6 @@
static const dev_t base_dev = MKDEV(IB_UMAD_MAJOR, IB_UMAD_MINOR_BASE);
static DEFINE_SPINLOCK(port_lock);
-static struct ib_umad_port *umad_port[IB_UMAD_MAX_PORTS];
static DECLARE_BITMAP(dev_map, IB_UMAD_MAX_PORTS);
static void ib_umad_add_one(struct ib_device *device);
@@ -496,8 +487,8 @@
ah_attr.ah_flags = IB_AH_GRH;
memcpy(ah_attr.grh.dgid.raw, packet->mad.hdr.gid, 16);
ah_attr.grh.sgid_index = packet->mad.hdr.gid_index;
- ah_attr.grh.flow_label = be32_to_cpu(packet->mad.hdr.flow_label);
- ah_attr.grh.hop_limit = packet->mad.hdr.hop_limit;
+ ah_attr.grh.flow_label = be32_to_cpu(packet->mad.hdr.flow_label);
+ ah_attr.grh.hop_limit = packet->mad.hdr.hop_limit;
ah_attr.grh.traffic_class = packet->mad.hdr.traffic_class;
}
@@ -528,9 +519,9 @@
goto err_ah;
}
- packet->msg->ah = ah;
+ packet->msg->ah = ah;
packet->msg->timeout_ms = packet->mad.hdr.timeout_ms;
- packet->msg->retries = packet->mad.hdr.retries;
+ packet->msg->retries = packet->mad.hdr.retries;
packet->msg->context[0] = packet;
/* Copy MAD header. Any RMPP header is already in place. */
@@ -779,15 +770,11 @@
/*
* ib_umad_open() does not need the BKL:
*
- * - umad_port[] accesses are protected by port_lock, the
- * ib_umad_port structures are properly reference counted, and
+ * - the ib_umad_port structures are properly reference counted, and
* everything else is purely local to the file being created, so
* races against other open calls are not a problem;
* - the ioctl method does not affect any global state outside of the
* file structure being operated on;
- * - the port is added to umad_port[] as the last part of module
- * initialization so the open method will either immediately run
- * -ENXIO, or all required initialization will be done.
*/
static int ib_umad_open(struct inode *inode, struct file *filp)
{
@@ -795,13 +782,10 @@
struct ib_umad_file *file;
int ret = 0;
- spin_lock(&port_lock);
- port = umad_port[iminor(inode) - IB_UMAD_MINOR_BASE];
+ port = container_of(inode->i_cdev, struct ib_umad_port, cdev);
if (port)
kref_get(&port->umad_dev->ref);
- spin_unlock(&port_lock);
-
- if (!port)
+ else
return -ENXIO;
mutex_lock(&port->file_mutex);
@@ -872,16 +856,16 @@
}
static const struct file_operations umad_fops = {
- .owner = THIS_MODULE,
- .read = ib_umad_read,
- .write = ib_umad_write,
- .poll = ib_umad_poll,
+ .owner = THIS_MODULE,
+ .read = ib_umad_read,
+ .write = ib_umad_write,
+ .poll = ib_umad_poll,
.unlocked_ioctl = ib_umad_ioctl,
#ifdef CONFIG_COMPAT
- .compat_ioctl = ib_umad_compat_ioctl,
+ .compat_ioctl = ib_umad_compat_ioctl,
#endif
- .open = ib_umad_open,
- .release = ib_umad_close
+ .open = ib_umad_open,
+ .release = ib_umad_close
};
static int ib_umad_sm_open(struct inode *inode, struct file *filp)
@@ -892,13 +876,10 @@
};
int ret;
- spin_lock(&port_lock);
- port = umad_port[iminor(inode) - IB_UMAD_MINOR_BASE - IB_UMAD_MAX_PORTS];
+ port = container_of(inode->i_cdev, struct ib_umad_port, sm_cdev);
if (port)
kref_get(&port->umad_dev->ref);
- spin_unlock(&port_lock);
-
- if (!port)
+ else
return -ENXIO;
if (filp->f_flags & O_NONBLOCK) {
@@ -949,8 +930,8 @@
}
static const struct file_operations umad_sm_fops = {
- .owner = THIS_MODULE,
- .open = ib_umad_sm_open,
+ .owner = THIS_MODULE,
+ .open = ib_umad_sm_open,
.release = ib_umad_sm_close
};
@@ -990,16 +971,51 @@
}
static CLASS_ATTR(abi_version, S_IRUGO, show_abi_version, NULL);
+static dev_t overflow_maj;
+static DECLARE_BITMAP(overflow_map, IB_UMAD_MAX_PORTS);
+static int find_overflow_devnum(void)
+{
+ int ret;
+
+ if (!overflow_maj) {
+ ret = alloc_chrdev_region(&overflow_maj, 0, IB_UMAD_MAX_PORTS * 2,
+ "infiniband_mad");
+ if (ret) {
+ printk(KERN_ERR "user_mad: couldn't register dynamic device number\n");
+ return ret;
+ }
+ }
+
+ ret = find_first_zero_bit(overflow_map, IB_UMAD_MAX_PORTS);
+ if (ret >= IB_UMAD_MAX_PORTS)
+ return -1;
+
+ return ret;
+}
+
static int ib_umad_init_port(struct ib_device *device, int port_num,
struct ib_umad_port *port)
{
+ int devnum;
+ dev_t base;
+
spin_lock(&port_lock);
- port->dev_num = find_first_zero_bit(dev_map, IB_UMAD_MAX_PORTS);
- if (port->dev_num >= IB_UMAD_MAX_PORTS) {
+ devnum = find_first_zero_bit(dev_map, IB_UMAD_MAX_PORTS);
+ if (devnum >= IB_UMAD_MAX_PORTS) {
spin_unlock(&port_lock);
- return -1;
+ devnum = find_overflow_devnum();
+ if (devnum < 0)
+ return -1;
+
+ spin_lock(&port_lock);
+ port->dev_num = devnum + IB_UMAD_MAX_PORTS;
+ base = devnum + overflow_maj;
+ set_bit(devnum, overflow_map);
+ } else {
+ port->dev_num = devnum;
+ base = devnum + base_dev;
+ set_bit(devnum, dev_map);
}
- set_bit(port->dev_num, dev_map);
spin_unlock(&port_lock);
port->ib_dev = device;
@@ -1008,17 +1024,14 @@
mutex_init(&port->file_mutex);
INIT_LIST_HEAD(&port->file_list);
- port->cdev = cdev_alloc();
- if (!port->cdev)
- return -1;
- port->cdev->owner = THIS_MODULE;
- port->cdev->ops = &umad_fops;
- kobject_set_name(&port->cdev->kobj, "umad%d", port->dev_num);
- if (cdev_add(port->cdev, base_dev + port->dev_num, 1))
+ cdev_init(&port->cdev, &umad_fops);
+ port->cdev.owner = THIS_MODULE;
+ kobject_set_name(&port->cdev.kobj, "umad%d", port->dev_num);
+ if (cdev_add(&port->cdev, base, 1))
goto err_cdev;
port->dev = device_create(umad_class, device->dma_device,
- port->cdev->dev, port,
+ port->cdev.dev, port,
"umad%d", port->dev_num);
if (IS_ERR(port->dev))
goto err_cdev;
@@ -1028,17 +1041,15 @@
if (device_create_file(port->dev, &dev_attr_port))
goto err_dev;
- port->sm_cdev = cdev_alloc();
- if (!port->sm_cdev)
- goto err_dev;
- port->sm_cdev->owner = THIS_MODULE;
- port->sm_cdev->ops = &umad_sm_fops;
- kobject_set_name(&port->sm_cdev->kobj, "issm%d", port->dev_num);
- if (cdev_add(port->sm_cdev, base_dev + port->dev_num + IB_UMAD_MAX_PORTS, 1))
+ base += IB_UMAD_MAX_PORTS;
+ cdev_init(&port->sm_cdev, &umad_sm_fops);
+ port->sm_cdev.owner = THIS_MODULE;
+ kobject_set_name(&port->sm_cdev.kobj, "issm%d", port->dev_num);
+ if (cdev_add(&port->sm_cdev, base, 1))
goto err_sm_cdev;
port->sm_dev = device_create(umad_class, device->dma_device,
- port->sm_cdev->dev, port,
+ port->sm_cdev.dev, port,
"issm%d", port->dev_num);
if (IS_ERR(port->sm_dev))
goto err_sm_cdev;
@@ -1048,24 +1059,23 @@
if (device_create_file(port->sm_dev, &dev_attr_port))
goto err_sm_dev;
- spin_lock(&port_lock);
- umad_port[port->dev_num] = port;
- spin_unlock(&port_lock);
-
return 0;
err_sm_dev:
- device_destroy(umad_class, port->sm_cdev->dev);
+ device_destroy(umad_class, port->sm_cdev.dev);
err_sm_cdev:
- cdev_del(port->sm_cdev);
+ cdev_del(&port->sm_cdev);
err_dev:
- device_destroy(umad_class, port->cdev->dev);
+ device_destroy(umad_class, port->cdev.dev);
err_cdev:
- cdev_del(port->cdev);
- clear_bit(port->dev_num, dev_map);
+ cdev_del(&port->cdev);
+ if (port->dev_num < IB_UMAD_MAX_PORTS)
+ clear_bit(devnum, dev_map);
+ else
+ clear_bit(devnum, overflow_map);
return -1;
}
@@ -1079,15 +1089,11 @@
dev_set_drvdata(port->dev, NULL);
dev_set_drvdata(port->sm_dev, NULL);
- device_destroy(umad_class, port->cdev->dev);
- device_destroy(umad_class, port->sm_cdev->dev);
+ device_destroy(umad_class, port->cdev.dev);
+ device_destroy(umad_class, port->sm_cdev.dev);
- cdev_del(port->cdev);
- cdev_del(port->sm_cdev);
-
- spin_lock(&port_lock);
- umad_port[port->dev_num] = NULL;
- spin_unlock(&port_lock);
+ cdev_del(&port->cdev);
+ cdev_del(&port->sm_cdev);
mutex_lock(&port->file_mutex);
@@ -1106,7 +1112,10 @@
mutex_unlock(&port->file_mutex);
- clear_bit(port->dev_num, dev_map);
+ if (port->dev_num < IB_UMAD_MAX_PORTS)
+ clear_bit(port->dev_num, dev_map);
+ else
+ clear_bit(port->dev_num - IB_UMAD_MAX_PORTS, overflow_map);
}
static void ib_umad_add_one(struct ib_device *device)
@@ -1214,6 +1223,8 @@
ib_unregister_client(&umad_client);
class_destroy(umad_class);
unregister_chrdev_region(base_dev, IB_UMAD_MAX_PORTS * 2);
+ if (overflow_maj)
+ unregister_chrdev_region(overflow_maj, IB_UMAD_MAX_PORTS * 2);
}
module_init(ib_umad_init);
diff --git a/drivers/infiniband/core/uverbs.h b/drivers/infiniband/core/uverbs.h
index b3ea958..e54d9ac 100644
--- a/drivers/infiniband/core/uverbs.h
+++ b/drivers/infiniband/core/uverbs.h
@@ -41,6 +41,7 @@
#include <linux/idr.h>
#include <linux/mutex.h>
#include <linux/completion.h>
+#include <linux/cdev.h>
#include <rdma/ib_verbs.h>
#include <rdma/ib_umem.h>
@@ -69,23 +70,23 @@
struct ib_uverbs_device {
struct kref ref;
+ int num_comp_vectors;
struct completion comp;
- int devnum;
- struct cdev *cdev;
struct device *dev;
struct ib_device *ib_dev;
- int num_comp_vectors;
+ int devnum;
+ struct cdev cdev;
};
struct ib_uverbs_event_file {
struct kref ref;
+ int is_async;
struct ib_uverbs_file *uverbs_file;
spinlock_t lock;
+ int is_closed;
wait_queue_head_t poll_wait;
struct fasync_struct *async_queue;
struct list_head event_list;
- int is_async;
- int is_closed;
};
struct ib_uverbs_file {
diff --git a/drivers/infiniband/core/uverbs_main.c b/drivers/infiniband/core/uverbs_main.c
index 5f284ff..ff59a79 100644
--- a/drivers/infiniband/core/uverbs_main.c
+++ b/drivers/infiniband/core/uverbs_main.c
@@ -42,8 +42,8 @@
#include <linux/poll.h>
#include <linux/sched.h>
#include <linux/file.h>
-#include <linux/mount.h>
#include <linux/cdev.h>
+#include <linux/anon_inodes.h>
#include <asm/uaccess.h>
@@ -53,8 +53,6 @@
MODULE_DESCRIPTION("InfiniBand userspace verbs access");
MODULE_LICENSE("Dual BSD/GPL");
-#define INFINIBANDEVENTFS_MAGIC 0x49426576 /* "IBev" */
-
enum {
IB_UVERBS_MAJOR = 231,
IB_UVERBS_BASE_MINOR = 192,
@@ -75,44 +73,41 @@
DEFINE_IDR(ib_uverbs_srq_idr);
static DEFINE_SPINLOCK(map_lock);
-static struct ib_uverbs_device *dev_table[IB_UVERBS_MAX_DEVICES];
static DECLARE_BITMAP(dev_map, IB_UVERBS_MAX_DEVICES);
static ssize_t (*uverbs_cmd_table[])(struct ib_uverbs_file *file,
const char __user *buf, int in_len,
int out_len) = {
- [IB_USER_VERBS_CMD_GET_CONTEXT] = ib_uverbs_get_context,
- [IB_USER_VERBS_CMD_QUERY_DEVICE] = ib_uverbs_query_device,
- [IB_USER_VERBS_CMD_QUERY_PORT] = ib_uverbs_query_port,
- [IB_USER_VERBS_CMD_ALLOC_PD] = ib_uverbs_alloc_pd,
- [IB_USER_VERBS_CMD_DEALLOC_PD] = ib_uverbs_dealloc_pd,
- [IB_USER_VERBS_CMD_REG_MR] = ib_uverbs_reg_mr,
- [IB_USER_VERBS_CMD_DEREG_MR] = ib_uverbs_dereg_mr,
+ [IB_USER_VERBS_CMD_GET_CONTEXT] = ib_uverbs_get_context,
+ [IB_USER_VERBS_CMD_QUERY_DEVICE] = ib_uverbs_query_device,
+ [IB_USER_VERBS_CMD_QUERY_PORT] = ib_uverbs_query_port,
+ [IB_USER_VERBS_CMD_ALLOC_PD] = ib_uverbs_alloc_pd,
+ [IB_USER_VERBS_CMD_DEALLOC_PD] = ib_uverbs_dealloc_pd,
+ [IB_USER_VERBS_CMD_REG_MR] = ib_uverbs_reg_mr,
+ [IB_USER_VERBS_CMD_DEREG_MR] = ib_uverbs_dereg_mr,
[IB_USER_VERBS_CMD_CREATE_COMP_CHANNEL] = ib_uverbs_create_comp_channel,
- [IB_USER_VERBS_CMD_CREATE_CQ] = ib_uverbs_create_cq,
- [IB_USER_VERBS_CMD_RESIZE_CQ] = ib_uverbs_resize_cq,
- [IB_USER_VERBS_CMD_POLL_CQ] = ib_uverbs_poll_cq,
- [IB_USER_VERBS_CMD_REQ_NOTIFY_CQ] = ib_uverbs_req_notify_cq,
- [IB_USER_VERBS_CMD_DESTROY_CQ] = ib_uverbs_destroy_cq,
- [IB_USER_VERBS_CMD_CREATE_QP] = ib_uverbs_create_qp,
- [IB_USER_VERBS_CMD_QUERY_QP] = ib_uverbs_query_qp,
- [IB_USER_VERBS_CMD_MODIFY_QP] = ib_uverbs_modify_qp,
- [IB_USER_VERBS_CMD_DESTROY_QP] = ib_uverbs_destroy_qp,
- [IB_USER_VERBS_CMD_POST_SEND] = ib_uverbs_post_send,
- [IB_USER_VERBS_CMD_POST_RECV] = ib_uverbs_post_recv,
- [IB_USER_VERBS_CMD_POST_SRQ_RECV] = ib_uverbs_post_srq_recv,
- [IB_USER_VERBS_CMD_CREATE_AH] = ib_uverbs_create_ah,
- [IB_USER_VERBS_CMD_DESTROY_AH] = ib_uverbs_destroy_ah,
- [IB_USER_VERBS_CMD_ATTACH_MCAST] = ib_uverbs_attach_mcast,
- [IB_USER_VERBS_CMD_DETACH_MCAST] = ib_uverbs_detach_mcast,
- [IB_USER_VERBS_CMD_CREATE_SRQ] = ib_uverbs_create_srq,
- [IB_USER_VERBS_CMD_MODIFY_SRQ] = ib_uverbs_modify_srq,
- [IB_USER_VERBS_CMD_QUERY_SRQ] = ib_uverbs_query_srq,
- [IB_USER_VERBS_CMD_DESTROY_SRQ] = ib_uverbs_destroy_srq,
+ [IB_USER_VERBS_CMD_CREATE_CQ] = ib_uverbs_create_cq,
+ [IB_USER_VERBS_CMD_RESIZE_CQ] = ib_uverbs_resize_cq,
+ [IB_USER_VERBS_CMD_POLL_CQ] = ib_uverbs_poll_cq,
+ [IB_USER_VERBS_CMD_REQ_NOTIFY_CQ] = ib_uverbs_req_notify_cq,
+ [IB_USER_VERBS_CMD_DESTROY_CQ] = ib_uverbs_destroy_cq,
+ [IB_USER_VERBS_CMD_CREATE_QP] = ib_uverbs_create_qp,
+ [IB_USER_VERBS_CMD_QUERY_QP] = ib_uverbs_query_qp,
+ [IB_USER_VERBS_CMD_MODIFY_QP] = ib_uverbs_modify_qp,
+ [IB_USER_VERBS_CMD_DESTROY_QP] = ib_uverbs_destroy_qp,
+ [IB_USER_VERBS_CMD_POST_SEND] = ib_uverbs_post_send,
+ [IB_USER_VERBS_CMD_POST_RECV] = ib_uverbs_post_recv,
+ [IB_USER_VERBS_CMD_POST_SRQ_RECV] = ib_uverbs_post_srq_recv,
+ [IB_USER_VERBS_CMD_CREATE_AH] = ib_uverbs_create_ah,
+ [IB_USER_VERBS_CMD_DESTROY_AH] = ib_uverbs_destroy_ah,
+ [IB_USER_VERBS_CMD_ATTACH_MCAST] = ib_uverbs_attach_mcast,
+ [IB_USER_VERBS_CMD_DETACH_MCAST] = ib_uverbs_detach_mcast,
+ [IB_USER_VERBS_CMD_CREATE_SRQ] = ib_uverbs_create_srq,
+ [IB_USER_VERBS_CMD_MODIFY_SRQ] = ib_uverbs_modify_srq,
+ [IB_USER_VERBS_CMD_QUERY_SRQ] = ib_uverbs_query_srq,
+ [IB_USER_VERBS_CMD_DESTROY_SRQ] = ib_uverbs_destroy_srq,
};
-static struct vfsmount *uverbs_event_mnt;
-
static void ib_uverbs_add_one(struct ib_device *device);
static void ib_uverbs_remove_one(struct ib_device *device);
@@ -370,7 +365,7 @@
static const struct file_operations uverbs_event_fops = {
.owner = THIS_MODULE,
- .read = ib_uverbs_event_read,
+ .read = ib_uverbs_event_read,
.poll = ib_uverbs_event_poll,
.release = ib_uverbs_event_close,
.fasync = ib_uverbs_event_fasync
@@ -492,7 +487,6 @@
int is_async, int *fd)
{
struct ib_uverbs_event_file *ev_file;
- struct path path;
struct file *filp;
int ret;
@@ -515,27 +509,16 @@
goto err;
}
- /*
- * fops_get() can't fail here, because we're coming from a
- * system call on a uverbs file, which will already have a
- * module reference.
- */
- path.mnt = uverbs_event_mnt;
- path.dentry = uverbs_event_mnt->mnt_root;
- path_get(&path);
- filp = alloc_file(&path, FMODE_READ, fops_get(&uverbs_event_fops));
+ filp = anon_inode_getfile("[uverbs-event]", &uverbs_event_fops,
+ ev_file, O_RDONLY);
if (!filp) {
ret = -ENFILE;
goto err_fd;
}
- filp->private_data = ev_file;
-
return filp;
err_fd:
- fops_put(&uverbs_event_fops);
- path_put(&path);
put_unused_fd(*fd);
err:
@@ -617,14 +600,12 @@
/*
* ib_uverbs_open() does not need the BKL:
*
- * - dev_table[] accesses are protected by map_lock, the
- * ib_uverbs_device structures are properly reference counted, and
+ * - the ib_uverbs_device structures are properly reference counted and
* everything else is purely local to the file being created, so
* races against other open calls are not a problem;
* - there is no ioctl method to race against;
- * - the device is added to dev_table[] as the last part of module
- * initialization, the open method will either immediately run
- * -ENXIO, or all required initialization will be done.
+ * - the open method will either immediately return -ENXIO, or all
+ * required initialization will be done.
*/
static int ib_uverbs_open(struct inode *inode, struct file *filp)
{
@@ -632,13 +613,10 @@
struct ib_uverbs_file *file;
int ret;
- spin_lock(&map_lock);
- dev = dev_table[iminor(inode) - IB_UVERBS_BASE_MINOR];
+ dev = container_of(inode->i_cdev, struct ib_uverbs_device, cdev);
if (dev)
kref_get(&dev->ref);
- spin_unlock(&map_lock);
-
- if (!dev)
+ else
return -ENXIO;
if (!try_module_get(dev->ib_dev->owner)) {
@@ -685,17 +663,17 @@
}
static const struct file_operations uverbs_fops = {
- .owner = THIS_MODULE,
- .write = ib_uverbs_write,
- .open = ib_uverbs_open,
+ .owner = THIS_MODULE,
+ .write = ib_uverbs_write,
+ .open = ib_uverbs_open,
.release = ib_uverbs_close
};
static const struct file_operations uverbs_mmap_fops = {
- .owner = THIS_MODULE,
- .write = ib_uverbs_write,
+ .owner = THIS_MODULE,
+ .write = ib_uverbs_write,
.mmap = ib_uverbs_mmap,
- .open = ib_uverbs_open,
+ .open = ib_uverbs_open,
.release = ib_uverbs_close
};
@@ -735,8 +713,38 @@
}
static CLASS_ATTR(abi_version, S_IRUGO, show_abi_version, NULL);
+static dev_t overflow_maj;
+static DECLARE_BITMAP(overflow_map, IB_UVERBS_MAX_DEVICES);
+
+/*
+ * If we have more than IB_UVERBS_MAX_DEVICES, dynamically overflow by
+ * requesting a new major number and doubling the number of max devices we
+ * support. It's stupid, but simple.
+ */
+static int find_overflow_devnum(void)
+{
+ int ret;
+
+ if (!overflow_maj) {
+ ret = alloc_chrdev_region(&overflow_maj, 0, IB_UVERBS_MAX_DEVICES,
+ "infiniband_verbs");
+ if (ret) {
+ printk(KERN_ERR "user_verbs: couldn't register dynamic device number\n");
+ return ret;
+ }
+ }
+
+ ret = find_first_zero_bit(overflow_map, IB_UVERBS_MAX_DEVICES);
+ if (ret >= IB_UVERBS_MAX_DEVICES)
+ return -1;
+
+ return ret;
+}
+
static void ib_uverbs_add_one(struct ib_device *device)
{
+ int devnum;
+ dev_t base;
struct ib_uverbs_device *uverbs_dev;
if (!device->alloc_ucontext)
@@ -750,28 +758,36 @@
init_completion(&uverbs_dev->comp);
spin_lock(&map_lock);
- uverbs_dev->devnum = find_first_zero_bit(dev_map, IB_UVERBS_MAX_DEVICES);
- if (uverbs_dev->devnum >= IB_UVERBS_MAX_DEVICES) {
+ devnum = find_first_zero_bit(dev_map, IB_UVERBS_MAX_DEVICES);
+ if (devnum >= IB_UVERBS_MAX_DEVICES) {
spin_unlock(&map_lock);
- goto err;
+ devnum = find_overflow_devnum();
+ if (devnum < 0)
+ goto err;
+
+ spin_lock(&map_lock);
+ uverbs_dev->devnum = devnum + IB_UVERBS_MAX_DEVICES;
+ base = devnum + overflow_maj;
+ set_bit(devnum, overflow_map);
+ } else {
+ uverbs_dev->devnum = devnum;
+ base = devnum + IB_UVERBS_BASE_DEV;
+ set_bit(devnum, dev_map);
}
- set_bit(uverbs_dev->devnum, dev_map);
spin_unlock(&map_lock);
uverbs_dev->ib_dev = device;
uverbs_dev->num_comp_vectors = device->num_comp_vectors;
- uverbs_dev->cdev = cdev_alloc();
- if (!uverbs_dev->cdev)
- goto err;
- uverbs_dev->cdev->owner = THIS_MODULE;
- uverbs_dev->cdev->ops = device->mmap ? &uverbs_mmap_fops : &uverbs_fops;
- kobject_set_name(&uverbs_dev->cdev->kobj, "uverbs%d", uverbs_dev->devnum);
- if (cdev_add(uverbs_dev->cdev, IB_UVERBS_BASE_DEV + uverbs_dev->devnum, 1))
+ cdev_init(&uverbs_dev->cdev, NULL);
+ uverbs_dev->cdev.owner = THIS_MODULE;
+ uverbs_dev->cdev.ops = device->mmap ? &uverbs_mmap_fops : &uverbs_fops;
+ kobject_set_name(&uverbs_dev->cdev.kobj, "uverbs%d", uverbs_dev->devnum);
+ if (cdev_add(&uverbs_dev->cdev, base, 1))
goto err_cdev;
uverbs_dev->dev = device_create(uverbs_class, device->dma_device,
- uverbs_dev->cdev->dev, uverbs_dev,
+ uverbs_dev->cdev.dev, uverbs_dev,
"uverbs%d", uverbs_dev->devnum);
if (IS_ERR(uverbs_dev->dev))
goto err_cdev;
@@ -781,20 +797,19 @@
if (device_create_file(uverbs_dev->dev, &dev_attr_abi_version))
goto err_class;
- spin_lock(&map_lock);
- dev_table[uverbs_dev->devnum] = uverbs_dev;
- spin_unlock(&map_lock);
-
ib_set_client_data(device, &uverbs_client, uverbs_dev);
return;
err_class:
- device_destroy(uverbs_class, uverbs_dev->cdev->dev);
+ device_destroy(uverbs_class, uverbs_dev->cdev.dev);
err_cdev:
- cdev_del(uverbs_dev->cdev);
- clear_bit(uverbs_dev->devnum, dev_map);
+ cdev_del(&uverbs_dev->cdev);
+ if (uverbs_dev->devnum < IB_UVERBS_MAX_DEVICES)
+ clear_bit(devnum, dev_map);
+ else
+ clear_bit(devnum, overflow_map);
err:
kref_put(&uverbs_dev->ref, ib_uverbs_release_dev);
@@ -811,35 +826,19 @@
return;
dev_set_drvdata(uverbs_dev->dev, NULL);
- device_destroy(uverbs_class, uverbs_dev->cdev->dev);
- cdev_del(uverbs_dev->cdev);
+ device_destroy(uverbs_class, uverbs_dev->cdev.dev);
+ cdev_del(&uverbs_dev->cdev);
- spin_lock(&map_lock);
- dev_table[uverbs_dev->devnum] = NULL;
- spin_unlock(&map_lock);
-
- clear_bit(uverbs_dev->devnum, dev_map);
+ if (uverbs_dev->devnum < IB_UVERBS_MAX_DEVICES)
+ clear_bit(uverbs_dev->devnum, dev_map);
+ else
+ clear_bit(uverbs_dev->devnum - IB_UVERBS_MAX_DEVICES, overflow_map);
kref_put(&uverbs_dev->ref, ib_uverbs_release_dev);
wait_for_completion(&uverbs_dev->comp);
kfree(uverbs_dev);
}
-static int uverbs_event_get_sb(struct file_system_type *fs_type, int flags,
- const char *dev_name, void *data,
- struct vfsmount *mnt)
-{
- return get_sb_pseudo(fs_type, "infinibandevent:", NULL,
- INFINIBANDEVENTFS_MAGIC, mnt);
-}
-
-static struct file_system_type uverbs_event_fs = {
- /* No owner field so module can be unloaded */
- .name = "infinibandeventfs",
- .get_sb = uverbs_event_get_sb,
- .kill_sb = kill_litter_super
-};
-
static int __init ib_uverbs_init(void)
{
int ret;
@@ -864,33 +863,14 @@
goto out_class;
}
- ret = register_filesystem(&uverbs_event_fs);
- if (ret) {
- printk(KERN_ERR "user_verbs: couldn't register infinibandeventfs\n");
- goto out_class;
- }
-
- uverbs_event_mnt = kern_mount(&uverbs_event_fs);
- if (IS_ERR(uverbs_event_mnt)) {
- ret = PTR_ERR(uverbs_event_mnt);
- printk(KERN_ERR "user_verbs: couldn't mount infinibandeventfs\n");
- goto out_fs;
- }
-
ret = ib_register_client(&uverbs_client);
if (ret) {
printk(KERN_ERR "user_verbs: couldn't register client\n");
- goto out_mnt;
+ goto out_class;
}
return 0;
-out_mnt:
- mntput(uverbs_event_mnt);
-
-out_fs:
- unregister_filesystem(&uverbs_event_fs);
-
out_class:
class_destroy(uverbs_class);
@@ -904,10 +884,10 @@
static void __exit ib_uverbs_cleanup(void)
{
ib_unregister_client(&uverbs_client);
- mntput(uverbs_event_mnt);
- unregister_filesystem(&uverbs_event_fs);
class_destroy(uverbs_class);
unregister_chrdev_region(IB_UVERBS_BASE_DEV, IB_UVERBS_MAX_DEVICES);
+ if (overflow_maj)
+ unregister_chrdev_region(overflow_maj, IB_UVERBS_MAX_DEVICES);
idr_destroy(&ib_uverbs_pd_idr);
idr_destroy(&ib_uverbs_mr_idr);
idr_destroy(&ib_uverbs_mw_idr);
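
The uverbs hunks above drop the global dev_table[] and instead recover the device in open() with container_of() on the embedded cdev. A minimal userspace sketch of that pointer-recovery pattern, with illustrative structure names rather than the kernel's:

    #include <stddef.h>
    #include <stdio.h>

    /* illustrative stand-ins for struct cdev / struct ib_uverbs_device */
    struct fake_cdev { int minor; };

    struct fake_uverbs_device {
        int refcount;
        struct fake_cdev cdev;              /* embedded, as in the patch */
    };

    /* recover the enclosing structure from a pointer to an embedded member */
    #define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

    int main(void)
    {
        struct fake_uverbs_device dev = { .refcount = 1, .cdev = { .minor = 192 } };
        struct fake_cdev *member = &dev.cdev;    /* what inode->i_cdev would hand back */
        struct fake_uverbs_device *outer =
            container_of(member, struct fake_uverbs_device, cdev);

        printf("%d\n", outer->refcount);         /* prints 1: back at the device */
        return 0;
    }
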
diff --git a/drivers/infiniband/hw/cxgb3/cxio_hal.c b/drivers/infiniband/hw/cxgb3/cxio_hal.c
index 0677fc7..a28e862 100644
--- a/drivers/infiniband/hw/cxgb3/cxio_hal.c
+++ b/drivers/infiniband/hw/cxgb3/cxio_hal.c
@@ -109,7 +109,6 @@
while (!CQ_VLD_ENTRY(rptr, cq->size_log2, cqe)) {
udelay(1);
if (i++ > 1000000) {
- BUG_ON(1);
printk(KERN_ERR "%s: stalled rnic\n",
rdev_p->dev_name);
return -EIO;
@@ -155,7 +154,7 @@
return iwch_cxgb3_ofld_send(rdev_p->t3cdev_p, skb);
}
-int cxio_create_cq(struct cxio_rdev *rdev_p, struct t3_cq *cq)
+int cxio_create_cq(struct cxio_rdev *rdev_p, struct t3_cq *cq, int kernel)
{
struct rdma_cq_setup setup;
int size = (1UL << (cq->size_log2)) * sizeof(struct t3_cqe);
@@ -163,12 +162,12 @@
cq->cqid = cxio_hal_get_cqid(rdev_p->rscp);
if (!cq->cqid)
return -ENOMEM;
- cq->sw_queue = kzalloc(size, GFP_KERNEL);
- if (!cq->sw_queue)
- return -ENOMEM;
- cq->queue = dma_alloc_coherent(&(rdev_p->rnic_info.pdev->dev),
- (1UL << (cq->size_log2)) *
- sizeof(struct t3_cqe),
+ if (kernel) {
+ cq->sw_queue = kzalloc(size, GFP_KERNEL);
+ if (!cq->sw_queue)
+ return -ENOMEM;
+ }
+ cq->queue = dma_alloc_coherent(&(rdev_p->rnic_info.pdev->dev), size,
&(cq->dma_addr), GFP_KERNEL);
if (!cq->queue) {
kfree(cq->sw_queue);
diff --git a/drivers/infiniband/hw/cxgb3/cxio_hal.h b/drivers/infiniband/hw/cxgb3/cxio_hal.h
index f3d440c..073373c 100644
--- a/drivers/infiniband/hw/cxgb3/cxio_hal.h
+++ b/drivers/infiniband/hw/cxgb3/cxio_hal.h
@@ -53,7 +53,7 @@
#define T3_MAX_PBL_SIZE 256
#define T3_MAX_RQ_SIZE 1024
#define T3_MAX_QP_DEPTH (T3_MAX_RQ_SIZE-1)
-#define T3_MAX_CQ_DEPTH 8192
+#define T3_MAX_CQ_DEPTH 262144
#define T3_MAX_NUM_STAG (1<<15)
#define T3_MAX_MR_SIZE 0x100000000ULL
#define T3_PAGESIZE_MASK 0xffff000 /* 4KB-128MB */
@@ -157,7 +157,7 @@
void cxio_rdev_close(struct cxio_rdev *rdev);
int cxio_hal_cq_op(struct cxio_rdev *rdev, struct t3_cq *cq,
enum t3_cq_opcode op, u32 credit);
-int cxio_create_cq(struct cxio_rdev *rdev, struct t3_cq *cq);
+int cxio_create_cq(struct cxio_rdev *rdev, struct t3_cq *cq, int kernel);
int cxio_destroy_cq(struct cxio_rdev *rdev, struct t3_cq *cq);
int cxio_resize_cq(struct cxio_rdev *rdev, struct t3_cq *cq);
void cxio_release_ucontext(struct cxio_rdev *rdev, struct cxio_ucontext *uctx);
diff --git a/drivers/infiniband/hw/cxgb3/cxio_wr.h b/drivers/infiniband/hw/cxgb3/cxio_wr.h
index a197a5b..15073b2 100644
--- a/drivers/infiniband/hw/cxgb3/cxio_wr.h
+++ b/drivers/infiniband/hw/cxgb3/cxio_wr.h
@@ -730,7 +730,22 @@
static inline void cxio_set_wq_in_error(struct t3_wq *wq)
{
- wq->queue->wq_in_err.err = 1;
+ wq->queue->wq_in_err.err |= 1;
+}
+
+static inline void cxio_disable_wq_db(struct t3_wq *wq)
+{
+ wq->queue->wq_in_err.err |= 2;
+}
+
+static inline void cxio_enable_wq_db(struct t3_wq *wq)
+{
+ wq->queue->wq_in_err.err &= ~2;
+}
+
+static inline int cxio_wq_db_enabled(struct t3_wq *wq)
+{
+ return !(wq->queue->wq_in_err.err & 2);
}
static inline struct t3_cqe *cxio_next_hw_cqe(struct t3_cq *cq)
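
The new inlines above treat wq_in_err.err as a small flag word: bit 0 marks the queue as in error and bit 1 suppresses doorbell rings. A short sketch of that flag discipline, using local macro names rather than the driver's structures:

    #include <stdio.h>

    #define WQ_FLAG_ERROR        0x1    /* bit 0: queue is in error         */
    #define WQ_FLAG_DB_DISABLED  0x2    /* bit 1: doorbell rings suppressed */

    int main(void)
    {
        unsigned int err = 0;

        err |= WQ_FLAG_DB_DISABLED;                 /* cxio_disable_wq_db()    */
        printf("db enabled? %d\n", !(err & WQ_FLAG_DB_DISABLED));   /* 0 */

        err &= ~WQ_FLAG_DB_DISABLED;                /* cxio_enable_wq_db()     */
        err |= WQ_FLAG_ERROR;                       /* cxio_set_wq_in_error()  */
        printf("db enabled? %d\n", !(err & WQ_FLAG_DB_DISABLED));   /* 1 */
        return 0;
    }
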
diff --git a/drivers/infiniband/hw/cxgb3/iwch.c b/drivers/infiniband/hw/cxgb3/iwch.c
index b0ea010..ee1d8b4 100644
--- a/drivers/infiniband/hw/cxgb3/iwch.c
+++ b/drivers/infiniband/hw/cxgb3/iwch.c
@@ -65,6 +65,46 @@
static LIST_HEAD(dev_list);
static DEFINE_MUTEX(dev_mutex);
+static int disable_qp_db(int id, void *p, void *data)
+{
+ struct iwch_qp *qhp = p;
+
+ cxio_disable_wq_db(&qhp->wq);
+ return 0;
+}
+
+static int enable_qp_db(int id, void *p, void *data)
+{
+ struct iwch_qp *qhp = p;
+
+ if (data)
+ ring_doorbell(qhp->rhp->rdev.ctrl_qp.doorbell, qhp->wq.qpid);
+ cxio_enable_wq_db(&qhp->wq);
+ return 0;
+}
+
+static void disable_dbs(struct iwch_dev *rnicp)
+{
+ spin_lock_irq(&rnicp->lock);
+ idr_for_each(&rnicp->qpidr, disable_qp_db, NULL);
+ spin_unlock_irq(&rnicp->lock);
+}
+
+static void enable_dbs(struct iwch_dev *rnicp, int ring_db)
+{
+ spin_lock_irq(&rnicp->lock);
+ idr_for_each(&rnicp->qpidr, enable_qp_db,
+ (void *)(unsigned long)ring_db);
+ spin_unlock_irq(&rnicp->lock);
+}
+
+static void iwch_db_drop_task(struct work_struct *work)
+{
+ struct iwch_dev *rnicp = container_of(work, struct iwch_dev,
+ db_drop_task.work);
+ enable_dbs(rnicp, 1);
+}
+
static void rnic_init(struct iwch_dev *rnicp)
{
PDBG("%s iwch_dev %p\n", __func__, rnicp);
@@ -72,6 +112,7 @@
idr_init(&rnicp->qpidr);
idr_init(&rnicp->mmidr);
spin_lock_init(&rnicp->lock);
+ INIT_DELAYED_WORK(&rnicp->db_drop_task, iwch_db_drop_task);
rnicp->attr.max_qps = T3_MAX_NUM_QP - 32;
rnicp->attr.max_wrs = T3_MAX_QP_DEPTH;
@@ -147,6 +188,8 @@
mutex_lock(&dev_mutex);
list_for_each_entry_safe(dev, tmp, &dev_list, entry) {
if (dev->rdev.t3cdev_p == tdev) {
+ dev->rdev.flags = CXIO_ERROR_FATAL;
+ cancel_delayed_work_sync(&dev->db_drop_task);
list_del(&dev->entry);
iwch_unregister_device(dev);
cxio_rdev_close(&dev->rdev);
@@ -165,7 +208,8 @@
struct cxio_rdev *rdev = tdev->ulp;
struct iwch_dev *rnicp;
struct ib_event event;
- u32 portnum = port_id + 1;
+ u32 portnum = port_id + 1;
+ int dispatch = 0;
if (!rdev)
return;
@@ -174,21 +218,49 @@
case OFFLOAD_STATUS_DOWN: {
rdev->flags = CXIO_ERROR_FATAL;
event.event = IB_EVENT_DEVICE_FATAL;
+ dispatch = 1;
break;
}
case OFFLOAD_PORT_DOWN: {
event.event = IB_EVENT_PORT_ERR;
+ dispatch = 1;
break;
}
case OFFLOAD_PORT_UP: {
event.event = IB_EVENT_PORT_ACTIVE;
+ dispatch = 1;
+ break;
+ }
+ case OFFLOAD_DB_FULL: {
+ disable_dbs(rnicp);
+ break;
+ }
+ case OFFLOAD_DB_EMPTY: {
+ enable_dbs(rnicp, 1);
+ break;
+ }
+ case OFFLOAD_DB_DROP: {
+ unsigned long delay = 1000;
+ unsigned short r;
+
+ disable_dbs(rnicp);
+ get_random_bytes(&r, 2);
+ delay += r & 1023;
+
+ /*
+ * delay is between 1000-2023 usecs.
+ */
+ schedule_delayed_work(&rnicp->db_drop_task,
+ usecs_to_jiffies(delay));
break;
}
}
- event.device = &rnicp->ibdev;
- event.element.port_num = portnum;
- ib_dispatch_event(&event);
+ if (dispatch) {
+ event.device = &rnicp->ibdev;
+ event.element.port_num = portnum;
+ ib_dispatch_event(&event);
+ }
return;
}
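
On OFFLOAD_DB_DROP the handler above disables doorbells and re-enables them after a randomized 1000-2023 usec delay, so the queues do not all ring back simultaneously. A userspace sketch of the same arithmetic, with rand() standing in for get_random_bytes() and the jiffies conversion omitted:

    #include <stdio.h>
    #include <stdlib.h>
    #include <time.h>

    int main(void)
    {
        srand((unsigned)time(NULL));

        unsigned short r = (unsigned short)rand();
        unsigned long delay_us = 1000 + (r & 1023);    /* 1000..2023 usecs */

        printf("re-enable doorbells after %lu usecs\n", delay_us);
        return 0;
    }
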
diff --git a/drivers/infiniband/hw/cxgb3/iwch.h b/drivers/infiniband/hw/cxgb3/iwch.h
index 8473550..a1c4457 100644
--- a/drivers/infiniband/hw/cxgb3/iwch.h
+++ b/drivers/infiniband/hw/cxgb3/iwch.h
@@ -36,6 +36,7 @@
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/idr.h>
+#include <linux/workqueue.h>
#include <rdma/ib_verbs.h>
@@ -110,6 +111,7 @@
struct idr mmidr;
spinlock_t lock;
struct list_head entry;
+ struct delayed_work db_drop_task;
};
static inline struct iwch_dev *to_iwch_dev(struct ib_device *ibdev)
diff --git a/drivers/infiniband/hw/cxgb3/iwch_provider.c b/drivers/infiniband/hw/cxgb3/iwch_provider.c
index ed71755..47b35c6 100644
--- a/drivers/infiniband/hw/cxgb3/iwch_provider.c
+++ b/drivers/infiniband/hw/cxgb3/iwch_provider.c
@@ -187,7 +187,7 @@
entries = roundup_pow_of_two(entries);
chp->cq.size_log2 = ilog2(entries);
- if (cxio_create_cq(&rhp->rdev, &chp->cq)) {
+ if (cxio_create_cq(&rhp->rdev, &chp->cq, !ucontext)) {
kfree(chp);
return ERR_PTR(-ENOMEM);
}
diff --git a/drivers/infiniband/hw/cxgb3/iwch_qp.c b/drivers/infiniband/hw/cxgb3/iwch_qp.c
index 3eb8cec..b4d893d 100644
--- a/drivers/infiniband/hw/cxgb3/iwch_qp.c
+++ b/drivers/infiniband/hw/cxgb3/iwch_qp.c
@@ -452,7 +452,8 @@
++(qhp->wq.sq_wptr);
}
spin_unlock_irqrestore(&qhp->lock, flag);
- ring_doorbell(qhp->wq.doorbell, qhp->wq.qpid);
+ if (cxio_wq_db_enabled(&qhp->wq))
+ ring_doorbell(qhp->wq.doorbell, qhp->wq.qpid);
out:
if (err)
@@ -514,7 +515,8 @@
num_wrs--;
}
spin_unlock_irqrestore(&qhp->lock, flag);
- ring_doorbell(qhp->wq.doorbell, qhp->wq.qpid);
+ if (cxio_wq_db_enabled(&qhp->wq))
+ ring_doorbell(qhp->wq.doorbell, qhp->wq.qpid);
out:
if (err)
@@ -597,7 +599,8 @@
++(qhp->wq.sq_wptr);
spin_unlock_irqrestore(&qhp->lock, flag);
- ring_doorbell(qhp->wq.doorbell, qhp->wq.qpid);
+ if (cxio_wq_db_enabled(&qhp->wq))
+ ring_doorbell(qhp->wq.doorbell, qhp->wq.qpid);
return err;
}
diff --git a/drivers/infiniband/hw/ehca/ehca_irq.c b/drivers/infiniband/hw/ehca/ehca_irq.c
index 42be0b1..b2b6fea 100644
--- a/drivers/infiniband/hw/ehca/ehca_irq.c
+++ b/drivers/infiniband/hw/ehca/ehca_irq.c
@@ -548,11 +548,10 @@
struct ehca_eq *eq = &shca->eq;
struct ehca_eqe_cache_entry *eqe_cache = eq->eqe_cache;
u64 eqe_value, ret;
- unsigned long flags;
int eqe_cnt, i;
int eq_empty = 0;
- spin_lock_irqsave(&eq->irq_spinlock, flags);
+ spin_lock(&eq->irq_spinlock);
if (is_irq) {
const int max_query_cnt = 100;
int query_cnt = 0;
@@ -643,7 +642,7 @@
} while (1);
unlock_irq_spinlock:
- spin_unlock_irqrestore(&eq->irq_spinlock, flags);
+ spin_unlock(&eq->irq_spinlock);
}
void ehca_tasklet_eq(unsigned long data)
diff --git a/drivers/infiniband/hw/ehca/ehca_qp.c b/drivers/infiniband/hw/ehca/ehca_qp.c
index 0338f1f..b105f66 100644
--- a/drivers/infiniband/hw/ehca/ehca_qp.c
+++ b/drivers/infiniband/hw/ehca/ehca_qp.c
@@ -55,9 +55,7 @@
/*
* attributes not supported by query qp
*/
-#define QP_ATTR_QUERY_NOT_SUPPORTED (IB_QP_MAX_DEST_RD_ATOMIC | \
- IB_QP_MAX_QP_RD_ATOMIC | \
- IB_QP_ACCESS_FLAGS | \
+#define QP_ATTR_QUERY_NOT_SUPPORTED (IB_QP_ACCESS_FLAGS | \
IB_QP_EN_SQD_ASYNC_NOTIFY)
/*
diff --git a/drivers/infiniband/hw/ehca/ehca_sqp.c b/drivers/infiniband/hw/ehca/ehca_sqp.c
index 8c1213f..dba8f9f 100644
--- a/drivers/infiniband/hw/ehca/ehca_sqp.c
+++ b/drivers/infiniband/hw/ehca/ehca_sqp.c
@@ -222,7 +222,7 @@
{
int ret;
- if (!port_num || port_num > ibdev->phys_port_cnt)
+ if (!port_num || port_num > ibdev->phys_port_cnt || !in_wc)
return IB_MAD_RESULT_FAILURE;
/* accept only pma request */
diff --git a/drivers/infiniband/hw/ipath/ipath_user_pages.c b/drivers/infiniband/hw/ipath/ipath_user_pages.c
index 82878e3..eb7d59a 100644
--- a/drivers/infiniband/hw/ipath/ipath_user_pages.c
+++ b/drivers/infiniband/hw/ipath/ipath_user_pages.c
@@ -59,8 +59,7 @@
size_t got;
int ret;
- lock_limit = current->signal->rlim[RLIMIT_MEMLOCK].rlim_cur >>
- PAGE_SHIFT;
+ lock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
if (num_pages > lock_limit) {
ret = -ENOMEM;
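
The ipath change above swaps the open-coded signal->rlim access for the rlimit() helper; either way the RLIMIT_MEMLOCK byte limit is converted to a page count before comparing against the pinning request. A userspace analogue using getrlimit(2):

    #include <stdio.h>
    #include <sys/resource.h>
    #include <unistd.h>

    int main(void)
    {
        struct rlimit rl;

        if (getrlimit(RLIMIT_MEMLOCK, &rl) != 0)
            return 1;

        long page_size = sysconf(_SC_PAGESIZE);
        unsigned long lock_limit = rl.rlim_cur / (unsigned long)page_size;

        printf("RLIMIT_MEMLOCK allows pinning %lu pages\n", lock_limit);
        return 0;
    }
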
diff --git a/drivers/infiniband/hw/mlx4/qp.c b/drivers/infiniband/hw/mlx4/qp.c
index 2a97c96..ae75389 100644
--- a/drivers/infiniband/hw/mlx4/qp.c
+++ b/drivers/infiniband/hw/mlx4/qp.c
@@ -1214,7 +1214,7 @@
static int build_mlx_header(struct mlx4_ib_sqp *sqp, struct ib_send_wr *wr,
void *wqe, unsigned *mlx_seg_len)
{
- struct ib_device *ib_dev = &to_mdev(sqp->qp.ibqp.device)->ib_dev;
+ struct ib_device *ib_dev = sqp->qp.ibqp.device;
struct mlx4_wqe_mlx_seg *mlx = wqe;
struct mlx4_wqe_inline_seg *inl = wqe + sizeof *mlx;
struct mlx4_ib_ah *ah = to_mah(wr->wr.ud.ah);
@@ -1228,7 +1228,7 @@
for (i = 0; i < wr->num_sge; ++i)
send_size += wr->sg_list[i].length;
- ib_ud_header_init(send_size, mlx4_ib_ah_grh_present(ah), &sqp->ud_header);
+ ib_ud_header_init(send_size, mlx4_ib_ah_grh_present(ah), 0, &sqp->ud_header);
sqp->ud_header.lrh.service_level =
be32_to_cpu(ah->av.sl_tclass_flowlabel) >> 28;
diff --git a/drivers/infiniband/hw/mthca/mthca_qp.c b/drivers/infiniband/hw/mthca/mthca_qp.c
index c10576f..d2d172e 100644
--- a/drivers/infiniband/hw/mthca/mthca_qp.c
+++ b/drivers/infiniband/hw/mthca/mthca_qp.c
@@ -1494,7 +1494,7 @@
u16 pkey;
ib_ud_header_init(256, /* assume a MAD */
- mthca_ah_grh_present(to_mah(wr->wr.ud.ah)),
+ mthca_ah_grh_present(to_mah(wr->wr.ud.ah)), 0,
&sqp->ud_header);
err = mthca_read_ah(dev, to_mah(wr->wr.ud.ah), &sqp->ud_header);
diff --git a/drivers/infiniband/hw/nes/nes.c b/drivers/infiniband/hw/nes/nes.c
index b9d09ba..4272c52 100644
--- a/drivers/infiniband/hw/nes/nes.c
+++ b/drivers/infiniband/hw/nes/nes.c
@@ -110,6 +110,7 @@
static struct pci_device_id nes_pci_table[] = {
{PCI_VENDOR_ID_NETEFFECT, PCI_DEVICE_ID_NETEFFECT_NE020, PCI_ANY_ID, PCI_ANY_ID},
+ {PCI_VENDOR_ID_NETEFFECT, PCI_DEVICE_ID_NETEFFECT_NE020_KR, PCI_ANY_ID, PCI_ANY_ID},
{0}
};
diff --git a/drivers/infiniband/hw/nes/nes.h b/drivers/infiniband/hw/nes/nes.h
index 9884056..cc78fee 100644
--- a/drivers/infiniband/hw/nes/nes.h
+++ b/drivers/infiniband/hw/nes/nes.h
@@ -64,8 +64,9 @@
* NetEffect PCI vendor id and NE010 PCI device id.
*/
#ifndef PCI_VENDOR_ID_NETEFFECT /* not in pci.ids yet */
-#define PCI_VENDOR_ID_NETEFFECT 0x1678
-#define PCI_DEVICE_ID_NETEFFECT_NE020 0x0100
+#define PCI_VENDOR_ID_NETEFFECT 0x1678
+#define PCI_DEVICE_ID_NETEFFECT_NE020 0x0100
+#define PCI_DEVICE_ID_NETEFFECT_NE020_KR 0x0110
#endif
#define NE020_REV 4
@@ -193,8 +194,8 @@
extern u32 cm_packets_received;
extern u32 cm_packets_dropped;
extern u32 cm_packets_retrans;
-extern u32 cm_listens_created;
-extern u32 cm_listens_destroyed;
+extern atomic_t cm_listens_created;
+extern atomic_t cm_listens_destroyed;
extern u32 cm_backlog_drops;
extern atomic_t cm_loopbacks;
extern atomic_t cm_nodes_created;
diff --git a/drivers/infiniband/hw/nes/nes_cm.c b/drivers/infiniband/hw/nes/nes_cm.c
index 39468c2..2a49ee4 100644
--- a/drivers/infiniband/hw/nes/nes_cm.c
+++ b/drivers/infiniband/hw/nes/nes_cm.c
@@ -67,8 +67,8 @@
u32 cm_packets_retrans;
u32 cm_packets_created;
u32 cm_packets_received;
-u32 cm_listens_created;
-u32 cm_listens_destroyed;
+atomic_t cm_listens_created;
+atomic_t cm_listens_destroyed;
u32 cm_backlog_drops;
atomic_t cm_loopbacks;
atomic_t cm_nodes_created;
@@ -1011,9 +1011,10 @@
event.cm_info.loc_port =
loopback->loc_port;
event.cm_info.cm_id = loopback->cm_id;
+ add_ref_cm_node(loopback);
+ loopback->state = NES_CM_STATE_CLOSED;
cm_event_connect_error(&event);
cm_node->state = NES_CM_STATE_LISTENER_DESTROYED;
- loopback->state = NES_CM_STATE_CLOSED;
rem_ref_cm_node(cm_node->cm_core,
cm_node);
@@ -1042,7 +1043,7 @@
kfree(listener);
listener = NULL;
ret = 0;
- cm_listens_destroyed++;
+ atomic_inc(&cm_listens_destroyed);
} else {
spin_unlock_irqrestore(&cm_core->listen_list_lock, flags);
}
@@ -3172,7 +3173,7 @@
g_cm_core->api->stop_listener(g_cm_core, (void *)cm_node);
return err;
}
- cm_listens_created++;
+ atomic_inc(&cm_listens_created);
}
cm_id->add_ref(cm_id);
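
cm_listens_created and cm_listens_destroyed become atomic_t above because they are incremented from contexts that can run concurrently, where a plain u32 read-modify-write can lose counts. A userspace analogue of the same idea with C11 atomics (the kernel uses atomic_inc()/atomic_read() instead):

    #include <stdatomic.h>
    #include <stdio.h>

    static atomic_int cm_listens_created;      /* analogue of the kernel atomic_t */

    static void on_listener_created(void)
    {
        /* safe even if several contexts create listeners concurrently */
        atomic_fetch_add(&cm_listens_created, 1);
    }

    int main(void)
    {
        on_listener_created();
        on_listener_created();
        printf("listeners created: %d\n", atomic_load(&cm_listens_created));
        return 0;
    }
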
diff --git a/drivers/infiniband/hw/nes/nes_hw.c b/drivers/infiniband/hw/nes/nes_hw.c
index b1c2cbb..ce7f538 100644
--- a/drivers/infiniband/hw/nes/nes_hw.c
+++ b/drivers/infiniband/hw/nes/nes_hw.c
@@ -748,16 +748,28 @@
if (hw_rev != NE020_REV) {
/* init serdes 0 */
- if (wide_ppm_offset && (nesadapter->phy_type[0] == NES_PHY_TYPE_CX4))
- nes_write_indexed(nesdev, NES_IDX_ETH_SERDES_CDR_CONTROL0, 0x000FFFAA);
- else
+ switch (nesadapter->phy_type[0]) {
+ case NES_PHY_TYPE_CX4:
+ if (wide_ppm_offset)
+ nes_write_indexed(nesdev, NES_IDX_ETH_SERDES_CDR_CONTROL0, 0x000FFFAA);
+ else
+ nes_write_indexed(nesdev, NES_IDX_ETH_SERDES_CDR_CONTROL0, 0x000000FF);
+ break;
+ case NES_PHY_TYPE_KR:
nes_write_indexed(nesdev, NES_IDX_ETH_SERDES_CDR_CONTROL0, 0x000000FF);
-
- if (nesadapter->phy_type[0] == NES_PHY_TYPE_PUMA_1G) {
+ nes_write_indexed(nesdev, NES_IDX_ETH_SERDES_TX_EMP0, 0x00000000);
+ break;
+ case NES_PHY_TYPE_PUMA_1G:
+ nes_write_indexed(nesdev, NES_IDX_ETH_SERDES_CDR_CONTROL0, 0x000000FF);
sds = nes_read_indexed(nesdev, NES_IDX_ETH_SERDES_COMMON_CONTROL0);
sds |= 0x00000100;
nes_write_indexed(nesdev, NES_IDX_ETH_SERDES_COMMON_CONTROL0, sds);
+ break;
+ default:
+ nes_write_indexed(nesdev, NES_IDX_ETH_SERDES_CDR_CONTROL0, 0x000000FF);
+ break;
}
+
if (!OneG_Mode)
nes_write_indexed(nesdev, NES_IDX_ETH_SERDES_TX_HIGHZ_LANE_MODE0, 0x11110000);
@@ -778,6 +790,9 @@
if (wide_ppm_offset)
nes_write_indexed(nesdev, NES_IDX_ETH_SERDES_CDR_CONTROL1, 0x000FFFAA);
break;
+ case NES_PHY_TYPE_KR:
+ nes_write_indexed(nesdev, NES_IDX_ETH_SERDES_TX_EMP1, 0x00000000);
+ break;
case NES_PHY_TYPE_PUMA_1G:
sds = nes_read_indexed(nesdev, NES_IDX_ETH_SERDES_COMMON_CONTROL1);
sds |= 0x000000100;
@@ -1279,115 +1294,100 @@
/**
- * nes_init_phy
+ * nes_init_1g_phy
*/
-int nes_init_phy(struct nes_device *nesdev)
+int nes_init_1g_phy(struct nes_device *nesdev, u8 phy_type, u8 phy_index)
{
- struct nes_adapter *nesadapter = nesdev->nesadapter;
+ u32 counter = 0;
+ u16 phy_data;
+ int ret = 0;
+
+ nes_read_1G_phy_reg(nesdev, 1, phy_index, &phy_data);
+ nes_write_1G_phy_reg(nesdev, 23, phy_index, 0xb000);
+
+ /* Reset the PHY */
+ nes_write_1G_phy_reg(nesdev, 0, phy_index, 0x8000);
+ udelay(100);
+ counter = 0;
+ do {
+ nes_read_1G_phy_reg(nesdev, 0, phy_index, &phy_data);
+ if (counter++ > 100) {
+ ret = -1;
+ break;
+ }
+ } while (phy_data & 0x8000);
+
+ /* Setting no phy loopback */
+ phy_data &= 0xbfff;
+ phy_data |= 0x1140;
+ nes_write_1G_phy_reg(nesdev, 0, phy_index, phy_data);
+ nes_read_1G_phy_reg(nesdev, 0, phy_index, &phy_data);
+ nes_read_1G_phy_reg(nesdev, 0x17, phy_index, &phy_data);
+ nes_read_1G_phy_reg(nesdev, 0x1e, phy_index, &phy_data);
+
+ /* Setting the interrupt mask */
+ nes_read_1G_phy_reg(nesdev, 0x19, phy_index, &phy_data);
+ nes_write_1G_phy_reg(nesdev, 0x19, phy_index, 0xffee);
+ nes_read_1G_phy_reg(nesdev, 0x19, phy_index, &phy_data);
+
+ /* turning on flow control */
+ nes_read_1G_phy_reg(nesdev, 4, phy_index, &phy_data);
+ nes_write_1G_phy_reg(nesdev, 4, phy_index, (phy_data & ~(0x03E0)) | 0xc00);
+ nes_read_1G_phy_reg(nesdev, 4, phy_index, &phy_data);
+
+ /* Clear Half duplex */
+ nes_read_1G_phy_reg(nesdev, 9, phy_index, &phy_data);
+ nes_write_1G_phy_reg(nesdev, 9, phy_index, phy_data & ~(0x0100));
+ nes_read_1G_phy_reg(nesdev, 9, phy_index, &phy_data);
+
+ nes_read_1G_phy_reg(nesdev, 0, phy_index, &phy_data);
+ nes_write_1G_phy_reg(nesdev, 0, phy_index, phy_data | 0x0300);
+
+ return ret;
+}
+
+
+/**
+ * nes_init_2025_phy
+ */
+int nes_init_2025_phy(struct nes_device *nesdev, u8 phy_type, u8 phy_index)
+{
+ u32 temp_phy_data = 0;
+ u32 temp_phy_data2 = 0;
u32 counter = 0;
u32 sds;
u32 mac_index = nesdev->mac_index;
- u32 tx_config = 0;
- u16 phy_data;
- u32 temp_phy_data = 0;
- u32 temp_phy_data2 = 0;
- u8 phy_type = nesadapter->phy_type[mac_index];
- u8 phy_index = nesadapter->phy_index[mac_index];
+ int ret = 0;
+ unsigned int first_attempt = 1;
- if ((nesadapter->OneG_Mode) &&
- (phy_type != NES_PHY_TYPE_PUMA_1G)) {
- nes_debug(NES_DBG_PHY, "1G PHY, mac_index = %d.\n", mac_index);
- if (phy_type == NES_PHY_TYPE_1G) {
- tx_config = nes_read_indexed(nesdev, NES_IDX_MAC_TX_CONFIG);
- tx_config &= 0xFFFFFFE3;
- tx_config |= 0x04;
- nes_write_indexed(nesdev, NES_IDX_MAC_TX_CONFIG, tx_config);
- }
+ /* Check firmware heartbeat */
+ nes_read_10G_phy_reg(nesdev, phy_index, 0x3, 0xd7ee);
+ temp_phy_data = (u16)nes_read_indexed(nesdev, NES_IDX_MAC_MDIO_CONTROL);
+ udelay(1500);
+ nes_read_10G_phy_reg(nesdev, phy_index, 0x3, 0xd7ee);
+ temp_phy_data2 = (u16)nes_read_indexed(nesdev, NES_IDX_MAC_MDIO_CONTROL);
- nes_read_1G_phy_reg(nesdev, 1, phy_index, &phy_data);
- nes_write_1G_phy_reg(nesdev, 23, phy_index, 0xb000);
-
- /* Reset the PHY */
- nes_write_1G_phy_reg(nesdev, 0, phy_index, 0x8000);
- udelay(100);
- counter = 0;
- do {
- nes_read_1G_phy_reg(nesdev, 0, phy_index, &phy_data);
- if (counter++ > 100)
- break;
- } while (phy_data & 0x8000);
-
- /* Setting no phy loopback */
- phy_data &= 0xbfff;
- phy_data |= 0x1140;
- nes_write_1G_phy_reg(nesdev, 0, phy_index, phy_data);
- nes_read_1G_phy_reg(nesdev, 0, phy_index, &phy_data);
- nes_read_1G_phy_reg(nesdev, 0x17, phy_index, &phy_data);
- nes_read_1G_phy_reg(nesdev, 0x1e, phy_index, &phy_data);
-
- /* Setting the interrupt mask */
- nes_read_1G_phy_reg(nesdev, 0x19, phy_index, &phy_data);
- nes_write_1G_phy_reg(nesdev, 0x19, phy_index, 0xffee);
- nes_read_1G_phy_reg(nesdev, 0x19, phy_index, &phy_data);
-
- /* turning on flow control */
- nes_read_1G_phy_reg(nesdev, 4, phy_index, &phy_data);
- nes_write_1G_phy_reg(nesdev, 4, phy_index, (phy_data & ~(0x03E0)) | 0xc00);
- nes_read_1G_phy_reg(nesdev, 4, phy_index, &phy_data);
-
- /* Clear Half duplex */
- nes_read_1G_phy_reg(nesdev, 9, phy_index, &phy_data);
- nes_write_1G_phy_reg(nesdev, 9, phy_index, phy_data & ~(0x0100));
- nes_read_1G_phy_reg(nesdev, 9, phy_index, &phy_data);
-
- nes_read_1G_phy_reg(nesdev, 0, phy_index, &phy_data);
- nes_write_1G_phy_reg(nesdev, 0, phy_index, phy_data | 0x0300);
-
- return 0;
- }
-
- if ((phy_type == NES_PHY_TYPE_IRIS) ||
- (phy_type == NES_PHY_TYPE_ARGUS) ||
- (phy_type == NES_PHY_TYPE_SFP_D)) {
- /* setup 10G MDIO operation */
- tx_config = nes_read_indexed(nesdev, NES_IDX_MAC_TX_CONFIG);
- tx_config &= 0xFFFFFFE3;
- tx_config |= 0x15;
- nes_write_indexed(nesdev, NES_IDX_MAC_TX_CONFIG, tx_config);
- }
- if ((phy_type == NES_PHY_TYPE_ARGUS) ||
- (phy_type == NES_PHY_TYPE_SFP_D)) {
- u32 first_time = 1;
-
- /* Check firmware heartbeat */
- nes_read_10G_phy_reg(nesdev, phy_index, 0x3, 0xd7ee);
+ if (temp_phy_data != temp_phy_data2) {
+ nes_read_10G_phy_reg(nesdev, phy_index, 0x3, 0xd7fd);
temp_phy_data = (u16)nes_read_indexed(nesdev, NES_IDX_MAC_MDIO_CONTROL);
- udelay(1500);
- nes_read_10G_phy_reg(nesdev, phy_index, 0x3, 0xd7ee);
- temp_phy_data2 = (u16)nes_read_indexed(nesdev, NES_IDX_MAC_MDIO_CONTROL);
+ if ((temp_phy_data & 0xff) > 0x20)
+ return 0;
+ printk(PFX "Reinitialize external PHY\n");
+ }
- if (temp_phy_data != temp_phy_data2) {
- nes_read_10G_phy_reg(nesdev, phy_index, 0x3, 0xd7fd);
- temp_phy_data = (u16)nes_read_indexed(nesdev, NES_IDX_MAC_MDIO_CONTROL);
- if ((temp_phy_data & 0xff) > 0x20)
- return 0;
- printk(PFX "Reinitializing PHY\n");
- }
+ /* no heartbeat, configure the PHY */
+ nes_write_10G_phy_reg(nesdev, phy_index, 0x1, 0x0000, 0x8000);
+ nes_write_10G_phy_reg(nesdev, phy_index, 0x1, 0xc300, 0x0000);
+ nes_write_10G_phy_reg(nesdev, phy_index, 0x1, 0xc316, 0x000A);
+ nes_write_10G_phy_reg(nesdev, phy_index, 0x1, 0xc318, 0x0052);
- /* no heartbeat, configure the PHY */
- nes_write_10G_phy_reg(nesdev, phy_index, 0x1, 0x0000, 0x8000);
- nes_write_10G_phy_reg(nesdev, phy_index, 0x1, 0xc300, 0x0000);
+ switch (phy_type) {
+ case NES_PHY_TYPE_ARGUS:
nes_write_10G_phy_reg(nesdev, phy_index, 0x1, 0xc316, 0x000A);
nes_write_10G_phy_reg(nesdev, phy_index, 0x1, 0xc318, 0x0052);
- if (phy_type == NES_PHY_TYPE_ARGUS) {
- nes_write_10G_phy_reg(nesdev, phy_index, 0x1, 0xc302, 0x000C);
- nes_write_10G_phy_reg(nesdev, phy_index, 0x1, 0xc319, 0x0008);
- nes_write_10G_phy_reg(nesdev, phy_index, 0x3, 0x0027, 0x0001);
- } else {
- nes_write_10G_phy_reg(nesdev, phy_index, 0x1, 0xc302, 0x0004);
- nes_write_10G_phy_reg(nesdev, phy_index, 0x1, 0xc319, 0x0038);
- nes_write_10G_phy_reg(nesdev, phy_index, 0x3, 0x0027, 0x0013);
- }
+ nes_write_10G_phy_reg(nesdev, phy_index, 0x1, 0xc302, 0x000C);
+ nes_write_10G_phy_reg(nesdev, phy_index, 0x1, 0xc319, 0x0008);
+ nes_write_10G_phy_reg(nesdev, phy_index, 0x3, 0x0027, 0x0001);
nes_write_10G_phy_reg(nesdev, phy_index, 0x1, 0xc31a, 0x0098);
nes_write_10G_phy_reg(nesdev, phy_index, 0x3, 0x0026, 0x0E00);
@@ -1395,71 +1395,151 @@
nes_write_10G_phy_reg(nesdev, phy_index, 0x1, 0xd006, 0x0007);
nes_write_10G_phy_reg(nesdev, phy_index, 0x1, 0xd007, 0x000A);
nes_write_10G_phy_reg(nesdev, phy_index, 0x1, 0xd008, 0x0009);
+ break;
- nes_write_10G_phy_reg(nesdev, phy_index, 0x3, 0x0028, 0xA528);
+ case NES_PHY_TYPE_SFP_D:
+ nes_write_10G_phy_reg(nesdev, phy_index, 0x1, 0xc316, 0x000A);
+ nes_write_10G_phy_reg(nesdev, phy_index, 0x1, 0xc318, 0x0052);
+ nes_write_10G_phy_reg(nesdev, phy_index, 0x1, 0xc302, 0x0004);
+ nes_write_10G_phy_reg(nesdev, phy_index, 0x1, 0xc319, 0x0038);
+ nes_write_10G_phy_reg(nesdev, phy_index, 0x3, 0x0027, 0x0013);
+ nes_write_10G_phy_reg(nesdev, phy_index, 0x1, 0xc31a, 0x0098);
+ nes_write_10G_phy_reg(nesdev, phy_index, 0x3, 0x0026, 0x0E00);
- /* Bring PHY out of reset */
- nes_write_10G_phy_reg(nesdev, phy_index, 0x1, 0xc300, 0x0002);
+ /* setup LEDs */
+ nes_write_10G_phy_reg(nesdev, phy_index, 0x1, 0xd006, 0x0007);
+ nes_write_10G_phy_reg(nesdev, phy_index, 0x1, 0xd007, 0x000A);
+ nes_write_10G_phy_reg(nesdev, phy_index, 0x1, 0xd008, 0x0009);
+ break;
- /* Check for heartbeat */
- counter = 0;
- mdelay(690);
+ case NES_PHY_TYPE_KR:
+ nes_write_10G_phy_reg(nesdev, phy_index, 0x1, 0xc316, 0x000A);
+ nes_write_10G_phy_reg(nesdev, phy_index, 0x1, 0xc318, 0x0052);
+ nes_write_10G_phy_reg(nesdev, phy_index, 0x1, 0xc302, 0x000C);
+ nes_write_10G_phy_reg(nesdev, phy_index, 0x1, 0xc319, 0x0010);
+ nes_write_10G_phy_reg(nesdev, phy_index, 0x3, 0x0027, 0x0013);
+ nes_write_10G_phy_reg(nesdev, phy_index, 0x1, 0xc31a, 0x0080);
+ nes_write_10G_phy_reg(nesdev, phy_index, 0x3, 0x0026, 0x0E00);
+
+ /* setup LEDs */
+ nes_write_10G_phy_reg(nesdev, phy_index, 0x1, 0xd006, 0x000B);
+ nes_write_10G_phy_reg(nesdev, phy_index, 0x1, 0xd007, 0x0003);
+ nes_write_10G_phy_reg(nesdev, phy_index, 0x1, 0xd008, 0x0004);
+
+ nes_write_10G_phy_reg(nesdev, phy_index, 0x3, 0x0022, 0x406D);
+ nes_write_10G_phy_reg(nesdev, phy_index, 0x3, 0x0023, 0x0020);
+ break;
+ }
+
+ nes_write_10G_phy_reg(nesdev, phy_index, 0x3, 0x0028, 0xA528);
+
+ /* Bring PHY out of reset */
+ nes_write_10G_phy_reg(nesdev, phy_index, 0x1, 0xc300, 0x0002);
+
+ /* Check for heartbeat */
+ counter = 0;
+ mdelay(690);
+ nes_read_10G_phy_reg(nesdev, phy_index, 0x3, 0xd7ee);
+ temp_phy_data = (u16)nes_read_indexed(nesdev, NES_IDX_MAC_MDIO_CONTROL);
+ do {
+ if (counter++ > 150) {
+ printk(PFX "No PHY heartbeat\n");
+ break;
+ }
+ mdelay(1);
nes_read_10G_phy_reg(nesdev, phy_index, 0x3, 0xd7ee);
+ temp_phy_data2 = (u16)nes_read_indexed(nesdev, NES_IDX_MAC_MDIO_CONTROL);
+ } while ((temp_phy_data2 == temp_phy_data));
+
+ /* wait for tracking */
+ counter = 0;
+ do {
+ nes_read_10G_phy_reg(nesdev, phy_index, 0x3, 0xd7fd);
temp_phy_data = (u16)nes_read_indexed(nesdev, NES_IDX_MAC_MDIO_CONTROL);
- do {
- if (counter++ > 150) {
- printk(PFX "No PHY heartbeat\n");
+ if (counter++ > 300) {
+ if (((temp_phy_data & 0xff) == 0x0) && first_attempt) {
+ first_attempt = 0;
+ counter = 0;
+ /* reset AMCC PHY and try again */
+ nes_write_10G_phy_reg(nesdev, phy_index, 0x3, 0xe854, 0x00c0);
+ nes_write_10G_phy_reg(nesdev, phy_index, 0x3, 0xe854, 0x0040);
+ continue;
+ } else {
+ ret = 1;
break;
}
- mdelay(1);
- nes_read_10G_phy_reg(nesdev, phy_index, 0x3, 0xd7ee);
- temp_phy_data2 = (u16)nes_read_indexed(nesdev, NES_IDX_MAC_MDIO_CONTROL);
- } while ((temp_phy_data2 == temp_phy_data));
+ }
+ mdelay(10);
+ } while ((temp_phy_data & 0xff) < 0x30);
- /* wait for tracking */
- counter = 0;
- do {
- nes_read_10G_phy_reg(nesdev, phy_index, 0x3, 0xd7fd);
- temp_phy_data = (u16)nes_read_indexed(nesdev, NES_IDX_MAC_MDIO_CONTROL);
- if (counter++ > 300) {
- if (((temp_phy_data & 0xff) == 0x0) && first_time) {
- first_time = 0;
- counter = 0;
- /* reset AMCC PHY and try again */
- nes_write_10G_phy_reg(nesdev, phy_index, 0x3, 0xe854, 0x00c0);
- nes_write_10G_phy_reg(nesdev, phy_index, 0x3, 0xe854, 0x0040);
- continue;
- } else {
- printk(PFX "PHY did not track\n");
- break;
- }
- }
- mdelay(10);
- } while ((temp_phy_data & 0xff) < 0x30);
-
- /* setup signal integrity */
- nes_write_10G_phy_reg(nesdev, phy_index, 0x1, 0xd003, 0x0000);
- nes_write_10G_phy_reg(nesdev, phy_index, 0x1, 0xF00D, 0x00FE);
- nes_write_10G_phy_reg(nesdev, phy_index, 0x1, 0xF00E, 0x0032);
+ /* setup signal integrity */
+ nes_write_10G_phy_reg(nesdev, phy_index, 0x1, 0xd003, 0x0000);
+ nes_write_10G_phy_reg(nesdev, phy_index, 0x1, 0xF00D, 0x00FE);
+ nes_write_10G_phy_reg(nesdev, phy_index, 0x1, 0xF00E, 0x0032);
+ if (phy_type == NES_PHY_TYPE_KR) {
+ nes_write_10G_phy_reg(nesdev, phy_index, 0x1, 0xF00F, 0x000C);
+ } else {
nes_write_10G_phy_reg(nesdev, phy_index, 0x1, 0xF00F, 0x0002);
nes_write_10G_phy_reg(nesdev, phy_index, 0x1, 0xc314, 0x0063);
-
- /* reset serdes */
- sds = nes_read_indexed(nesdev, NES_IDX_ETH_SERDES_COMMON_CONTROL0 +
- mac_index * 0x200);
- sds |= 0x1;
- nes_write_indexed(nesdev, NES_IDX_ETH_SERDES_COMMON_CONTROL0 +
- mac_index * 0x200, sds);
- sds &= 0xfffffffe;
- nes_write_indexed(nesdev, NES_IDX_ETH_SERDES_COMMON_CONTROL0 +
- mac_index * 0x200, sds);
-
- counter = 0;
- while (((nes_read32(nesdev->regs + NES_SOFTWARE_RESET) & 0x00000040) != 0x00000040)
- && (counter++ < 5000))
- ;
}
- return 0;
+
+ /* reset serdes */
+ sds = nes_read_indexed(nesdev, NES_IDX_ETH_SERDES_COMMON_CONTROL0 + mac_index * 0x200);
+ sds |= 0x1;
+ nes_write_indexed(nesdev, NES_IDX_ETH_SERDES_COMMON_CONTROL0 + mac_index * 0x200, sds);
+ sds &= 0xfffffffe;
+ nes_write_indexed(nesdev, NES_IDX_ETH_SERDES_COMMON_CONTROL0 + mac_index * 0x200, sds);
+
+ counter = 0;
+ while (((nes_read32(nesdev->regs + NES_SOFTWARE_RESET) & 0x00000040) != 0x00000040)
+ && (counter++ < 5000))
+ ;
+
+ return ret;
+}
+
+
+/**
+ * nes_init_phy
+ */
+int nes_init_phy(struct nes_device *nesdev)
+{
+ struct nes_adapter *nesadapter = nesdev->nesadapter;
+ u32 mac_index = nesdev->mac_index;
+ u32 tx_config = 0;
+ unsigned long flags;
+ u8 phy_type = nesadapter->phy_type[mac_index];
+ u8 phy_index = nesadapter->phy_index[mac_index];
+ int ret = 0;
+
+ tx_config = nes_read_indexed(nesdev, NES_IDX_MAC_TX_CONFIG);
+ if (phy_type == NES_PHY_TYPE_1G) {
+ /* setup 1G MDIO operation */
+ tx_config &= 0xFFFFFFE3;
+ tx_config |= 0x04;
+ } else {
+ /* setup 10G MDIO operation */
+ tx_config &= 0xFFFFFFE3;
+ tx_config |= 0x15;
+ }
+ nes_write_indexed(nesdev, NES_IDX_MAC_TX_CONFIG, tx_config);
+
+ spin_lock_irqsave(&nesdev->nesadapter->phy_lock, flags);
+
+ switch (phy_type) {
+ case NES_PHY_TYPE_1G:
+ ret = nes_init_1g_phy(nesdev, phy_type, phy_index);
+ break;
+ case NES_PHY_TYPE_ARGUS:
+ case NES_PHY_TYPE_SFP_D:
+ case NES_PHY_TYPE_KR:
+ ret = nes_init_2025_phy(nesdev, phy_type, phy_index);
+ break;
+ }
+
+ spin_unlock_irqrestore(&nesdev->nesadapter->phy_lock, flags);
+
+ return ret;
}
@@ -2460,23 +2540,9 @@
}
} else {
switch (nesadapter->phy_type[mac_index]) {
- case NES_PHY_TYPE_IRIS:
- nes_read_10G_phy_reg(nesdev, nesadapter->phy_index[mac_index], 1, 1);
- temp_phy_data = (u16)nes_read_indexed(nesdev, NES_IDX_MAC_MDIO_CONTROL);
- u32temp = 20;
- do {
- nes_read_10G_phy_reg(nesdev, nesadapter->phy_index[mac_index], 1, 1);
- phy_data = (u16)nes_read_indexed(nesdev, NES_IDX_MAC_MDIO_CONTROL);
- if ((phy_data == temp_phy_data) || (!(--u32temp)))
- break;
- temp_phy_data = phy_data;
- } while (1);
- nes_debug(NES_DBG_PHY, "%s: Phy data = 0x%04X, link was %s.\n",
- __func__, phy_data, nesadapter->mac_link_down[mac_index] ? "DOWN" : "UP");
- break;
-
case NES_PHY_TYPE_ARGUS:
case NES_PHY_TYPE_SFP_D:
+ case NES_PHY_TYPE_KR:
/* clear the alarms */
nes_read_10G_phy_reg(nesdev, nesadapter->phy_index[mac_index], 4, 0x0008);
nes_read_10G_phy_reg(nesdev, nesadapter->phy_index[mac_index], 4, 0xc001);
@@ -3352,8 +3418,6 @@
u16 async_event_id;
u8 tcp_state;
u8 iwarp_state;
- int must_disconn = 1;
- int must_terminate = 0;
struct ib_event ibevent;
nes_debug(NES_DBG_AEQ, "\n");
@@ -3367,6 +3431,8 @@
BUG_ON(!context);
}
+ /* context is nesqp unless async_event_id == CQ ERROR */
+ nesqp = (struct nes_qp *)(unsigned long)context;
async_event_id = (u16)aeq_info;
tcp_state = (aeq_info & NES_AEQE_TCP_STATE_MASK) >> NES_AEQE_TCP_STATE_SHIFT;
iwarp_state = (aeq_info & NES_AEQE_IWARP_STATE_MASK) >> NES_AEQE_IWARP_STATE_SHIFT;
@@ -3378,8 +3444,6 @@
switch (async_event_id) {
case NES_AEQE_AEID_LLP_FIN_RECEIVED:
- nesqp = (struct nes_qp *)(unsigned long)context;
-
if (nesqp->term_flags)
return; /* Ignore it, wait for close complete */
@@ -3394,79 +3458,48 @@
async_event_id, nesqp->last_aeq, tcp_state);
}
- if ((tcp_state != NES_AEQE_TCP_STATE_CLOSE_WAIT) ||
- (nesqp->ibqp_state != IB_QPS_RTS)) {
- /* FIN Received but tcp state or IB state moved on,
- should expect a close complete */
- return;
- }
-
+ break;
case NES_AEQE_AEID_LLP_CLOSE_COMPLETE:
- nesqp = (struct nes_qp *)(unsigned long)context;
if (nesqp->term_flags) {
nes_terminate_done(nesqp, 0);
return;
}
+ spin_lock_irqsave(&nesqp->lock, flags);
+ nesqp->hw_iwarp_state = NES_AEQE_IWARP_STATE_CLOSING;
+ spin_unlock_irqrestore(&nesqp->lock, flags);
+ nes_hw_modify_qp(nesdev, nesqp, NES_CQP_QP_IWARP_STATE_CLOSING, 0, 0);
+ nes_cm_disconn(nesqp);
+ break;
- case NES_AEQE_AEID_LLP_CONNECTION_RESET:
case NES_AEQE_AEID_RESET_SENT:
- nesqp = (struct nes_qp *)(unsigned long)context;
- if (async_event_id == NES_AEQE_AEID_RESET_SENT) {
- tcp_state = NES_AEQE_TCP_STATE_CLOSED;
- }
+ tcp_state = NES_AEQE_TCP_STATE_CLOSED;
spin_lock_irqsave(&nesqp->lock, flags);
nesqp->hw_iwarp_state = iwarp_state;
nesqp->hw_tcp_state = tcp_state;
nesqp->last_aeq = async_event_id;
-
- if ((tcp_state == NES_AEQE_TCP_STATE_CLOSED) ||
- (tcp_state == NES_AEQE_TCP_STATE_TIME_WAIT)) {
- nesqp->hte_added = 0;
- next_iwarp_state = NES_CQP_QP_IWARP_STATE_ERROR | NES_CQP_QP_DEL_HTE;
- }
-
- if ((nesqp->ibqp_state == IB_QPS_RTS) &&
- ((tcp_state == NES_AEQE_TCP_STATE_CLOSE_WAIT) ||
- (async_event_id == NES_AEQE_AEID_LLP_CONNECTION_RESET))) {
- switch (nesqp->hw_iwarp_state) {
- case NES_AEQE_IWARP_STATE_RTS:
- next_iwarp_state = NES_CQP_QP_IWARP_STATE_CLOSING;
- nesqp->hw_iwarp_state = NES_AEQE_IWARP_STATE_CLOSING;
- break;
- case NES_AEQE_IWARP_STATE_TERMINATE:
- must_disconn = 0; /* terminate path takes care of disconn */
- if (nesqp->term_flags == 0)
- must_terminate = 1;
- break;
- }
- } else {
- if (async_event_id == NES_AEQE_AEID_LLP_FIN_RECEIVED) {
- /* FIN Received but ib state not RTS,
- close complete will be on its way */
- must_disconn = 0;
- }
- }
+ nesqp->hte_added = 0;
spin_unlock_irqrestore(&nesqp->lock, flags);
+ next_iwarp_state = NES_CQP_QP_IWARP_STATE_ERROR | NES_CQP_QP_DEL_HTE;
+ nes_hw_modify_qp(nesdev, nesqp, next_iwarp_state, 0, 0);
+ nes_cm_disconn(nesqp);
+ break;
- if (must_terminate)
- nes_terminate_connection(nesdev, nesqp, aeqe, IB_EVENT_QP_FATAL);
- else if (must_disconn) {
- if (next_iwarp_state) {
- nes_debug(NES_DBG_AEQ, "issuing hw modifyqp for QP%u. next state = 0x%08X\n",
- nesqp->hwqp.qp_id, next_iwarp_state);
- nes_hw_modify_qp(nesdev, nesqp, next_iwarp_state, 0, 0);
- }
- nes_cm_disconn(nesqp);
- }
+ case NES_AEQE_AEID_LLP_CONNECTION_RESET:
+ if (atomic_read(&nesqp->close_timer_started))
+ return;
+ spin_lock_irqsave(&nesqp->lock, flags);
+ nesqp->hw_iwarp_state = iwarp_state;
+ nesqp->hw_tcp_state = tcp_state;
+ nesqp->last_aeq = async_event_id;
+ spin_unlock_irqrestore(&nesqp->lock, flags);
+ nes_cm_disconn(nesqp);
break;
case NES_AEQE_AEID_TERMINATE_SENT:
- nesqp = (struct nes_qp *)(unsigned long)context;
nes_terminate_send_fin(nesdev, nesqp, aeqe);
break;
case NES_AEQE_AEID_LLP_TERMINATE_RECEIVED:
- nesqp = (struct nes_qp *)(unsigned long)context;
nes_terminate_received(nesdev, nesqp, aeqe);
break;
@@ -3480,7 +3513,8 @@
case NES_AEQE_AEID_DDP_UBE_DDP_MESSAGE_TOO_LONG_FOR_AVAILABLE_BUFFER:
case NES_AEQE_AEID_AMP_BOUNDS_VIOLATION:
case NES_AEQE_AEID_AMP_TO_WRAP:
- nesqp = (struct nes_qp *)(unsigned long)context;
+ printk(KERN_ERR PFX "QP[%u] async_event_id=0x%04X IB_EVENT_QP_ACCESS_ERR\n",
+ nesqp->hwqp.qp_id, async_event_id);
nes_terminate_connection(nesdev, nesqp, aeqe, IB_EVENT_QP_ACCESS_ERR);
break;
@@ -3488,7 +3522,6 @@
case NES_AEQE_AEID_LLP_SEGMENT_TOO_SMALL:
case NES_AEQE_AEID_DDP_UBE_INVALID_MO:
case NES_AEQE_AEID_DDP_UBE_INVALID_QN:
- nesqp = (struct nes_qp *)(unsigned long)context;
if (iwarp_opcode(nesqp, aeq_info) > IWARP_OPCODE_TERM) {
aeq_info &= 0xffff0000;
aeq_info |= NES_AEQE_AEID_RDMAP_ROE_UNEXPECTED_OPCODE;
@@ -3530,7 +3563,8 @@
case NES_AEQE_AEID_STAG_ZERO_INVALID:
case NES_AEQE_AEID_ROE_INVALID_RDMA_READ_REQUEST:
case NES_AEQE_AEID_ROE_INVALID_RDMA_WRITE_OR_READ_RESP:
- nesqp = (struct nes_qp *)(unsigned long)context;
+ printk(KERN_ERR PFX "QP[%u] async_event_id=0x%04X IB_EVENT_QP_FATAL\n",
+ nesqp->hwqp.qp_id, async_event_id);
nes_terminate_connection(nesdev, nesqp, aeqe, IB_EVENT_QP_FATAL);
break;
diff --git a/drivers/infiniband/hw/nes/nes_hw.h b/drivers/infiniband/hw/nes/nes_hw.h
index 084be0e..9b1e7f8 100644
--- a/drivers/infiniband/hw/nes/nes_hw.h
+++ b/drivers/infiniband/hw/nes/nes_hw.h
@@ -37,12 +37,12 @@
#define NES_PHY_TYPE_CX4 1
#define NES_PHY_TYPE_1G 2
-#define NES_PHY_TYPE_IRIS 3
#define NES_PHY_TYPE_ARGUS 4
#define NES_PHY_TYPE_PUMA_1G 5
#define NES_PHY_TYPE_PUMA_10G 6
#define NES_PHY_TYPE_GLADIUS 7
#define NES_PHY_TYPE_SFP_D 8
+#define NES_PHY_TYPE_KR 9
#define NES_MULTICAST_PF_MAX 8
diff --git a/drivers/infiniband/hw/nes/nes_nic.c b/drivers/infiniband/hw/nes/nes_nic.c
index 9384f5d..a1d79b6 100644
--- a/drivers/infiniband/hw/nes/nes_nic.c
+++ b/drivers/infiniband/hw/nes/nes_nic.c
@@ -1243,8 +1243,8 @@
target_stat_values[++index] = cm_packets_received;
target_stat_values[++index] = cm_packets_dropped;
target_stat_values[++index] = cm_packets_retrans;
- target_stat_values[++index] = cm_listens_created;
- target_stat_values[++index] = cm_listens_destroyed;
+ target_stat_values[++index] = atomic_read(&cm_listens_created);
+ target_stat_values[++index] = atomic_read(&cm_listens_destroyed);
target_stat_values[++index] = cm_backlog_drops;
target_stat_values[++index] = atomic_read(&cm_loopbacks);
target_stat_values[++index] = atomic_read(&cm_nodes_created);
@@ -1474,9 +1474,9 @@
}
return 0;
}
- if ((phy_type == NES_PHY_TYPE_IRIS) ||
- (phy_type == NES_PHY_TYPE_ARGUS) ||
- (phy_type == NES_PHY_TYPE_SFP_D)) {
+ if ((phy_type == NES_PHY_TYPE_ARGUS) ||
+ (phy_type == NES_PHY_TYPE_SFP_D) ||
+ (phy_type == NES_PHY_TYPE_KR)) {
et_cmd->transceiver = XCVR_EXTERNAL;
et_cmd->port = PORT_FIBRE;
et_cmd->supported = SUPPORTED_FIBRE;
@@ -1596,8 +1596,7 @@
struct net_device *netdev;
struct nic_qp_map *curr_qp_map;
u32 u32temp;
- u16 phy_data;
- u16 temp_phy_data;
+ u8 phy_type = nesdev->nesadapter->phy_type[nesdev->mac_index];
netdev = alloc_etherdev(sizeof(struct nes_vnic));
if (!netdev) {
@@ -1705,65 +1704,23 @@
if ((nesdev->netdev_count == 0) &&
((PCI_FUNC(nesdev->pcidev->devfn) == nesdev->mac_index) ||
- ((nesdev->nesadapter->phy_type[nesdev->mac_index] == NES_PHY_TYPE_PUMA_1G) &&
+ ((phy_type == NES_PHY_TYPE_PUMA_1G) &&
(((PCI_FUNC(nesdev->pcidev->devfn) == 1) && (nesdev->mac_index == 2)) ||
((PCI_FUNC(nesdev->pcidev->devfn) == 2) && (nesdev->mac_index == 1)))))) {
- /*
- * nes_debug(NES_DBG_INIT, "Setting up PHY interrupt mask. Using register index 0x%04X\n",
- * NES_IDX_PHY_PCS_CONTROL_STATUS0 + (0x200 * (nesvnic->logical_port & 1)));
- */
u32temp = nes_read_indexed(nesdev, NES_IDX_PHY_PCS_CONTROL_STATUS0 +
(0x200 * (nesdev->mac_index & 1)));
- if (nesdev->nesadapter->phy_type[nesdev->mac_index] != NES_PHY_TYPE_PUMA_1G) {
+ if (phy_type != NES_PHY_TYPE_PUMA_1G) {
u32temp |= 0x00200000;
nes_write_indexed(nesdev, NES_IDX_PHY_PCS_CONTROL_STATUS0 +
(0x200 * (nesdev->mac_index & 1)), u32temp);
}
- u32temp = nes_read_indexed(nesdev, NES_IDX_PHY_PCS_CONTROL_STATUS0 +
- (0x200 * (nesdev->mac_index & 1)));
-
- if ((u32temp&0x0f1f0000) == 0x0f0f0000) {
- if (nesdev->nesadapter->phy_type[nesdev->mac_index] == NES_PHY_TYPE_IRIS) {
- nes_init_phy(nesdev);
- nes_read_10G_phy_reg(nesdev, nesdev->nesadapter->phy_index[nesdev->mac_index], 1, 1);
- temp_phy_data = (u16)nes_read_indexed(nesdev,
- NES_IDX_MAC_MDIO_CONTROL);
- u32temp = 20;
- do {
- nes_read_10G_phy_reg(nesdev, nesdev->nesadapter->phy_index[nesdev->mac_index], 1, 1);
- phy_data = (u16)nes_read_indexed(nesdev,
- NES_IDX_MAC_MDIO_CONTROL);
- if ((phy_data == temp_phy_data) || (!(--u32temp)))
- break;
- temp_phy_data = phy_data;
- } while (1);
- if (phy_data & 4) {
- nes_debug(NES_DBG_INIT, "The Link is UP!!.\n");
- nesvnic->linkup = 1;
- } else {
- nes_debug(NES_DBG_INIT, "The Link is DOWN!!.\n");
- }
- } else {
- nes_debug(NES_DBG_INIT, "The Link is UP!!.\n");
- nesvnic->linkup = 1;
- }
- } else if (nesdev->nesadapter->phy_type[nesdev->mac_index] == NES_PHY_TYPE_PUMA_1G) {
- nes_debug(NES_DBG_INIT, "mac_index=%d, logical_port=%d, u32temp=0x%04X, PCI_FUNC=%d\n",
- nesdev->mac_index, nesvnic->logical_port, u32temp, PCI_FUNC(nesdev->pcidev->devfn));
- if (((nesdev->mac_index < 2) && ((u32temp&0x01010000) == 0x01010000)) ||
- ((nesdev->mac_index > 1) && ((u32temp&0x02020000) == 0x02020000))) {
- nes_debug(NES_DBG_INIT, "The Link is UP!!.\n");
- nesvnic->linkup = 1;
- }
- }
/* clear the MAC interrupt status, assumes direct logical to physical mapping */
u32temp = nes_read_indexed(nesdev, NES_IDX_MAC_INT_STATUS + (0x200 * nesdev->mac_index));
nes_debug(NES_DBG_INIT, "Phy interrupt status = 0x%X.\n", u32temp);
nes_write_indexed(nesdev, NES_IDX_MAC_INT_STATUS + (0x200 * nesdev->mac_index), u32temp);
- if (nesdev->nesadapter->phy_type[nesdev->mac_index] != NES_PHY_TYPE_IRIS)
- nes_init_phy(nesdev);
+ nes_init_phy(nesdev);
}
diff --git a/drivers/infiniband/hw/nes/nes_verbs.c b/drivers/infiniband/hw/nes/nes_verbs.c
index 64d3136..815725f 100644
--- a/drivers/infiniband/hw/nes/nes_verbs.c
+++ b/drivers/infiniband/hw/nes/nes_verbs.c
@@ -228,7 +228,7 @@
/* Check for SQ overflow */
if (((head + (2 * qsize) - nesqp->hwqp.sq_tail) % qsize) == (qsize - 1)) {
spin_unlock_irqrestore(&nesqp->lock, flags);
- return -EINVAL;
+ return -ENOMEM;
}
wqe = &nesqp->hwqp.sq_vbase[head];
@@ -3294,7 +3294,7 @@
/* Check for SQ overflow */
if (((head + (2 * qsize) - nesqp->hwqp.sq_tail) % qsize) == (qsize - 1)) {
- err = -EINVAL;
+ err = -ENOMEM;
break;
}
@@ -3577,7 +3577,7 @@
}
/* Check for RQ overflow */
if (((head + (2 * qsize) - nesqp->hwqp.rq_tail) % qsize) == (qsize - 1)) {
- err = -EINVAL;
+ err = -ENOMEM;
break;
}
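
The overflow checks above consider a work queue full when only one slot separates head from tail, and now report -ENOMEM (resource exhaustion) rather than -EINVAL. A standalone sketch of the same modular arithmetic on a wrapping ring:

    #include <stdio.h>

    /* full when exactly one free slot remains between head and tail */
    static int sq_is_full(unsigned int head, unsigned int tail, unsigned int qsize)
    {
        return ((head + 2 * qsize - tail) % qsize) == (qsize - 1);
    }

    int main(void)
    {
        printf("%d\n", sq_is_full(1, 2, 8));    /* 1: head wrapped up to tail */
        printf("%d\n", sq_is_full(4, 2, 8));    /* 0: slots remain            */
        return 0;
    }
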
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_ethtool.c b/drivers/infiniband/ulp/ipoib/ipoib_ethtool.c
index e9795f6..d10b4ec 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_ethtool.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_ethtool.c
@@ -55,9 +55,7 @@
struct ipoib_dev_priv *priv = netdev_priv(dev);
coal->rx_coalesce_usecs = priv->ethtool.coalesce_usecs;
- coal->tx_coalesce_usecs = priv->ethtool.coalesce_usecs;
coal->rx_max_coalesced_frames = priv->ethtool.max_coalesced_frames;
- coal->tx_max_coalesced_frames = priv->ethtool.max_coalesced_frames;
return 0;
}
@@ -69,10 +67,8 @@
int ret;
/*
- * Since IPoIB uses a single CQ for both rx and tx, we assume
- * that rx params dictate the configuration. These values are
- * saved in the private data and returned when ipoib_get_coalesce()
- * is called.
+ * These values are saved in the private data and returned
+ * when ipoib_get_coalesce() is called
*/
if (coal->rx_coalesce_usecs > 0xffff ||
coal->rx_max_coalesced_frames > 0xffff)
@@ -85,8 +81,6 @@
return ret;
}
- coal->tx_coalesce_usecs = coal->rx_coalesce_usecs;
- coal->tx_max_coalesced_frames = coal->rx_max_coalesced_frames;
priv->ethtool.coalesce_usecs = coal->rx_coalesce_usecs;
priv->ethtool.max_coalesced_frames = coal->rx_max_coalesced_frames;
diff --git a/drivers/infiniband/ulp/iser/iscsi_iser.c b/drivers/infiniband/ulp/iser/iscsi_iser.c
index 5f7a6fc..71237f8f 100644
--- a/drivers/infiniband/ulp/iser/iscsi_iser.c
+++ b/drivers/infiniband/ulp/iser/iscsi_iser.c
@@ -128,6 +128,28 @@
return 0;
}
+int iser_initialize_task_headers(struct iscsi_task *task,
+ struct iser_tx_desc *tx_desc)
+{
+ struct iscsi_iser_conn *iser_conn = task->conn->dd_data;
+ struct iser_device *device = iser_conn->ib_conn->device;
+ struct iscsi_iser_task *iser_task = task->dd_data;
+ u64 dma_addr;
+
+ dma_addr = ib_dma_map_single(device->ib_device, (void *)tx_desc,
+ ISER_HEADERS_LEN, DMA_TO_DEVICE);
+ if (ib_dma_mapping_error(device->ib_device, dma_addr))
+ return -ENOMEM;
+
+ tx_desc->dma_addr = dma_addr;
+ tx_desc->tx_sg[0].addr = tx_desc->dma_addr;
+ tx_desc->tx_sg[0].length = ISER_HEADERS_LEN;
+ tx_desc->tx_sg[0].lkey = device->mr->lkey;
+
+ iser_task->headers_initialized = 1;
+ iser_task->iser_conn = iser_conn;
+ return 0;
+}
/**
* iscsi_iser_task_init - Initialize task
* @task: iscsi task
@@ -137,17 +159,17 @@
static int
iscsi_iser_task_init(struct iscsi_task *task)
{
- struct iscsi_iser_conn *iser_conn = task->conn->dd_data;
struct iscsi_iser_task *iser_task = task->dd_data;
+ if (!iser_task->headers_initialized)
+ if (iser_initialize_task_headers(task, &iser_task->desc))
+ return -ENOMEM;
+
/* mgmt task */
- if (!task->sc) {
- iser_task->desc.data = task->data;
+ if (!task->sc)
return 0;
- }
iser_task->command_sent = 0;
- iser_task->iser_conn = iser_conn;
iser_task_rdma_init(iser_task);
return 0;
}
@@ -168,7 +190,7 @@
{
int error = 0;
- iser_dbg("task deq [cid %d itt 0x%x]\n", conn->id, task->itt);
+ iser_dbg("mtask xmit [cid %d itt 0x%x]\n", conn->id, task->itt);
error = iser_send_control(conn, task);
@@ -178,9 +200,6 @@
* - if yes, the task is recycled at iscsi_complete_pdu
* - if no, the task is recycled at iser_snd_completion
*/
- if (error && error != -ENOBUFS)
- iscsi_conn_failure(conn, ISCSI_ERR_CONN_FAILED);
-
return error;
}
@@ -232,7 +251,7 @@
task->imm_count, task->unsol_r2t.data_length);
}
- iser_dbg("task deq [cid %d itt 0x%x]\n",
+ iser_dbg("ctask xmit [cid %d itt 0x%x]\n",
conn->id, task->itt);
/* Send the cmd PDU */
@@ -248,8 +267,6 @@
error = iscsi_iser_task_xmit_unsol_data(conn, task);
iscsi_iser_task_xmit_exit:
- if (error && error != -ENOBUFS)
- iscsi_conn_failure(conn, ISCSI_ERR_CONN_FAILED);
return error;
}
@@ -283,7 +300,7 @@
* due to issues with the login code re iser semantics
* this not set in iscsi_conn_setup - FIXME
*/
- conn->max_recv_dlength = 128;
+ conn->max_recv_dlength = ISER_RECV_DATA_SEG_LEN;
iser_conn = conn->dd_data;
conn->dd_data = iser_conn;
@@ -401,7 +418,7 @@
struct Scsi_Host *shost;
struct iser_conn *ib_conn;
- shost = iscsi_host_alloc(&iscsi_iser_sht, 0, 1);
+ shost = iscsi_host_alloc(&iscsi_iser_sht, 0, 0);
if (!shost)
return NULL;
shost->transportt = iscsi_iser_scsi_transport;
@@ -675,7 +692,7 @@
memset(&ig, 0, sizeof(struct iser_global));
ig.desc_cache = kmem_cache_create("iser_descriptors",
- sizeof (struct iser_desc),
+ sizeof(struct iser_tx_desc),
0, SLAB_HWCACHE_ALIGN,
NULL);
if (ig.desc_cache == NULL)
diff --git a/drivers/infiniband/ulp/iser/iscsi_iser.h b/drivers/infiniband/ulp/iser/iscsi_iser.h
index 9d529ca..036934c 100644
--- a/drivers/infiniband/ulp/iser/iscsi_iser.h
+++ b/drivers/infiniband/ulp/iser/iscsi_iser.h
@@ -102,9 +102,9 @@
#define ISER_MAX_TX_MISC_PDUS 6 /* NOOP_OUT(2), TEXT(1), *
* SCSI_TMFUNC(2), LOGOUT(1) */
-#define ISER_QP_MAX_RECV_DTOS (ISCSI_DEF_XMIT_CMDS_MAX + \
- ISER_MAX_RX_MISC_PDUS + \
- ISER_MAX_TX_MISC_PDUS)
+#define ISER_QP_MAX_RECV_DTOS (ISCSI_DEF_XMIT_CMDS_MAX)
+
+#define ISER_MIN_POSTED_RX (ISCSI_DEF_XMIT_CMDS_MAX >> 2)
/* the max TX (send) WR supported by the iSER QP is defined by *
* max_send_wr = T * (1 + D) + C ; D is how many inflight dataouts we expect *
@@ -132,6 +132,12 @@
__be64 read_va;
} __attribute__((packed));
+/* Constant PDU lengths calculations */
+#define ISER_HEADERS_LEN (sizeof(struct iser_hdr) + sizeof(struct iscsi_hdr))
+
+#define ISER_RECV_DATA_SEG_LEN 128
+#define ISER_RX_PAYLOAD_SIZE (ISER_HEADERS_LEN + ISER_RECV_DATA_SEG_LEN)
+#define ISER_RX_LOGIN_SIZE (ISER_HEADERS_LEN + ISCSI_DEF_MAX_RECV_SEG_LEN)
/* Length of an object name string */
#define ISER_OBJECT_NAME_SIZE 64
@@ -187,51 +193,43 @@
struct iser_mem_reg reg; /* memory registration info */
void *virt_addr;
struct iser_device *device; /* device->device for dma_unmap */
- u64 dma_addr; /* if non zero, addr for dma_unmap */
enum dma_data_direction direction; /* direction for dma_unmap */
unsigned int data_size;
- atomic_t ref_count; /* refcount, freed when dec to 0 */
-};
-
-#define MAX_REGD_BUF_VECTOR_LEN 2
-
-struct iser_dto {
- struct iscsi_iser_task *task;
- struct iser_conn *ib_conn;
- int notify_enable;
-
- /* vector of registered buffers */
- unsigned int regd_vector_len;
- struct iser_regd_buf *regd[MAX_REGD_BUF_VECTOR_LEN];
-
- /* offset into the registered buffer may be specified */
- unsigned int offset[MAX_REGD_BUF_VECTOR_LEN];
-
- /* a smaller size may be specified, if 0, then full size is used */
- unsigned int used_sz[MAX_REGD_BUF_VECTOR_LEN];
};
enum iser_desc_type {
- ISCSI_RX,
ISCSI_TX_CONTROL ,
ISCSI_TX_SCSI_COMMAND,
ISCSI_TX_DATAOUT
};
-struct iser_desc {
+struct iser_tx_desc {
struct iser_hdr iser_header;
struct iscsi_hdr iscsi_header;
- struct iser_regd_buf hdr_regd_buf;
- void *data; /* used by RX & TX_CONTROL */
- struct iser_regd_buf data_regd_buf; /* used by RX & TX_CONTROL */
enum iser_desc_type type;
- struct iser_dto dto;
+ u64 dma_addr;
+ /* sg[0] points to iser/iscsi headers, sg[1] optionally points to either
+ of immediate data, unsolicited data-out or control (login,text) */
+ struct ib_sge tx_sg[2];
+ int num_sge;
};
+#define ISER_RX_PAD_SIZE (256 - (ISER_RX_PAYLOAD_SIZE + \
+ sizeof(u64) + sizeof(struct ib_sge)))
+struct iser_rx_desc {
+ struct iser_hdr iser_header;
+ struct iscsi_hdr iscsi_header;
+ char data[ISER_RECV_DATA_SEG_LEN];
+ u64 dma_addr;
+ struct ib_sge rx_sg;
+ char pad[ISER_RX_PAD_SIZE];
+} __attribute__((packed));
+
struct iser_device {
struct ib_device *ib_device;
struct ib_pd *pd;
- struct ib_cq *cq;
+ struct ib_cq *rx_cq;
+ struct ib_cq *tx_cq;
struct ib_mr *mr;
struct tasklet_struct cq_tasklet;
struct list_head ig_list; /* entry in ig devices list */
@@ -250,15 +248,18 @@
struct ib_fmr_pool *fmr_pool; /* pool of IB FMRs */
int disc_evt_flag; /* disconn event delivered */
wait_queue_head_t wait; /* waitq for conn/disconn */
- atomic_t post_recv_buf_count; /* posted rx count */
+ int post_recv_buf_count; /* posted rx count */
atomic_t post_send_buf_count; /* posted tx count */
- atomic_t unexpected_pdu_count;/* count of received *
- * unexpected pdus *
- * not yet retired */
char name[ISER_OBJECT_NAME_SIZE];
struct iser_page_vec *page_vec; /* represents SG to fmr maps*
* maps serialized as tx is*/
struct list_head conn_list; /* entry in ig conn list */
+
+ char *login_buf;
+ u64 login_dma;
+ unsigned int rx_desc_head;
+ struct iser_rx_desc *rx_descs;
+ struct ib_recv_wr rx_wr[ISER_MIN_POSTED_RX];
};
struct iscsi_iser_conn {
@@ -267,7 +268,7 @@
};
struct iscsi_iser_task {
- struct iser_desc desc;
+ struct iser_tx_desc desc;
struct iscsi_iser_conn *iser_conn;
enum iser_task_status status;
int command_sent; /* set if command sent */
@@ -275,6 +276,7 @@
struct iser_regd_buf rdma_regd[ISER_DIRS_NUM];/* regd rdma buf */
struct iser_data_buf data[ISER_DIRS_NUM]; /* orig. data des*/
struct iser_data_buf data_copy[ISER_DIRS_NUM];/* contig. copy */
+ int headers_initialized;
};
struct iser_page_vec {
@@ -322,22 +324,17 @@
void iser_conn_terminate(struct iser_conn *ib_conn);
-void iser_rcv_completion(struct iser_desc *desc,
- unsigned long dto_xfer_len);
+void iser_rcv_completion(struct iser_rx_desc *desc,
+ unsigned long dto_xfer_len,
+ struct iser_conn *ib_conn);
-void iser_snd_completion(struct iser_desc *desc);
+void iser_snd_completion(struct iser_tx_desc *desc, struct iser_conn *ib_conn);
void iser_task_rdma_init(struct iscsi_iser_task *task);
void iser_task_rdma_finalize(struct iscsi_iser_task *task);
-void iser_dto_buffs_release(struct iser_dto *dto);
-
-int iser_regd_buff_release(struct iser_regd_buf *regd_buf);
-
-void iser_reg_single(struct iser_device *device,
- struct iser_regd_buf *regd_buf,
- enum dma_data_direction direction);
+void iser_free_rx_descriptors(struct iser_conn *ib_conn);
void iser_finalize_rdma_unaligned_sg(struct iscsi_iser_task *task,
enum iser_data_dir cmd_dir);
@@ -356,11 +353,9 @@
void iser_unreg_mem(struct iser_mem_reg *mem_reg);
-int iser_post_recv(struct iser_desc *rx_desc);
-int iser_post_send(struct iser_desc *tx_desc);
-
-int iser_conn_state_comp(struct iser_conn *ib_conn,
- enum iser_ib_conn_state comp);
+int iser_post_recvl(struct iser_conn *ib_conn);
+int iser_post_recvm(struct iser_conn *ib_conn, int count);
+int iser_post_send(struct iser_conn *ib_conn, struct iser_tx_desc *tx_desc);
int iser_dma_map_task_data(struct iscsi_iser_task *iser_task,
struct iser_data_buf *data,
@@ -368,4 +363,6 @@
enum dma_data_direction dma_dir);
void iser_dma_unmap_task_data(struct iscsi_iser_task *iser_task);
+int iser_initialize_task_headers(struct iscsi_task *task,
+ struct iser_tx_desc *tx_desc);
#endif
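
ISER_RX_PAD_SIZE above pads struct iser_rx_desc so every receive descriptor occupies a fixed 256-byte slot: headers, the 128-byte data segment, the DMA address, the scatter/gather entry, and the pad. A sketch of the same sizing trick with made-up header sizes, since the real iser_hdr/iscsi_hdr layouts are not reproduced here:

    #include <assert.h>
    #include <stdint.h>
    #include <stdio.h>

    /* illustrative sizes only; the real iSER/iSCSI header layouts differ */
    struct hdr_a { char b[28]; };
    struct hdr_b { char b[48]; };
    struct sge   { uint64_t addr; uint32_t length; uint32_t lkey; };

    #define DATA_SEG_LEN 128
    #define PAYLOAD   (sizeof(struct hdr_a) + sizeof(struct hdr_b) + DATA_SEG_LEN)
    #define PAD_SIZE  (256 - (PAYLOAD + sizeof(uint64_t) + sizeof(struct sge)))

    struct rx_desc {
        struct hdr_a h1;
        struct hdr_b h2;
        char         data[DATA_SEG_LEN];
        uint64_t     dma_addr;
        struct sge   rx_sg;
        char         pad[PAD_SIZE];
    } __attribute__((packed));

    int main(void)
    {
        /* the pad rounds every descriptor up to exactly 256 bytes */
        assert(sizeof(struct rx_desc) == 256);
        printf("rx_desc size: %zu\n", sizeof(struct rx_desc));
        return 0;
    }
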
diff --git a/drivers/infiniband/ulp/iser/iser_initiator.c b/drivers/infiniband/ulp/iser/iser_initiator.c
index 9de6402..0b9ef07 100644
--- a/drivers/infiniband/ulp/iser/iser_initiator.c
+++ b/drivers/infiniband/ulp/iser/iser_initiator.c
@@ -39,29 +39,6 @@
#include "iscsi_iser.h"
-/* Constant PDU lengths calculations */
-#define ISER_TOTAL_HEADERS_LEN (sizeof (struct iser_hdr) + \
- sizeof (struct iscsi_hdr))
-
-/* iser_dto_add_regd_buff - increments the reference count for *
- * the registered buffer & adds it to the DTO object */
-static void iser_dto_add_regd_buff(struct iser_dto *dto,
- struct iser_regd_buf *regd_buf,
- unsigned long use_offset,
- unsigned long use_size)
-{
- int add_idx;
-
- atomic_inc(®d_buf->ref_count);
-
- add_idx = dto->regd_vector_len;
- dto->regd[add_idx] = regd_buf;
- dto->used_sz[add_idx] = use_size;
- dto->offset[add_idx] = use_offset;
-
- dto->regd_vector_len++;
-}
-
/* Register user buffer memory and initialize passive rdma
* dto descriptor. Total data size is stored in
* iser_task->data[ISER_DIR_IN].data_len
@@ -122,9 +99,9 @@
struct iscsi_iser_task *iser_task = task->dd_data;
struct iser_regd_buf *regd_buf;
int err;
- struct iser_dto *send_dto = &iser_task->desc.dto;
struct iser_hdr *hdr = &iser_task->desc.iser_header;
struct iser_data_buf *buf_out = &iser_task->data[ISER_DIR_OUT];
+ struct ib_sge *tx_dsg = &iser_task->desc.tx_sg[1];
err = iser_dma_map_task_data(iser_task,
buf_out,
@@ -163,135 +140,100 @@
if (imm_sz > 0) {
iser_dbg("Cmd itt:%d, WRITE, adding imm.data sz: %d\n",
task->itt, imm_sz);
- iser_dto_add_regd_buff(send_dto,
- regd_buf,
- 0,
- imm_sz);
+ tx_dsg->addr = regd_buf->reg.va;
+ tx_dsg->length = imm_sz;
+ tx_dsg->lkey = regd_buf->reg.lkey;
+ iser_task->desc.num_sge = 2;
}
return 0;
}
-/**
- * iser_post_receive_control - allocates, initializes and posts receive DTO.
- */
-static int iser_post_receive_control(struct iscsi_conn *conn)
-{
- struct iscsi_iser_conn *iser_conn = conn->dd_data;
- struct iser_desc *rx_desc;
- struct iser_regd_buf *regd_hdr;
- struct iser_regd_buf *regd_data;
- struct iser_dto *recv_dto = NULL;
- struct iser_device *device = iser_conn->ib_conn->device;
- int rx_data_size, err;
- int posts, outstanding_unexp_pdus;
-
- /* for the login sequence we must support rx of upto 8K; login is done
- * after conn create/bind (connect) and conn stop/bind (reconnect),
- * what's common for both schemes is that the connection is not started
- */
- if (conn->c_stage != ISCSI_CONN_STARTED)
- rx_data_size = ISCSI_DEF_MAX_RECV_SEG_LEN;
- else /* FIXME till user space sets conn->max_recv_dlength correctly */
- rx_data_size = 128;
-
- outstanding_unexp_pdus =
- atomic_xchg(&iser_conn->ib_conn->unexpected_pdu_count, 0);
-
- /*
- * in addition to the response buffer, replace those consumed by
- * unexpected pdus.
- */
- for (posts = 0; posts < 1 + outstanding_unexp_pdus; posts++) {
- rx_desc = kmem_cache_alloc(ig.desc_cache, GFP_NOIO);
- if (rx_desc == NULL) {
- iser_err("Failed to alloc desc for post recv %d\n",
- posts);
- err = -ENOMEM;
- goto post_rx_cache_alloc_failure;
- }
- rx_desc->type = ISCSI_RX;
- rx_desc->data = kmalloc(rx_data_size, GFP_NOIO);
- if (rx_desc->data == NULL) {
- iser_err("Failed to alloc data buf for post recv %d\n",
- posts);
- err = -ENOMEM;
- goto post_rx_kmalloc_failure;
- }
-
- recv_dto = &rx_desc->dto;
- recv_dto->ib_conn = iser_conn->ib_conn;
- recv_dto->regd_vector_len = 0;
-
- regd_hdr = &rx_desc->hdr_regd_buf;
- memset(regd_hdr, 0, sizeof(struct iser_regd_buf));
- regd_hdr->device = device;
- regd_hdr->virt_addr = rx_desc; /* == &rx_desc->iser_header */
- regd_hdr->data_size = ISER_TOTAL_HEADERS_LEN;
-
- iser_reg_single(device, regd_hdr, DMA_FROM_DEVICE);
-
- iser_dto_add_regd_buff(recv_dto, regd_hdr, 0, 0);
-
- regd_data = &rx_desc->data_regd_buf;
- memset(regd_data, 0, sizeof(struct iser_regd_buf));
- regd_data->device = device;
- regd_data->virt_addr = rx_desc->data;
- regd_data->data_size = rx_data_size;
-
- iser_reg_single(device, regd_data, DMA_FROM_DEVICE);
-
- iser_dto_add_regd_buff(recv_dto, regd_data, 0, 0);
-
- err = iser_post_recv(rx_desc);
- if (err) {
- iser_err("Failed iser_post_recv for post %d\n", posts);
- goto post_rx_post_recv_failure;
- }
- }
- /* all posts successful */
- return 0;
-
-post_rx_post_recv_failure:
- iser_dto_buffs_release(recv_dto);
- kfree(rx_desc->data);
-post_rx_kmalloc_failure:
- kmem_cache_free(ig.desc_cache, rx_desc);
-post_rx_cache_alloc_failure:
- if (posts > 0) {
- /*
- * response buffer posted, but did not replace all unexpected
- * pdu recv bufs. Ignore error, retry occurs next send
- */
- outstanding_unexp_pdus -= (posts - 1);
- err = 0;
- }
- atomic_add(outstanding_unexp_pdus,
- &iser_conn->ib_conn->unexpected_pdu_count);
-
- return err;
-}
-
/* creates a new tx descriptor and adds header regd buffer */
-static void iser_create_send_desc(struct iscsi_iser_conn *iser_conn,
- struct iser_desc *tx_desc)
+static void iser_create_send_desc(struct iser_conn *ib_conn,
+ struct iser_tx_desc *tx_desc)
{
- struct iser_regd_buf *regd_hdr = &tx_desc->hdr_regd_buf;
- struct iser_dto *send_dto = &tx_desc->dto;
+ struct iser_device *device = ib_conn->device;
- memset(regd_hdr, 0, sizeof(struct iser_regd_buf));
- regd_hdr->device = iser_conn->ib_conn->device;
- regd_hdr->virt_addr = tx_desc; /* == &tx_desc->iser_header */
- regd_hdr->data_size = ISER_TOTAL_HEADERS_LEN;
-
- send_dto->ib_conn = iser_conn->ib_conn;
- send_dto->notify_enable = 1;
- send_dto->regd_vector_len = 0;
+ ib_dma_sync_single_for_cpu(device->ib_device,
+ tx_desc->dma_addr, ISER_HEADERS_LEN, DMA_TO_DEVICE);
memset(&tx_desc->iser_header, 0, sizeof(struct iser_hdr));
tx_desc->iser_header.flags = ISER_VER;
- iser_dto_add_regd_buff(send_dto, regd_hdr, 0, 0);
+ tx_desc->num_sge = 1;
+
+ if (tx_desc->tx_sg[0].lkey != device->mr->lkey) {
+ tx_desc->tx_sg[0].lkey = device->mr->lkey;
+ iser_dbg("sdesc %p lkey mismatch, fixing\n", tx_desc);
+ }
+}
+
+
+int iser_alloc_rx_descriptors(struct iser_conn *ib_conn)
+{
+ int i, j;
+ u64 dma_addr;
+ struct iser_rx_desc *rx_desc;
+ struct ib_sge *rx_sg;
+ struct iser_device *device = ib_conn->device;
+
+ ib_conn->rx_descs = kmalloc(ISER_QP_MAX_RECV_DTOS *
+ sizeof(struct iser_rx_desc), GFP_KERNEL);
+ if (!ib_conn->rx_descs)
+ goto rx_desc_alloc_fail;
+
+ rx_desc = ib_conn->rx_descs;
+
+ for (i = 0; i < ISER_QP_MAX_RECV_DTOS; i++, rx_desc++) {
+ dma_addr = ib_dma_map_single(device->ib_device, (void *)rx_desc,
+ ISER_RX_PAYLOAD_SIZE, DMA_FROM_DEVICE);
+ if (ib_dma_mapping_error(device->ib_device, dma_addr))
+ goto rx_desc_dma_map_failed;
+
+ rx_desc->dma_addr = dma_addr;
+
+ rx_sg = &rx_desc->rx_sg;
+ rx_sg->addr = rx_desc->dma_addr;
+ rx_sg->length = ISER_RX_PAYLOAD_SIZE;
+ rx_sg->lkey = device->mr->lkey;
+ }
+
+ ib_conn->rx_desc_head = 0;
+ return 0;
+
+rx_desc_dma_map_failed:
+ rx_desc = ib_conn->rx_descs;
+ for (j = 0; j < i; j++, rx_desc++)
+ ib_dma_unmap_single(device->ib_device, rx_desc->dma_addr,
+ ISER_RX_PAYLOAD_SIZE, DMA_FROM_DEVICE);
+ kfree(ib_conn->rx_descs);
+ ib_conn->rx_descs = NULL;
+rx_desc_alloc_fail:
+ iser_err("failed allocating rx descriptors / data buffers\n");
+ return -ENOMEM;
+}
+
+void iser_free_rx_descriptors(struct iser_conn *ib_conn)
+{
+ int i;
+ struct iser_rx_desc *rx_desc;
+ struct iser_device *device = ib_conn->device;
+
+ if (ib_conn->login_buf) {
+ ib_dma_unmap_single(device->ib_device, ib_conn->login_dma,
+ ISER_RX_LOGIN_SIZE, DMA_FROM_DEVICE);
+ kfree(ib_conn->login_buf);
+ }
+
+ if (!ib_conn->rx_descs)
+ return;
+
+ rx_desc = ib_conn->rx_descs;
+ for (i = 0; i < ISER_QP_MAX_RECV_DTOS; i++, rx_desc++)
+ ib_dma_unmap_single(device->ib_device, rx_desc->dma_addr,
+ ISER_RX_PAYLOAD_SIZE, DMA_FROM_DEVICE);
+ kfree(ib_conn->rx_descs);
}
/**
@@ -301,46 +243,23 @@
{
struct iscsi_iser_conn *iser_conn = conn->dd_data;
- int i;
- /*
- * FIXME this value should be declared to the target during login with
- * the MaxOutstandingUnexpectedPDUs key when supported
- */
- int initial_post_recv_bufs_num = ISER_MAX_RX_MISC_PDUS;
-
- iser_dbg("Initially post: %d\n", initial_post_recv_bufs_num);
+ iser_dbg("Initially post: %d\n", ISER_MIN_POSTED_RX);
/* Check that there is no posted recv or send buffers left - */
/* they must be consumed during the login phase */
- BUG_ON(atomic_read(&iser_conn->ib_conn->post_recv_buf_count) != 0);
+ BUG_ON(iser_conn->ib_conn->post_recv_buf_count != 0);
BUG_ON(atomic_read(&iser_conn->ib_conn->post_send_buf_count) != 0);
+ if (iser_alloc_rx_descriptors(iser_conn->ib_conn))
+ return -ENOMEM;
+
/* Initial post receive buffers */
- for (i = 0; i < initial_post_recv_bufs_num; i++) {
- if (iser_post_receive_control(conn) != 0) {
- iser_err("Failed to post recv bufs at:%d conn:0x%p\n",
- i, conn);
- return -ENOMEM;
- }
- }
- iser_dbg("Posted %d post recv bufs, conn:0x%p\n", i, conn);
+ if (iser_post_recvm(iser_conn->ib_conn, ISER_MIN_POSTED_RX))
+ return -ENOMEM;
+
return 0;
}
-static int
-iser_check_xmit(struct iscsi_conn *conn, void *task)
-{
- struct iscsi_iser_conn *iser_conn = conn->dd_data;
-
- if (atomic_read(&iser_conn->ib_conn->post_send_buf_count) ==
- ISER_QP_MAX_REQ_DTOS) {
- iser_dbg("%ld can't xmit task %p\n",jiffies,task);
- return -ENOBUFS;
- }
- return 0;
-}
-
-
/**
* iser_send_command - send command PDU
*/
@@ -349,27 +268,18 @@
{
struct iscsi_iser_conn *iser_conn = conn->dd_data;
struct iscsi_iser_task *iser_task = task->dd_data;
- struct iser_dto *send_dto = NULL;
unsigned long edtl;
- int err = 0;
+ int err;
struct iser_data_buf *data_buf;
struct iscsi_cmd *hdr = (struct iscsi_cmd *)task->hdr;
struct scsi_cmnd *sc = task->sc;
-
- if (!iser_conn_state_comp(iser_conn->ib_conn, ISER_CONN_UP)) {
- iser_err("Failed to send, conn: 0x%p is not up\n", iser_conn->ib_conn);
- return -EPERM;
- }
- if (iser_check_xmit(conn, task))
- return -ENOBUFS;
+ struct iser_tx_desc *tx_desc = &iser_task->desc;
edtl = ntohl(hdr->data_length);
/* build the tx desc regd header and add it to the tx desc dto */
- iser_task->desc.type = ISCSI_TX_SCSI_COMMAND;
- send_dto = &iser_task->desc.dto;
- send_dto->task = iser_task;
- iser_create_send_desc(iser_conn, &iser_task->desc);
+ tx_desc->type = ISCSI_TX_SCSI_COMMAND;
+ iser_create_send_desc(iser_conn->ib_conn, tx_desc);
if (hdr->flags & ISCSI_FLAG_CMD_READ)
data_buf = &iser_task->data[ISER_DIR_IN];
@@ -398,23 +308,13 @@
goto send_command_error;
}
- iser_reg_single(iser_conn->ib_conn->device,
- send_dto->regd[0], DMA_TO_DEVICE);
-
- if (iser_post_receive_control(conn) != 0) {
- iser_err("post_recv failed!\n");
- err = -ENOMEM;
- goto send_command_error;
- }
-
iser_task->status = ISER_TASK_STATUS_STARTED;
- err = iser_post_send(&iser_task->desc);
+ err = iser_post_send(iser_conn->ib_conn, tx_desc);
if (!err)
return 0;
send_command_error:
- iser_dto_buffs_release(send_dto);
iser_err("conn %p failed task->itt %d err %d\n",conn, task->itt, err);
return err;
}
@@ -428,20 +328,13 @@
{
struct iscsi_iser_conn *iser_conn = conn->dd_data;
struct iscsi_iser_task *iser_task = task->dd_data;
- struct iser_desc *tx_desc = NULL;
- struct iser_dto *send_dto = NULL;
+ struct iser_tx_desc *tx_desc = NULL;
+ struct iser_regd_buf *regd_buf;
unsigned long buf_offset;
unsigned long data_seg_len;
uint32_t itt;
int err = 0;
-
- if (!iser_conn_state_comp(iser_conn->ib_conn, ISER_CONN_UP)) {
- iser_err("Failed to send, conn: 0x%p is not up\n", iser_conn->ib_conn);
- return -EPERM;
- }
-
- if (iser_check_xmit(conn, task))
- return -ENOBUFS;
+ struct ib_sge *tx_dsg;
itt = (__force uint32_t)hdr->itt;
data_seg_len = ntoh24(hdr->dlength);
@@ -450,28 +343,25 @@
iser_dbg("%s itt %d dseg_len %d offset %d\n",
__func__,(int)itt,(int)data_seg_len,(int)buf_offset);
- tx_desc = kmem_cache_alloc(ig.desc_cache, GFP_NOIO);
+ tx_desc = kmem_cache_zalloc(ig.desc_cache, GFP_ATOMIC);
if (tx_desc == NULL) {
iser_err("Failed to alloc desc for post dataout\n");
return -ENOMEM;
}
tx_desc->type = ISCSI_TX_DATAOUT;
+ tx_desc->iser_header.flags = ISER_VER;
memcpy(&tx_desc->iscsi_header, hdr, sizeof(struct iscsi_hdr));
- /* build the tx desc regd header and add it to the tx desc dto */
- send_dto = &tx_desc->dto;
- send_dto->task = iser_task;
- iser_create_send_desc(iser_conn, tx_desc);
+ /* build the tx desc */
+ iser_initialize_task_headers(task, tx_desc);
- iser_reg_single(iser_conn->ib_conn->device,
- send_dto->regd[0], DMA_TO_DEVICE);
-
- /* all data was registered for RDMA, we can use the lkey */
- iser_dto_add_regd_buff(send_dto,
- &iser_task->rdma_regd[ISER_DIR_OUT],
- buf_offset,
- data_seg_len);
+ regd_buf = &iser_task->rdma_regd[ISER_DIR_OUT];
+ tx_dsg = &tx_desc->tx_sg[1];
+ tx_dsg->addr = regd_buf->reg.va + buf_offset;
+ tx_dsg->length = data_seg_len;
+ tx_dsg->lkey = regd_buf->reg.lkey;
+ tx_desc->num_sge = 2;
if (buf_offset + data_seg_len > iser_task->data[ISER_DIR_OUT].data_len) {
iser_err("Offset:%ld & DSL:%ld in Data-Out "
@@ -485,12 +375,11 @@
itt, buf_offset, data_seg_len);
- err = iser_post_send(tx_desc);
+ err = iser_post_send(iser_conn->ib_conn, tx_desc);
if (!err)
return 0;
send_data_out_error:
- iser_dto_buffs_release(send_dto);
kmem_cache_free(ig.desc_cache, tx_desc);
iser_err("conn %p failed err %d\n",conn, err);
return err;
@@ -501,64 +390,44 @@
{
struct iscsi_iser_conn *iser_conn = conn->dd_data;
struct iscsi_iser_task *iser_task = task->dd_data;
- struct iser_desc *mdesc = &iser_task->desc;
- struct iser_dto *send_dto = NULL;
+ struct iser_tx_desc *mdesc = &iser_task->desc;
unsigned long data_seg_len;
int err = 0;
- struct iser_regd_buf *regd_buf;
struct iser_device *device;
- unsigned char opcode;
-
- if (!iser_conn_state_comp(iser_conn->ib_conn, ISER_CONN_UP)) {
- iser_err("Failed to send, conn: 0x%p is not up\n", iser_conn->ib_conn);
- return -EPERM;
- }
-
- if (iser_check_xmit(conn, task))
- return -ENOBUFS;
/* build the tx desc regd header and add it to the tx desc dto */
mdesc->type = ISCSI_TX_CONTROL;
- send_dto = &mdesc->dto;
- send_dto->task = NULL;
- iser_create_send_desc(iser_conn, mdesc);
+ iser_create_send_desc(iser_conn->ib_conn, mdesc);
device = iser_conn->ib_conn->device;
- iser_reg_single(device, send_dto->regd[0], DMA_TO_DEVICE);
-
data_seg_len = ntoh24(task->hdr->dlength);
if (data_seg_len > 0) {
- regd_buf = &mdesc->data_regd_buf;
- memset(regd_buf, 0, sizeof(struct iser_regd_buf));
- regd_buf->device = device;
- regd_buf->virt_addr = task->data;
- regd_buf->data_size = task->data_count;
- iser_reg_single(device, regd_buf,
- DMA_TO_DEVICE);
- iser_dto_add_regd_buff(send_dto, regd_buf,
- 0,
- data_seg_len);
- }
-
- opcode = task->hdr->opcode & ISCSI_OPCODE_MASK;
-
- /* post recv buffer for response if one is expected */
- if (!(opcode == ISCSI_OP_NOOP_OUT && task->hdr->itt == RESERVED_ITT)) {
- if (iser_post_receive_control(conn) != 0) {
- iser_err("post_rcv_buff failed!\n");
- err = -ENOMEM;
+ struct ib_sge *tx_dsg = &mdesc->tx_sg[1];
+ if (task != conn->login_task) {
+ iser_err("data present on non login task!!!\n");
goto send_control_error;
}
+ memcpy(iser_conn->ib_conn->login_buf, task->data,
+ task->data_count);
+ tx_dsg->addr = iser_conn->ib_conn->login_dma;
+ tx_dsg->length = data_seg_len;
+ tx_dsg->lkey = device->mr->lkey;
+ mdesc->num_sge = 2;
}
- err = iser_post_send(mdesc);
+ if (task == conn->login_task) {
+ err = iser_post_recvl(iser_conn->ib_conn);
+ if (err)
+ goto send_control_error;
+ }
+
+ err = iser_post_send(iser_conn->ib_conn, mdesc);
if (!err)
return 0;
send_control_error:
- iser_dto_buffs_release(send_dto);
iser_err("conn %p failed err %d\n",conn, err);
return err;
}
@@ -566,104 +435,71 @@
/**
* iser_rcv_dto_completion - recv DTO completion
*/
-void iser_rcv_completion(struct iser_desc *rx_desc,
- unsigned long dto_xfer_len)
+void iser_rcv_completion(struct iser_rx_desc *rx_desc,
+ unsigned long rx_xfer_len,
+ struct iser_conn *ib_conn)
{
- struct iser_dto *dto = &rx_desc->dto;
- struct iscsi_iser_conn *conn = dto->ib_conn->iser_conn;
- struct iscsi_task *task;
- struct iscsi_iser_task *iser_task;
+ struct iscsi_iser_conn *conn = ib_conn->iser_conn;
struct iscsi_hdr *hdr;
- char *rx_data = NULL;
- int rx_data_len = 0;
- unsigned char opcode;
+ u64 rx_dma;
+ int rx_buflen, outstanding, count, err;
+
+ /* differentiate between login to all other PDUs */
+ if ((char *)rx_desc == ib_conn->login_buf) {
+ rx_dma = ib_conn->login_dma;
+ rx_buflen = ISER_RX_LOGIN_SIZE;
+ } else {
+ rx_dma = rx_desc->dma_addr;
+ rx_buflen = ISER_RX_PAYLOAD_SIZE;
+ }
+
+ ib_dma_sync_single_for_cpu(ib_conn->device->ib_device, rx_dma,
+ rx_buflen, DMA_FROM_DEVICE);
hdr = &rx_desc->iscsi_header;
- iser_dbg("op 0x%x itt 0x%x\n", hdr->opcode,hdr->itt);
+ iser_dbg("op 0x%x itt 0x%x dlen %d\n", hdr->opcode,
+ hdr->itt, (int)(rx_xfer_len - ISER_HEADERS_LEN));
- if (dto_xfer_len > ISER_TOTAL_HEADERS_LEN) { /* we have data */
- rx_data_len = dto_xfer_len - ISER_TOTAL_HEADERS_LEN;
- rx_data = dto->regd[1]->virt_addr;
- rx_data += dto->offset[1];
- }
+ iscsi_iser_recv(conn->iscsi_conn, hdr,
+ rx_desc->data, rx_xfer_len - ISER_HEADERS_LEN);
- opcode = hdr->opcode & ISCSI_OPCODE_MASK;
-
- if (opcode == ISCSI_OP_SCSI_CMD_RSP) {
- spin_lock(&conn->iscsi_conn->session->lock);
- task = iscsi_itt_to_ctask(conn->iscsi_conn, hdr->itt);
- if (task)
- __iscsi_get_task(task);
- spin_unlock(&conn->iscsi_conn->session->lock);
-
- if (!task)
- iser_err("itt can't be matched to task!!! "
- "conn %p opcode %d itt %d\n",
- conn->iscsi_conn, opcode, hdr->itt);
- else {
- iser_task = task->dd_data;
- iser_dbg("itt %d task %p\n",hdr->itt, task);
- iser_task->status = ISER_TASK_STATUS_COMPLETED;
- iser_task_rdma_finalize(iser_task);
- iscsi_put_task(task);
- }
- }
- iser_dto_buffs_release(dto);
-
- iscsi_iser_recv(conn->iscsi_conn, hdr, rx_data, rx_data_len);
-
- kfree(rx_desc->data);
- kmem_cache_free(ig.desc_cache, rx_desc);
+ ib_dma_sync_single_for_device(ib_conn->device->ib_device, rx_dma,
+ rx_buflen, DMA_FROM_DEVICE);
/* decrementing conn->post_recv_buf_count only --after-- freeing the *
* task eliminates the need to worry on tasks which are completed in *
* parallel to the execution of iser_conn_term. So the code that waits *
* for the posted rx bufs refcount to become zero handles everything */
- atomic_dec(&conn->ib_conn->post_recv_buf_count);
+ conn->ib_conn->post_recv_buf_count--;
- /*
- * if an unexpected PDU was received then the recv wr consumed must
- * be replaced, this is done in the next send of a control-type PDU
- */
- if (opcode == ISCSI_OP_NOOP_IN && hdr->itt == RESERVED_ITT) {
- /* nop-in with itt = 0xffffffff */
- atomic_inc(&conn->ib_conn->unexpected_pdu_count);
+ if (rx_dma == ib_conn->login_dma)
+ return;
+
+ outstanding = ib_conn->post_recv_buf_count;
+ if (outstanding + ISER_MIN_POSTED_RX <= ISER_QP_MAX_RECV_DTOS) {
+ count = min(ISER_QP_MAX_RECV_DTOS - outstanding,
+ ISER_MIN_POSTED_RX);
+ err = iser_post_recvm(ib_conn, count);
+ if (err)
+ iser_err("posting %d rx bufs err %d\n", count, err);
}
- else if (opcode == ISCSI_OP_ASYNC_EVENT) {
- /* asyncronous message */
- atomic_inc(&conn->ib_conn->unexpected_pdu_count);
- }
- /* a reject PDU consumes the recv buf posted for the response */
}
-void iser_snd_completion(struct iser_desc *tx_desc)
+void iser_snd_completion(struct iser_tx_desc *tx_desc,
+ struct iser_conn *ib_conn)
{
- struct iser_dto *dto = &tx_desc->dto;
- struct iser_conn *ib_conn = dto->ib_conn;
- struct iscsi_iser_conn *iser_conn = ib_conn->iser_conn;
- struct iscsi_conn *conn = iser_conn->iscsi_conn;
struct iscsi_task *task;
- int resume_tx = 0;
+ struct iser_device *device = ib_conn->device;
- iser_dbg("Initiator, Data sent dto=0x%p\n", dto);
-
- iser_dto_buffs_release(dto);
-
- if (tx_desc->type == ISCSI_TX_DATAOUT)
+ if (tx_desc->type == ISCSI_TX_DATAOUT) {
+ ib_dma_unmap_single(device->ib_device, tx_desc->dma_addr,
+ ISER_HEADERS_LEN, DMA_TO_DEVICE);
kmem_cache_free(ig.desc_cache, tx_desc);
-
- if (atomic_read(&iser_conn->ib_conn->post_send_buf_count) ==
- ISER_QP_MAX_REQ_DTOS)
- resume_tx = 1;
+ }
atomic_dec(&ib_conn->post_send_buf_count);
- if (resume_tx) {
- iser_dbg("%ld resuming tx\n",jiffies);
- iscsi_conn_queue_work(conn);
- }
-
if (tx_desc->type == ISCSI_TX_CONTROL) {
/* this arithmetic is legal by libiscsi dd_data allocation */
task = (void *) ((long)(void *)tx_desc -
@@ -692,7 +528,6 @@
void iser_task_rdma_finalize(struct iscsi_iser_task *iser_task)
{
- int deferred;
int is_rdma_aligned = 1;
struct iser_regd_buf *regd;
@@ -710,32 +545,17 @@
if (iser_task->dir[ISER_DIR_IN]) {
regd = &iser_task->rdma_regd[ISER_DIR_IN];
- deferred = iser_regd_buff_release(regd);
- if (deferred) {
- iser_err("%d references remain for BUF-IN rdma reg\n",
- atomic_read(&regd->ref_count));
- }
+ if (regd->reg.is_fmr)
+ iser_unreg_mem(&regd->reg);
}
if (iser_task->dir[ISER_DIR_OUT]) {
regd = &iser_task->rdma_regd[ISER_DIR_OUT];
- deferred = iser_regd_buff_release(regd);
- if (deferred) {
- iser_err("%d references remain for BUF-OUT rdma reg\n",
- atomic_read(&regd->ref_count));
- }
+ if (regd->reg.is_fmr)
+ iser_unreg_mem(&regd->reg);
}
/* if the data was unaligned, it was already unmapped and then copied */
if (is_rdma_aligned)
iser_dma_unmap_task_data(iser_task);
}
-
-void iser_dto_buffs_release(struct iser_dto *dto)
-{
- int i;
-
- for (i = 0; i < dto->regd_vector_len; i++)
- iser_regd_buff_release(dto->regd[i]);
-}
-
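The receive path above no longer reposts one buffer per control PDU; instead, iser_rcv_completion() replenishes the ring in batches of ISER_MIN_POSTED_RX once enough credits have been consumed. A minimal user-space model of that replenish heuristic (the two constants are assumed here purely for illustration; the real values live in iscsi_iser.h, and ISER_QP_MAX_RECV_DTOS must stay a power of two because rx_desc_head is advanced with an "& (ISER_QP_MAX_RECV_DTOS - 1)" mask):

#include <stdio.h>

#define QP_MAX_RECV_DTOS	128	/* assumed ring size (power of two) */
#define MIN_POSTED_RX		16	/* assumed replenish batch */

static int posted = QP_MAX_RECV_DTOS;	/* models ib_conn->post_recv_buf_count */

static void rcv_completion(void)
{
	int outstanding, count;

	posted--;				/* this completion consumed a buffer */
	outstanding = posted;
	if (outstanding + MIN_POSTED_RX <= QP_MAX_RECV_DTOS) {
		count = QP_MAX_RECV_DTOS - outstanding;
		if (count > MIN_POSTED_RX)
			count = MIN_POSTED_RX;
		posted += count;		/* stands in for iser_post_recvm() */
		printf("reposted %d rx buffers, %d now posted\n", count, posted);
	}
}

int main(void)
{
	int i;

	for (i = 0; i < 40; i++)		/* simulate 40 receive completions */
		rcv_completion();
	return 0;
}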
diff --git a/drivers/infiniband/ulp/iser/iser_memory.c b/drivers/infiniband/ulp/iser/iser_memory.c
index 274c883..fb88d68 100644
--- a/drivers/infiniband/ulp/iser/iser_memory.c
+++ b/drivers/infiniband/ulp/iser/iser_memory.c
@@ -41,62 +41,6 @@
#define ISER_KMALLOC_THRESHOLD 0x20000 /* 128K - kmalloc limit */
/**
- * Decrements the reference count for the
- * registered buffer & releases it
- *
- * returns 0 if released, 1 if deferred
- */
-int iser_regd_buff_release(struct iser_regd_buf *regd_buf)
-{
- struct ib_device *dev;
-
- if ((atomic_read(&regd_buf->ref_count) == 0) ||
- atomic_dec_and_test(&regd_buf->ref_count)) {
- /* if we used the dma mr, unreg is just NOP */
- if (regd_buf->reg.is_fmr)
- iser_unreg_mem(&regd_buf->reg);
-
- if (regd_buf->dma_addr) {
- dev = regd_buf->device->ib_device;
- ib_dma_unmap_single(dev,
- regd_buf->dma_addr,
- regd_buf->data_size,
- regd_buf->direction);
- }
- /* else this regd buf is associated with task which we */
- /* dma_unmap_single/sg later */
- return 0;
- } else {
- iser_dbg("Release deferred, regd.buff: 0x%p\n", regd_buf);
- return 1;
- }
-}
-
-/**
- * iser_reg_single - fills registered buffer descriptor with
- * registration information
- */
-void iser_reg_single(struct iser_device *device,
- struct iser_regd_buf *regd_buf,
- enum dma_data_direction direction)
-{
- u64 dma_addr;
-
- dma_addr = ib_dma_map_single(device->ib_device,
- regd_buf->virt_addr,
- regd_buf->data_size, direction);
- BUG_ON(ib_dma_mapping_error(device->ib_device, dma_addr));
-
- regd_buf->reg.lkey = device->mr->lkey;
- regd_buf->reg.len = regd_buf->data_size;
- regd_buf->reg.va = dma_addr;
- regd_buf->reg.is_fmr = 0;
-
- regd_buf->dma_addr = dma_addr;
- regd_buf->direction = direction;
-}
-
-/**
* iser_start_rdma_unaligned_sg
*/
static int iser_start_rdma_unaligned_sg(struct iscsi_iser_task *iser_task,
@@ -109,10 +53,10 @@
unsigned long cmd_data_len = data->data_len;
if (cmd_data_len > ISER_KMALLOC_THRESHOLD)
- mem = (void *)__get_free_pages(GFP_NOIO,
+ mem = (void *)__get_free_pages(GFP_ATOMIC,
ilog2(roundup_pow_of_two(cmd_data_len)) - PAGE_SHIFT);
else
- mem = kmalloc(cmd_data_len, GFP_NOIO);
+ mem = kmalloc(cmd_data_len, GFP_ATOMIC);
if (mem == NULL) {
iser_err("Failed to allocate mem size %d %d for copying sglist\n",
@@ -474,9 +418,5 @@
return err;
}
}
-
- /* take a reference on this regd buf such that it will not be released *
- * (eg in send dto completion) before we get the scsi response */
- atomic_inc(&regd_buf->ref_count);
return 0;
}
diff --git a/drivers/infiniband/ulp/iser/iser_verbs.c b/drivers/infiniband/ulp/iser/iser_verbs.c
index 8579f32..308d17b 100644
--- a/drivers/infiniband/ulp/iser/iser_verbs.c
+++ b/drivers/infiniband/ulp/iser/iser_verbs.c
@@ -37,9 +37,8 @@
#include "iscsi_iser.h"
#define ISCSI_ISER_MAX_CONN 8
-#define ISER_MAX_CQ_LEN ((ISER_QP_MAX_RECV_DTOS + \
- ISER_QP_MAX_REQ_DTOS) * \
- ISCSI_ISER_MAX_CONN)
+#define ISER_MAX_RX_CQ_LEN (ISER_QP_MAX_RECV_DTOS * ISCSI_ISER_MAX_CONN)
+#define ISER_MAX_TX_CQ_LEN (ISER_QP_MAX_REQ_DTOS * ISCSI_ISER_MAX_CONN)
static void iser_cq_tasklet_fn(unsigned long data);
static void iser_cq_callback(struct ib_cq *cq, void *cq_context);
@@ -67,15 +66,23 @@
if (IS_ERR(device->pd))
goto pd_err;
- device->cq = ib_create_cq(device->ib_device,
+ device->rx_cq = ib_create_cq(device->ib_device,
iser_cq_callback,
iser_cq_event_callback,
(void *)device,
- ISER_MAX_CQ_LEN, 0);
- if (IS_ERR(device->cq))
- goto cq_err;
+ ISER_MAX_RX_CQ_LEN, 0);
+ if (IS_ERR(device->rx_cq))
+ goto rx_cq_err;
- if (ib_req_notify_cq(device->cq, IB_CQ_NEXT_COMP))
+ device->tx_cq = ib_create_cq(device->ib_device,
+ NULL, iser_cq_event_callback,
+ (void *)device,
+ ISER_MAX_TX_CQ_LEN, 0);
+
+ if (IS_ERR(device->tx_cq))
+ goto tx_cq_err;
+
+ if (ib_req_notify_cq(device->rx_cq, IB_CQ_NEXT_COMP))
goto cq_arm_err;
tasklet_init(&device->cq_tasklet,
@@ -93,8 +100,10 @@
dma_mr_err:
tasklet_kill(&device->cq_tasklet);
cq_arm_err:
- ib_destroy_cq(device->cq);
-cq_err:
+ ib_destroy_cq(device->tx_cq);
+tx_cq_err:
+ ib_destroy_cq(device->rx_cq);
+rx_cq_err:
ib_dealloc_pd(device->pd);
pd_err:
iser_err("failed to allocate an IB resource\n");
@@ -112,11 +121,13 @@
tasklet_kill(&device->cq_tasklet);
(void)ib_dereg_mr(device->mr);
- (void)ib_destroy_cq(device->cq);
+ (void)ib_destroy_cq(device->tx_cq);
+ (void)ib_destroy_cq(device->rx_cq);
(void)ib_dealloc_pd(device->pd);
device->mr = NULL;
- device->cq = NULL;
+ device->tx_cq = NULL;
+ device->rx_cq = NULL;
device->pd = NULL;
}
@@ -129,13 +140,23 @@
{
struct iser_device *device;
struct ib_qp_init_attr init_attr;
- int ret;
+ int ret = -ENOMEM;
struct ib_fmr_pool_param params;
BUG_ON(ib_conn->device == NULL);
device = ib_conn->device;
+ ib_conn->login_buf = kmalloc(ISER_RX_LOGIN_SIZE, GFP_KERNEL);
+ if (!ib_conn->login_buf) {
+ goto alloc_err;
+ ret = -ENOMEM;
+ }
+
+ ib_conn->login_dma = ib_dma_map_single(ib_conn->device->ib_device,
+ (void *)ib_conn->login_buf, ISER_RX_LOGIN_SIZE,
+ DMA_FROM_DEVICE);
+
ib_conn->page_vec = kmalloc(sizeof(struct iser_page_vec) +
(sizeof(u64) * (ISCSI_ISER_SG_TABLESIZE +1)),
GFP_KERNEL);
@@ -169,12 +190,12 @@
init_attr.event_handler = iser_qp_event_callback;
init_attr.qp_context = (void *)ib_conn;
- init_attr.send_cq = device->cq;
- init_attr.recv_cq = device->cq;
+ init_attr.send_cq = device->tx_cq;
+ init_attr.recv_cq = device->rx_cq;
init_attr.cap.max_send_wr = ISER_QP_MAX_REQ_DTOS;
init_attr.cap.max_recv_wr = ISER_QP_MAX_RECV_DTOS;
- init_attr.cap.max_send_sge = MAX_REGD_BUF_VECTOR_LEN;
- init_attr.cap.max_recv_sge = 2;
+ init_attr.cap.max_send_sge = 2;
+ init_attr.cap.max_recv_sge = 1;
init_attr.sq_sig_type = IB_SIGNAL_REQ_WR;
init_attr.qp_type = IB_QPT_RC;
@@ -192,6 +213,7 @@
(void)ib_destroy_fmr_pool(ib_conn->fmr_pool);
fmr_pool_err:
kfree(ib_conn->page_vec);
+ kfree(ib_conn->login_buf);
alloc_err:
iser_err("unable to alloc mem or create resource, err %d\n", ret);
return ret;
@@ -278,17 +300,6 @@
mutex_unlock(&ig.device_list_mutex);
}
-int iser_conn_state_comp(struct iser_conn *ib_conn,
- enum iser_ib_conn_state comp)
-{
- int ret;
-
- spin_lock_bh(&ib_conn->lock);
- ret = (ib_conn->state == comp);
- spin_unlock_bh(&ib_conn->lock);
- return ret;
-}
-
static int iser_conn_state_comp_exch(struct iser_conn *ib_conn,
enum iser_ib_conn_state comp,
enum iser_ib_conn_state exch)
@@ -314,7 +325,7 @@
mutex_lock(&ig.connlist_mutex);
list_del(&ib_conn->conn_list);
mutex_unlock(&ig.connlist_mutex);
-
+ iser_free_rx_descriptors(ib_conn);
iser_free_ib_conn_res(ib_conn);
ib_conn->device = NULL;
/* on EVENT_ADDR_ERROR there's no device yet for this conn */
@@ -442,7 +453,7 @@
ISCSI_ERR_CONN_FAILED);
/* Complete the termination process if no posts are pending */
- if ((atomic_read(&ib_conn->post_recv_buf_count) == 0) &&
+ if (ib_conn->post_recv_buf_count == 0 &&
(atomic_read(&ib_conn->post_send_buf_count) == 0)) {
ib_conn->state = ISER_CONN_DOWN;
wake_up_interruptible(&ib_conn->wait);
@@ -489,9 +500,8 @@
{
ib_conn->state = ISER_CONN_INIT;
init_waitqueue_head(&ib_conn->wait);
- atomic_set(&ib_conn->post_recv_buf_count, 0);
+ ib_conn->post_recv_buf_count = 0;
atomic_set(&ib_conn->post_send_buf_count, 0);
- atomic_set(&ib_conn->unexpected_pdu_count, 0);
atomic_set(&ib_conn->refcount, 1);
INIT_LIST_HEAD(&ib_conn->conn_list);
spin_lock_init(&ib_conn->lock);
@@ -626,136 +636,97 @@
reg->mem_h = NULL;
}
-/**
- * iser_dto_to_iov - builds IOV from a dto descriptor
- */
-static void iser_dto_to_iov(struct iser_dto *dto, struct ib_sge *iov, int iov_len)
+int iser_post_recvl(struct iser_conn *ib_conn)
{
- int i;
- struct ib_sge *sge;
- struct iser_regd_buf *regd_buf;
+ struct ib_recv_wr rx_wr, *rx_wr_failed;
+ struct ib_sge sge;
+ int ib_ret;
- if (dto->regd_vector_len > iov_len) {
- iser_err("iov size %d too small for posting dto of len %d\n",
- iov_len, dto->regd_vector_len);
- BUG();
- }
+ sge.addr = ib_conn->login_dma;
+ sge.length = ISER_RX_LOGIN_SIZE;
+ sge.lkey = ib_conn->device->mr->lkey;
- for (i = 0; i < dto->regd_vector_len; i++) {
- sge = &iov[i];
- regd_buf = dto->regd[i];
+ rx_wr.wr_id = (unsigned long)ib_conn->login_buf;
+ rx_wr.sg_list = &sge;
+ rx_wr.num_sge = 1;
+ rx_wr.next = NULL;
- sge->addr = regd_buf->reg.va;
- sge->length = regd_buf->reg.len;
- sge->lkey = regd_buf->reg.lkey;
-
- if (dto->used_sz[i] > 0) /* Adjust size */
- sge->length = dto->used_sz[i];
-
- /* offset and length should not exceed the regd buf length */
- if (sge->length + dto->offset[i] > regd_buf->reg.len) {
- iser_err("Used len:%ld + offset:%d, exceed reg.buf.len:"
- "%ld in dto:0x%p [%d], va:0x%08lX\n",
- (unsigned long)sge->length, dto->offset[i],
- (unsigned long)regd_buf->reg.len, dto, i,
- (unsigned long)sge->addr);
- BUG();
- }
-
- sge->addr += dto->offset[i]; /* Adjust offset */
- }
-}
-
-/**
- * iser_post_recv - Posts a receive buffer.
- *
- * returns 0 on success, -1 on failure
- */
-int iser_post_recv(struct iser_desc *rx_desc)
-{
- int ib_ret, ret_val = 0;
- struct ib_recv_wr recv_wr, *recv_wr_failed;
- struct ib_sge iov[2];
- struct iser_conn *ib_conn;
- struct iser_dto *recv_dto = &rx_desc->dto;
-
- /* Retrieve conn */
- ib_conn = recv_dto->ib_conn;
-
- iser_dto_to_iov(recv_dto, iov, 2);
-
- recv_wr.next = NULL;
- recv_wr.sg_list = iov;
- recv_wr.num_sge = recv_dto->regd_vector_len;
- recv_wr.wr_id = (unsigned long)rx_desc;
-
- atomic_inc(&ib_conn->post_recv_buf_count);
- ib_ret = ib_post_recv(ib_conn->qp, &recv_wr, &recv_wr_failed);
+ ib_conn->post_recv_buf_count++;
+ ib_ret = ib_post_recv(ib_conn->qp, &rx_wr, &rx_wr_failed);
if (ib_ret) {
iser_err("ib_post_recv failed ret=%d\n", ib_ret);
- atomic_dec(&ib_conn->post_recv_buf_count);
- ret_val = -1;
+ ib_conn->post_recv_buf_count--;
+ }
+ return ib_ret;
+}
+
+int iser_post_recvm(struct iser_conn *ib_conn, int count)
+{
+ struct ib_recv_wr *rx_wr, *rx_wr_failed;
+ int i, ib_ret;
+ unsigned int my_rx_head = ib_conn->rx_desc_head;
+ struct iser_rx_desc *rx_desc;
+
+ for (rx_wr = ib_conn->rx_wr, i = 0; i < count; i++, rx_wr++) {
+ rx_desc = &ib_conn->rx_descs[my_rx_head];
+ rx_wr->wr_id = (unsigned long)rx_desc;
+ rx_wr->sg_list = &rx_desc->rx_sg;
+ rx_wr->num_sge = 1;
+ rx_wr->next = rx_wr + 1;
+ my_rx_head = (my_rx_head + 1) & (ISER_QP_MAX_RECV_DTOS - 1);
}
- return ret_val;
+ rx_wr--;
+ rx_wr->next = NULL; /* mark end of work requests list */
+
+ ib_conn->post_recv_buf_count += count;
+ ib_ret = ib_post_recv(ib_conn->qp, ib_conn->rx_wr, &rx_wr_failed);
+ if (ib_ret) {
+ iser_err("ib_post_recv failed ret=%d\n", ib_ret);
+ ib_conn->post_recv_buf_count -= count;
+ } else
+ ib_conn->rx_desc_head = my_rx_head;
+ return ib_ret;
}
+
/**
* iser_start_send - Initiate a Send DTO operation
*
* returns 0 on success, -1 on failure
*/
-int iser_post_send(struct iser_desc *tx_desc)
+int iser_post_send(struct iser_conn *ib_conn, struct iser_tx_desc *tx_desc)
{
- int ib_ret, ret_val = 0;
+ int ib_ret;
struct ib_send_wr send_wr, *send_wr_failed;
- struct ib_sge iov[MAX_REGD_BUF_VECTOR_LEN];
- struct iser_conn *ib_conn;
- struct iser_dto *dto = &tx_desc->dto;
- ib_conn = dto->ib_conn;
-
- iser_dto_to_iov(dto, iov, MAX_REGD_BUF_VECTOR_LEN);
+ ib_dma_sync_single_for_device(ib_conn->device->ib_device,
+ tx_desc->dma_addr, ISER_HEADERS_LEN, DMA_TO_DEVICE);
send_wr.next = NULL;
send_wr.wr_id = (unsigned long)tx_desc;
- send_wr.sg_list = iov;
- send_wr.num_sge = dto->regd_vector_len;
+ send_wr.sg_list = tx_desc->tx_sg;
+ send_wr.num_sge = tx_desc->num_sge;
send_wr.opcode = IB_WR_SEND;
- send_wr.send_flags = dto->notify_enable ? IB_SEND_SIGNALED : 0;
+ send_wr.send_flags = IB_SEND_SIGNALED;
atomic_inc(&ib_conn->post_send_buf_count);
ib_ret = ib_post_send(ib_conn->qp, &send_wr, &send_wr_failed);
if (ib_ret) {
- iser_err("Failed to start SEND DTO, dto: 0x%p, IOV len: %d\n",
- dto, dto->regd_vector_len);
iser_err("ib_post_send failed, ret:%d\n", ib_ret);
atomic_dec(&ib_conn->post_send_buf_count);
- ret_val = -1;
}
-
- return ret_val;
+ return ib_ret;
}
-static void iser_handle_comp_error(struct iser_desc *desc)
+static void iser_handle_comp_error(struct iser_tx_desc *desc,
+ struct iser_conn *ib_conn)
{
- struct iser_dto *dto = &desc->dto;
- struct iser_conn *ib_conn = dto->ib_conn;
-
- iser_dto_buffs_release(dto);
-
- if (desc->type == ISCSI_RX) {
- kfree(desc->data);
+ if (desc && desc->type == ISCSI_TX_DATAOUT)
kmem_cache_free(ig.desc_cache, desc);
- atomic_dec(&ib_conn->post_recv_buf_count);
- } else { /* type is TX control/command/dataout */
- if (desc->type == ISCSI_TX_DATAOUT)
- kmem_cache_free(ig.desc_cache, desc);
- atomic_dec(&ib_conn->post_send_buf_count);
- }
- if (atomic_read(&ib_conn->post_recv_buf_count) == 0 &&
+ if (ib_conn->post_recv_buf_count == 0 &&
atomic_read(&ib_conn->post_send_buf_count) == 0) {
/* getting here when the state is UP means that the conn is *
* being terminated asynchronously from the iSCSI layer's *
@@ -774,32 +745,74 @@
}
}
+static int iser_drain_tx_cq(struct iser_device *device)
+{
+ struct ib_cq *cq = device->tx_cq;
+ struct ib_wc wc;
+ struct iser_tx_desc *tx_desc;
+ struct iser_conn *ib_conn;
+ int completed_tx = 0;
+
+ while (ib_poll_cq(cq, 1, &wc) == 1) {
+ tx_desc = (struct iser_tx_desc *) (unsigned long) wc.wr_id;
+ ib_conn = wc.qp->qp_context;
+ if (wc.status == IB_WC_SUCCESS) {
+ if (wc.opcode == IB_WC_SEND)
+ iser_snd_completion(tx_desc, ib_conn);
+ else
+ iser_err("expected opcode %d got %d\n",
+ IB_WC_SEND, wc.opcode);
+ } else {
+ iser_err("tx id %llx status %d vend_err %x\n",
+ wc.wr_id, wc.status, wc.vendor_err);
+ atomic_dec(&ib_conn->post_send_buf_count);
+ iser_handle_comp_error(tx_desc, ib_conn);
+ }
+ completed_tx++;
+ }
+ return completed_tx;
+}
+
+
static void iser_cq_tasklet_fn(unsigned long data)
{
struct iser_device *device = (struct iser_device *)data;
- struct ib_cq *cq = device->cq;
+ struct ib_cq *cq = device->rx_cq;
struct ib_wc wc;
- struct iser_desc *desc;
+ struct iser_rx_desc *desc;
unsigned long xfer_len;
+ struct iser_conn *ib_conn;
+ int completed_tx, completed_rx;
+ completed_tx = completed_rx = 0;
while (ib_poll_cq(cq, 1, &wc) == 1) {
- desc = (struct iser_desc *) (unsigned long) wc.wr_id;
+ desc = (struct iser_rx_desc *) (unsigned long) wc.wr_id;
BUG_ON(desc == NULL);
-
+ ib_conn = wc.qp->qp_context;
if (wc.status == IB_WC_SUCCESS) {
- if (desc->type == ISCSI_RX) {
+ if (wc.opcode == IB_WC_RECV) {
xfer_len = (unsigned long)wc.byte_len;
- iser_rcv_completion(desc, xfer_len);
- } else /* type == ISCSI_TX_CONTROL/SCSI_CMD/DOUT */
- iser_snd_completion(desc);
+ iser_rcv_completion(desc, xfer_len, ib_conn);
+ } else
+ iser_err("expected opcode %d got %d\n",
+ IB_WC_RECV, wc.opcode);
} else {
- iser_err("comp w. error op %d status %d\n",desc->type,wc.status);
- iser_handle_comp_error(desc);
+ if (wc.status != IB_WC_WR_FLUSH_ERR)
+ iser_err("rx id %llx status %d vend_err %x\n",
+ wc.wr_id, wc.status, wc.vendor_err);
+ ib_conn->post_recv_buf_count--;
+ iser_handle_comp_error(NULL, ib_conn);
}
+ completed_rx++;
+ if (!(completed_rx & 63))
+ completed_tx += iser_drain_tx_cq(device);
}
/* #warning "it is assumed here that arming CQ only once its empty" *
* " would not cause interrupts to be missed" */
ib_req_notify_cq(cq, IB_CQ_NEXT_COMP);
+
+ completed_tx += iser_drain_tx_cq(device);
+ iser_dbg("got %d rx %d tx completions\n", completed_rx, completed_tx);
}
static void iser_cq_callback(struct ib_cq *cq, void *cq_context)
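iser_post_recvm() above posts a whole batch of receive buffers with a single verbs call by linking the per-descriptor work requests into a chain and terminating the list only on the last element; rx_desc_head is committed only if ib_post_recv() succeeds. A stripped-down kernel-style sketch of that chaining pattern (hypothetical helper, fixed-size array, error handling and head accounting left out):

static int post_recv_batch(struct ib_qp *qp, struct ib_sge *sg, int count)
{
	struct ib_recv_wr wr[16], *bad_wr;	/* sketch assumes count <= 16 */
	int i;

	for (i = 0; i < count; i++) {
		wr[i].wr_id   = (unsigned long)&sg[i];
		wr[i].sg_list = &sg[i];
		wr[i].num_sge = 1;
		wr[i].next    = &wr[i + 1];	/* chain to the next request */
	}
	wr[count - 1].next = NULL;		/* mark end of the work request list */

	return ib_post_recv(qp, wr, &bad_wr);	/* one call posts the whole batch */
}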
diff --git a/drivers/infiniband/ulp/srp/ib_srp.c b/drivers/infiniband/ulp/srp/ib_srp.c
index 54c8fe2..ed3f9eb 100644
--- a/drivers/infiniband/ulp/srp/ib_srp.c
+++ b/drivers/infiniband/ulp/srp/ib_srp.c
@@ -80,7 +80,8 @@
static void srp_add_one(struct ib_device *device);
static void srp_remove_one(struct ib_device *device);
-static void srp_completion(struct ib_cq *cq, void *target_ptr);
+static void srp_recv_completion(struct ib_cq *cq, void *target_ptr);
+static void srp_send_completion(struct ib_cq *cq, void *target_ptr);
static int srp_cm_handler(struct ib_cm_id *cm_id, struct ib_cm_event *event);
static struct scsi_transport_template *ib_srp_transport_template;
@@ -227,14 +228,21 @@
if (!init_attr)
return -ENOMEM;
- target->cq = ib_create_cq(target->srp_host->srp_dev->dev,
- srp_completion, NULL, target, SRP_CQ_SIZE, 0);
- if (IS_ERR(target->cq)) {
- ret = PTR_ERR(target->cq);
- goto out;
+ target->recv_cq = ib_create_cq(target->srp_host->srp_dev->dev,
+ srp_recv_completion, NULL, target, SRP_RQ_SIZE, 0);
+ if (IS_ERR(target->recv_cq)) {
+ ret = PTR_ERR(target->recv_cq);
+ goto err;
}
- ib_req_notify_cq(target->cq, IB_CQ_NEXT_COMP);
+ target->send_cq = ib_create_cq(target->srp_host->srp_dev->dev,
+ srp_send_completion, NULL, target, SRP_SQ_SIZE, 0);
+ if (IS_ERR(target->send_cq)) {
+ ret = PTR_ERR(target->send_cq);
+ goto err_recv_cq;
+ }
+
+ ib_req_notify_cq(target->recv_cq, IB_CQ_NEXT_COMP);
init_attr->event_handler = srp_qp_event;
init_attr->cap.max_send_wr = SRP_SQ_SIZE;
@@ -243,24 +251,32 @@
init_attr->cap.max_send_sge = 1;
init_attr->sq_sig_type = IB_SIGNAL_ALL_WR;
init_attr->qp_type = IB_QPT_RC;
- init_attr->send_cq = target->cq;
- init_attr->recv_cq = target->cq;
+ init_attr->send_cq = target->send_cq;
+ init_attr->recv_cq = target->recv_cq;
target->qp = ib_create_qp(target->srp_host->srp_dev->pd, init_attr);
if (IS_ERR(target->qp)) {
ret = PTR_ERR(target->qp);
- ib_destroy_cq(target->cq);
- goto out;
+ goto err_send_cq;
}
ret = srp_init_qp(target, target->qp);
- if (ret) {
- ib_destroy_qp(target->qp);
- ib_destroy_cq(target->cq);
- goto out;
- }
+ if (ret)
+ goto err_qp;
-out:
+ kfree(init_attr);
+ return 0;
+
+err_qp:
+ ib_destroy_qp(target->qp);
+
+err_send_cq:
+ ib_destroy_cq(target->send_cq);
+
+err_recv_cq:
+ ib_destroy_cq(target->recv_cq);
+
+err:
kfree(init_attr);
return ret;
}
@@ -270,7 +286,8 @@
int i;
ib_destroy_qp(target->qp);
- ib_destroy_cq(target->cq);
+ ib_destroy_cq(target->send_cq);
+ ib_destroy_cq(target->recv_cq);
for (i = 0; i < SRP_RQ_SIZE; ++i)
srp_free_iu(target->srp_host, target->rx_ring[i]);
@@ -568,7 +585,9 @@
if (ret)
goto err;
- while (ib_poll_cq(target->cq, 1, &wc) > 0)
+ while (ib_poll_cq(target->recv_cq, 1, &wc) > 0)
+ ; /* nothing */
+ while (ib_poll_cq(target->send_cq, 1, &wc) > 0)
; /* nothing */
spin_lock_irq(target->scsi_host->host_lock);
@@ -851,7 +870,7 @@
struct srp_iu *iu;
u8 opcode;
- iu = target->rx_ring[wc->wr_id & ~SRP_OP_RECV];
+ iu = target->rx_ring[wc->wr_id];
dev = target->srp_host->srp_dev->dev;
ib_dma_sync_single_for_cpu(dev, iu->dma, target->max_ti_iu_len,
@@ -898,7 +917,7 @@
DMA_FROM_DEVICE);
}
-static void srp_completion(struct ib_cq *cq, void *target_ptr)
+static void srp_recv_completion(struct ib_cq *cq, void *target_ptr)
{
struct srp_target_port *target = target_ptr;
struct ib_wc wc;
@@ -907,17 +926,31 @@
while (ib_poll_cq(cq, 1, &wc) > 0) {
if (wc.status) {
shost_printk(KERN_ERR, target->scsi_host,
- PFX "failed %s status %d\n",
- wc.wr_id & SRP_OP_RECV ? "receive" : "send",
+ PFX "failed receive status %d\n",
wc.status);
target->qp_in_error = 1;
break;
}
- if (wc.wr_id & SRP_OP_RECV)
- srp_handle_recv(target, &wc);
- else
- ++target->tx_tail;
+ srp_handle_recv(target, &wc);
+ }
+}
+
+static void srp_send_completion(struct ib_cq *cq, void *target_ptr)
+{
+ struct srp_target_port *target = target_ptr;
+ struct ib_wc wc;
+
+ while (ib_poll_cq(cq, 1, &wc) > 0) {
+ if (wc.status) {
+ shost_printk(KERN_ERR, target->scsi_host,
+ PFX "failed send status %d\n",
+ wc.status);
+ target->qp_in_error = 1;
+ break;
+ }
+
+ ++target->tx_tail;
}
}
@@ -930,7 +963,7 @@
int ret;
next = target->rx_head & (SRP_RQ_SIZE - 1);
- wr.wr_id = next | SRP_OP_RECV;
+ wr.wr_id = next;
iu = target->rx_ring[next];
list.addr = iu->dma;
@@ -970,6 +1003,8 @@
{
s32 min = (req_type == SRP_REQ_TASK_MGMT) ? 1 : 2;
+ srp_send_completion(target->send_cq, target);
+
if (target->tx_head - target->tx_tail >= SRP_SQ_SIZE)
return NULL;
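Splitting the SRP completion queue also removes the wr_id tagging trick: with a single CQ the driver had to mark receive work requests so their completions could be told apart from sends, which is what the now-deleted SRP_OP_RECV bit did. Roughly, the old scheme looked like this (reconstructed from the removed lines, not new code):

#define SRP_OP_RECV	(1U << 31)		/* high bit tags a receive wr_id */

	wr.wr_id = next | SRP_OP_RECV;		/* when posting a receive */

	/* in the single completion handler */
	if (wc.wr_id & SRP_OP_RECV)
		srp_handle_recv(target, &wc);	/* ring index is wc.wr_id & ~SRP_OP_RECV */
	else
		++target->tx_tail;		/* a send completed */

With separate recv and send CQs each handler already knows which kind of completion it is polling, so wr_id can simply be the bare rx ring index.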
diff --git a/drivers/infiniband/ulp/srp/ib_srp.h b/drivers/infiniband/ulp/srp/ib_srp.h
index e185b90..5a80eac 100644
--- a/drivers/infiniband/ulp/srp/ib_srp.h
+++ b/drivers/infiniband/ulp/srp/ib_srp.h
@@ -60,7 +60,6 @@
SRP_RQ_SHIFT = 6,
SRP_RQ_SIZE = 1 << SRP_RQ_SHIFT,
SRP_SQ_SIZE = SRP_RQ_SIZE - 1,
- SRP_CQ_SIZE = SRP_SQ_SIZE + SRP_RQ_SIZE,
SRP_TAG_TSK_MGMT = 1 << (SRP_RQ_SHIFT + 1),
@@ -69,8 +68,6 @@
SRP_FMR_DIRTY_SIZE = SRP_FMR_POOL_SIZE / 4
};
-#define SRP_OP_RECV (1 << 31)
-
enum srp_target_state {
SRP_TARGET_LIVE,
SRP_TARGET_CONNECTING,
@@ -133,7 +130,8 @@
int path_query_id;
struct ib_cm_id *cm_id;
- struct ib_cq *cq;
+ struct ib_cq *recv_cq;
+ struct ib_cq *send_cq;
struct ib_qp *qp;
int max_ti_iu_len;
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index 509c8f3..70ffbd0 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -4680,7 +4680,7 @@
{
unsigned long cpu;
struct page *spare_page;
- struct raid5_percpu *allcpus;
+ struct raid5_percpu __percpu *allcpus;
void *scribble;
int err;
diff --git a/drivers/md/raid5.h b/drivers/md/raid5.h
index dd70835..0f86f5e 100644
--- a/drivers/md/raid5.h
+++ b/drivers/md/raid5.h
@@ -405,7 +405,7 @@
* lists and performing address
* conversions
*/
- } *percpu;
+ } __percpu *percpu;
size_t scribble_len; /* size of scribble region must be
* associated with conf to handle
* cpu hotplug while reshaping
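The raid5 hunks only add the __percpu sparse annotation, which tells static checkers that these pointers come from the per-cpu allocator and must be dereferenced through the per-cpu accessors rather than directly. A minimal kernel-style sketch of the idiom (hypothetical structure, shown only to illustrate the annotation):

#include <linux/percpu.h>

struct example_stats {				/* hypothetical per-cpu payload */
	unsigned long hits;
};

static struct example_stats __percpu *stats;

static int example_init(void)
{
	int cpu;

	stats = alloc_percpu(struct example_stats);	/* returns a __percpu pointer */
	if (!stats)
		return -ENOMEM;

	for_each_possible_cpu(cpu)
		per_cpu_ptr(stats, cpu)->hits = 0;	/* per_cpu_ptr() yields a plain pointer */
	return 0;
}

static void example_exit(void)
{
	free_percpu(stats);
}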
diff --git a/drivers/media/dvb/firewire/firedtv-fw.c b/drivers/media/dvb/firewire/firedtv-fw.c
index 7a3de16..75afe4f 100644
--- a/drivers/media/dvb/firewire/firedtv-fw.c
+++ b/drivers/media/dvb/firewire/firedtv-fw.c
@@ -239,47 +239,18 @@
};
/* Adjust the template string if models with longer names appear. */
-#define MAX_MODEL_NAME_LEN ((int)DIV_ROUND_UP(sizeof("FireDTV ????"), 4))
-
-static size_t model_name(u32 *directory, __be32 *buffer)
-{
- struct fw_csr_iterator ci;
- int i, length, key, value, last_key = 0;
- u32 *block = NULL;
-
- fw_csr_iterator_init(&ci, directory);
- while (fw_csr_iterator_next(&ci, &key, &value)) {
- if (last_key == CSR_MODEL &&
- key == (CSR_DESCRIPTOR | CSR_LEAF))
- block = ci.p - 1 + value;
- last_key = key;
- }
-
- if (block == NULL)
- return 0;
-
- length = min((int)(block[0] >> 16) - 2, MAX_MODEL_NAME_LEN);
- if (length <= 0)
- return 0;
-
- /* fast-forward to text string */
- block += 3;
-
- for (i = 0; i < length; i++)
- buffer[i] = cpu_to_be32(block[i]);
-
- return length * 4;
-}
+#define MAX_MODEL_NAME_LEN sizeof("FireDTV ????")
static int node_probe(struct device *dev)
{
struct firedtv *fdtv;
- __be32 name[MAX_MODEL_NAME_LEN];
+ char name[MAX_MODEL_NAME_LEN];
int name_len, err;
- name_len = model_name(fw_unit(dev)->directory, name);
+ name_len = fw_csr_string(fw_unit(dev)->directory, CSR_MODEL,
+ name, sizeof(name));
- fdtv = fdtv_alloc(dev, &backend, (char *)name, name_len);
+ fdtv = fdtv_alloc(dev, &backend, name, name_len >= 0 ? name_len : 0);
if (!fdtv)
return -ENOMEM;
diff --git a/drivers/media/video/dabusb.c b/drivers/media/video/dabusb.c
index 9b413a3..0f50508 100644
--- a/drivers/media/video/dabusb.c
+++ b/drivers/media/video/dabusb.c
@@ -616,10 +616,12 @@
{
int devnum = iminor(inode);
pdabusb_t s;
+ int r;
if (devnum < DABUSB_MINOR || devnum >= (DABUSB_MINOR + NRDABUSB))
return -EIO;
+ lock_kernel();
s = &dabusb[devnum - DABUSB_MINOR];
dbg("dabusb_open");
@@ -634,6 +636,7 @@
msleep_interruptible(500);
if (signal_pending (current)) {
+ unlock_kernel();
return -EAGAIN;
}
mutex_lock(&s->mutex);
@@ -641,6 +644,7 @@
if (usb_set_interface (s->usbdev, _DABUSB_IF, 1) < 0) {
mutex_unlock(&s->mutex);
dev_err(&s->usbdev->dev, "set_interface failed\n");
+ unlock_kernel();
return -EINVAL;
}
s->opened = 1;
@@ -649,7 +653,9 @@
file->f_pos = 0;
file->private_data = s;
- return nonseekable_open(inode, file);
+ r = nonseekable_open(inode, file);
+ unlock_kernel();
+ return r;
}
static int dabusb_release (struct inode *inode, struct file *file)
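The dabusb hunks are part of the BKL pushdown: open() is no longer called with the big kernel lock held, so a driver that still depends on that serialization has to take lock_kernel() itself and, as shown above, drop it on every early-return path. The resulting shape of such an open() handler, sketched with a hypothetical device lookup:

static int foo_open(struct inode *inode, struct file *file)
{
	struct foo_device *dev;			/* hypothetical driver state */
	int ret;

	lock_kernel();				/* serialize against other BKL users */

	dev = foo_lookup(iminor(inode));	/* hypothetical minor-to-device lookup */
	if (!dev) {
		unlock_kernel();		/* the BKL must be dropped on every exit */
		return -ENODEV;
	}

	file->private_data = dev;
	ret = nonseekable_open(inode, file);
	unlock_kernel();
	return ret;
}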
diff --git a/drivers/mmc/card/sdio_uart.c b/drivers/mmc/card/sdio_uart.c
index f537555..3fab78b 100644
--- a/drivers/mmc/card/sdio_uart.c
+++ b/drivers/mmc/card/sdio_uart.c
@@ -37,6 +37,7 @@
#include <linux/gfp.h>
#include <linux/tty.h>
#include <linux/tty_flip.h>
+#include <linux/kfifo.h>
#include <linux/mmc/core.h>
#include <linux/mmc/card.h>
@@ -47,19 +48,9 @@
#define UART_NR 8 /* Number of UARTs this driver can handle */
-#define UART_XMIT_SIZE PAGE_SIZE
+#define FIFO_SIZE PAGE_SIZE
#define WAKEUP_CHARS 256
-#define circ_empty(circ) ((circ)->head == (circ)->tail)
-#define circ_clear(circ) ((circ)->head = (circ)->tail = 0)
-
-#define circ_chars_pending(circ) \
- (CIRC_CNT((circ)->head, (circ)->tail, UART_XMIT_SIZE))
-
-#define circ_chars_free(circ) \
- (CIRC_SPACE((circ)->head, (circ)->tail, UART_XMIT_SIZE))
-
-
struct uart_icount {
__u32 cts;
__u32 dsr;
@@ -82,7 +73,7 @@
struct mutex func_lock;
struct task_struct *in_sdio_uart_irq;
unsigned int regs_offset;
- struct circ_buf xmit;
+ struct kfifo xmit_fifo;
spinlock_t write_lock;
struct uart_icount icount;
unsigned int uartclk;
@@ -105,6 +96,8 @@
kref_init(&port->kref);
mutex_init(&port->func_lock);
spin_lock_init(&port->write_lock);
+ if (kfifo_alloc(&port->xmit_fifo, FIFO_SIZE, GFP_KERNEL))
+ return -ENOMEM;
spin_lock(&sdio_uart_table_lock);
for (index = 0; index < UART_NR; index++) {
@@ -140,6 +133,7 @@
{
struct sdio_uart_port *port =
container_of(kref, struct sdio_uart_port, kref);
+ kfifo_free(&port->xmit_fifo);
kfree(port);
}
@@ -456,9 +450,11 @@
static void sdio_uart_transmit_chars(struct sdio_uart_port *port)
{
- struct circ_buf *xmit = &port->xmit;
+ struct kfifo *xmit = &port->xmit_fifo;
int count;
struct tty_struct *tty;
+ u8 iobuf[16];
+ int len;
if (port->x_char) {
sdio_out(port, UART_TX, port->x_char);
@@ -469,27 +465,25 @@
tty = tty_port_tty_get(&port->port);
- if (tty == NULL || circ_empty(xmit) ||
+ if (tty == NULL || !kfifo_len(xmit) ||
tty->stopped || tty->hw_stopped) {
sdio_uart_stop_tx(port);
tty_kref_put(tty);
return;
}
- count = 16;
- do {
- sdio_out(port, UART_TX, xmit->buf[xmit->tail]);
- xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1);
+ len = kfifo_out_locked(xmit, iobuf, 16, &port->write_lock);
+ for (count = 0; count < len; count++) {
+ sdio_out(port, UART_TX, iobuf[count]);
port->icount.tx++;
- if (circ_empty(xmit))
- break;
- } while (--count > 0);
+ }
- if (circ_chars_pending(xmit) < WAKEUP_CHARS)
+ len = kfifo_len(xmit);
+ if (len < WAKEUP_CHARS) {
tty_wakeup(tty);
-
- if (circ_empty(xmit))
- sdio_uart_stop_tx(port);
+ if (len == 0)
+ sdio_uart_stop_tx(port);
+ }
tty_kref_put(tty);
}
@@ -632,7 +626,6 @@
{
struct sdio_uart_port *port =
container_of(tport, struct sdio_uart_port, port);
- unsigned long page;
int ret;
/*
@@ -641,22 +634,17 @@
*/
set_bit(TTY_IO_ERROR, &tty->flags);
- /* Initialise and allocate the transmit buffer. */
- page = __get_free_page(GFP_KERNEL);
- if (!page)
- return -ENOMEM;
- port->xmit.buf = (unsigned char *)page;
- circ_clear(&port->xmit);
+ kfifo_reset(&port->xmit_fifo);
ret = sdio_uart_claim_func(port);
if (ret)
- goto err1;
+ return ret;
ret = sdio_enable_func(port->func);
if (ret)
- goto err2;
+ goto err1;
ret = sdio_claim_irq(port->func, sdio_uart_irq);
if (ret)
- goto err3;
+ goto err2;
/*
* Clear the FIFO buffers and disable them.
@@ -700,12 +688,10 @@
sdio_uart_release_func(port);
return 0;
-err3:
- sdio_disable_func(port->func);
err2:
- sdio_uart_release_func(port);
+ sdio_disable_func(port->func);
err1:
- free_page((unsigned long)port->xmit.buf);
+ sdio_uart_release_func(port);
return ret;
}
@@ -727,7 +713,7 @@
ret = sdio_uart_claim_func(port);
if (ret)
- goto skip;
+ return;
sdio_uart_stop_rx(port);
@@ -749,10 +735,6 @@
sdio_disable_func(port->func);
sdio_uart_release_func(port);
-
-skip:
- /* Free the transmit buffer page. */
- free_page((unsigned long)port->xmit.buf);
}
/**
@@ -822,27 +804,12 @@
int count)
{
struct sdio_uart_port *port = tty->driver_data;
- struct circ_buf *circ = &port->xmit;
- int c, ret = 0;
+ int ret;
if (!port->func)
return -ENODEV;
- spin_lock(&port->write_lock);
- while (1) {
- c = CIRC_SPACE_TO_END(circ->head, circ->tail, UART_XMIT_SIZE);
- if (count < c)
- c = count;
- if (c <= 0)
- break;
- memcpy(circ->buf + circ->head, buf, c);
- circ->head = (circ->head + c) & (UART_XMIT_SIZE - 1);
- buf += c;
- count -= c;
- ret += c;
- }
- spin_unlock(&port->write_lock);
-
+ ret = kfifo_in_locked(&port->xmit_fifo, buf, count, &port->write_lock);
if (!(port->ier & UART_IER_THRI)) {
int err = sdio_uart_claim_func(port);
if (!err) {
@@ -859,13 +826,13 @@
static int sdio_uart_write_room(struct tty_struct *tty)
{
struct sdio_uart_port *port = tty->driver_data;
- return port ? circ_chars_free(&port->xmit) : 0;
+ return FIFO_SIZE - kfifo_len(&port->xmit_fifo);
}
static int sdio_uart_chars_in_buffer(struct tty_struct *tty)
{
struct sdio_uart_port *port = tty->driver_data;
- return port ? circ_chars_pending(&port->xmit) : 0;
+ return kfifo_len(&port->xmit_fifo);
}
static void sdio_uart_send_xchar(struct tty_struct *tty, char ch)
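The sdio_uart conversion above replaces the driver-private circ_buf and its CIRC_* bookkeeping with a kfifo allocated once per port; kfifo_in_locked()/kfifo_out_locked() take the existing write_lock, so the open-coded locking and wrap-around arithmetic disappear. A minimal kernel-style sketch of the same producer/consumer pattern (hypothetical helpers, same 16-byte drain size as the interrupt handler uses):

#include <linux/kfifo.h>
#include <linux/spinlock.h>

static DEFINE_SPINLOCK(xmit_lock);
static struct kfifo xmit_fifo;

static int xmit_init(void)
{
	/* PAGE_SIZE matches the FIFO_SIZE used above */
	return kfifo_alloc(&xmit_fifo, PAGE_SIZE, GFP_KERNEL);
}

static int xmit_queue(const unsigned char *buf, int count)
{
	/* copies in as much as fits and returns the number of bytes accepted */
	return kfifo_in_locked(&xmit_fifo, buf, count, &xmit_lock);
}

static void xmit_drain(void (*tx_one)(u8 c))
{
	u8 iobuf[16];
	unsigned int i, len;

	/* pull out up to 16 bytes at a time, as sdio_uart_transmit_chars() does */
	len = kfifo_out_locked(&xmit_fifo, iobuf, sizeof(iobuf), &xmit_lock);
	for (i = 0; i < len; i++)
		tx_one(iobuf[i]);
}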
diff --git a/drivers/net/cxgb3/adapter.h b/drivers/net/cxgb3/adapter.h
index 3e8618b..4cd7f42 100644
--- a/drivers/net/cxgb3/adapter.h
+++ b/drivers/net/cxgb3/adapter.h
@@ -264,6 +264,10 @@
struct work_struct fatal_error_handler_task;
struct work_struct link_fault_handler_task;
+ struct work_struct db_full_task;
+ struct work_struct db_empty_task;
+ struct work_struct db_drop_task;
+
struct dentry *debugfs_root;
struct mutex mdio_lock;
@@ -335,6 +339,7 @@
int t3_get_desc(const struct sge_qset *qs, unsigned int qnum, unsigned int idx,
unsigned char *data);
irqreturn_t t3_sge_intr_msix(int irq, void *cookie);
+extern struct workqueue_struct *cxgb3_wq;
int t3_get_edc_fw(struct cphy *phy, int edc_idx, int size);
diff --git a/drivers/net/cxgb3/cxgb3_main.c b/drivers/net/cxgb3/cxgb3_main.c
index 6fd968a..3e453e1 100644
--- a/drivers/net/cxgb3/cxgb3_main.c
+++ b/drivers/net/cxgb3/cxgb3_main.c
@@ -45,6 +45,7 @@
#include <linux/firmware.h>
#include <linux/log2.h>
#include <linux/stringify.h>
+#include <linux/sched.h>
#include <asm/uaccess.h>
#include "common.h"
@@ -140,7 +141,7 @@
* will block keventd as it needs the rtnl lock, and we'll deadlock waiting
* for our work to complete. Get our own work queue to solve this.
*/
-static struct workqueue_struct *cxgb3_wq;
+struct workqueue_struct *cxgb3_wq;
/**
* link_report - show link status and link speed/duplex
@@ -586,6 +587,19 @@
V_RRCPLCPUSIZE(6) | F_HASHTOEPLITZ, cpus, rspq_map);
}
+static void ring_dbs(struct adapter *adap)
+{
+ int i, j;
+
+ for (i = 0; i < SGE_QSETS; i++) {
+ struct sge_qset *qs = &adap->sge.qs[i];
+
+ if (qs->adap)
+ for (j = 0; j < SGE_TXQ_PER_SET; j++)
+ t3_write_reg(adap, A_SG_KDOORBELL, F_SELEGRCNTX | V_EGRCNTX(qs->txq[j].cntxt_id));
+ }
+}
+
static void init_napi(struct adapter *adap)
{
int i;
@@ -2750,6 +2764,42 @@
spin_unlock_irq(&adapter->work_lock);
}
+static void db_full_task(struct work_struct *work)
+{
+ struct adapter *adapter = container_of(work, struct adapter,
+ db_full_task);
+
+ cxgb3_event_notify(&adapter->tdev, OFFLOAD_DB_FULL, 0);
+}
+
+static void db_empty_task(struct work_struct *work)
+{
+ struct adapter *adapter = container_of(work, struct adapter,
+ db_empty_task);
+
+ cxgb3_event_notify(&adapter->tdev, OFFLOAD_DB_EMPTY, 0);
+}
+
+static void db_drop_task(struct work_struct *work)
+{
+ struct adapter *adapter = container_of(work, struct adapter,
+ db_drop_task);
+ unsigned long delay = 1000;
+ unsigned short r;
+
+ cxgb3_event_notify(&adapter->tdev, OFFLOAD_DB_DROP, 0);
+
+ /*
+ * Sleep a while before ringing the driver qset dbs.
+ * The delay is between 1000-2023 usecs.
+ */
+ get_random_bytes(&r, 2);
+ delay += r & 1023;
+ set_current_state(TASK_UNINTERRUPTIBLE);
+ schedule_timeout(usecs_to_jiffies(delay));
+ ring_dbs(adapter);
+}
+
/*
* Processes external (PHY) interrupts in process context.
*/
@@ -3218,6 +3268,11 @@
INIT_LIST_HEAD(&adapter->adapter_list);
INIT_WORK(&adapter->ext_intr_handler_task, ext_intr_task);
INIT_WORK(&adapter->fatal_error_handler_task, fatal_error_task);
+
+ INIT_WORK(&adapter->db_full_task, db_full_task);
+ INIT_WORK(&adapter->db_empty_task, db_empty_task);
+ INIT_WORK(&adapter->db_drop_task, db_drop_task);
+
INIT_DELAYED_WORK(&adapter->adap_check_task, t3_adap_check_task);
for (i = 0; i < ai->nports0 + ai->nports1; ++i) {
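db_drop_task() above notifies the offload driver and then waits a pseudo-randomized 1000-2023 microseconds before re-ringing every egress-context doorbell, so recovery from a dropped doorbell is spread out rather than synchronized. A small user-space check of that delay arithmetic (masking two random bytes with 1023 and adding the 1000 usec base):

#include <stdio.h>
#include <stdlib.h>

int main(void)
{
	unsigned long lo = ~0UL, hi = 0;
	int i;

	srand(1);
	for (i = 0; i < 100000; i++) {
		unsigned short r = (unsigned short)rand();	/* stands in for get_random_bytes() */
		unsigned long delay = 1000 + (r & 1023);

		if (delay < lo)
			lo = delay;
		if (delay > hi)
			hi = delay;
	}
	printf("delay range: %lu..%lu usecs\n", lo, hi);	/* prints 1000..2023 */
	return 0;
}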
diff --git a/drivers/net/cxgb3/cxgb3_offload.h b/drivers/net/cxgb3/cxgb3_offload.h
index 670aa62..929c298 100644
--- a/drivers/net/cxgb3/cxgb3_offload.h
+++ b/drivers/net/cxgb3/cxgb3_offload.h
@@ -73,7 +73,10 @@
OFFLOAD_STATUS_UP,
OFFLOAD_STATUS_DOWN,
OFFLOAD_PORT_DOWN,
- OFFLOAD_PORT_UP
+ OFFLOAD_PORT_UP,
+ OFFLOAD_DB_FULL,
+ OFFLOAD_DB_EMPTY,
+ OFFLOAD_DB_DROP
};
struct cxgb3_client {
diff --git a/drivers/net/cxgb3/regs.h b/drivers/net/cxgb3/regs.h
index 1b5327b..cb42353 100644
--- a/drivers/net/cxgb3/regs.h
+++ b/drivers/net/cxgb3/regs.h
@@ -254,6 +254,22 @@
#define V_LOPIODRBDROPERR(x) ((x) << S_LOPIODRBDROPERR)
#define F_LOPIODRBDROPERR V_LOPIODRBDROPERR(1U)
+#define S_HIPRIORITYDBFULL 7
+#define V_HIPRIORITYDBFULL(x) ((x) << S_HIPRIORITYDBFULL)
+#define F_HIPRIORITYDBFULL V_HIPRIORITYDBFULL(1U)
+
+#define S_HIPRIORITYDBEMPTY 6
+#define V_HIPRIORITYDBEMPTY(x) ((x) << S_HIPRIORITYDBEMPTY)
+#define F_HIPRIORITYDBEMPTY V_HIPRIORITYDBEMPTY(1U)
+
+#define S_LOPRIORITYDBFULL 5
+#define V_LOPRIORITYDBFULL(x) ((x) << S_LOPRIORITYDBFULL)
+#define F_LOPRIORITYDBFULL V_LOPRIORITYDBFULL(1U)
+
+#define S_LOPRIORITYDBEMPTY 4
+#define V_LOPRIORITYDBEMPTY(x) ((x) << S_LOPRIORITYDBEMPTY)
+#define F_LOPRIORITYDBEMPTY V_LOPRIORITYDBEMPTY(1U)
+
#define S_RSPQDISABLED 3
#define V_RSPQDISABLED(x) ((x) << S_RSPQDISABLED)
#define F_RSPQDISABLED V_RSPQDISABLED(1U)
diff --git a/drivers/net/cxgb3/sge.c b/drivers/net/cxgb3/sge.c
index 0482059..78e265b 100644
--- a/drivers/net/cxgb3/sge.c
+++ b/drivers/net/cxgb3/sge.c
@@ -42,6 +42,7 @@
#include "sge_defs.h"
#include "t3_cpl.h"
#include "firmware_exports.h"
+#include "cxgb3_offload.h"
#define USE_GTS 0
@@ -2841,8 +2842,13 @@
}
if (status & (F_HIPIODRBDROPERR | F_LOPIODRBDROPERR))
- CH_ALERT(adapter, "SGE dropped %s priority doorbell\n",
- status & F_HIPIODRBDROPERR ? "high" : "lo");
+ queue_work(cxgb3_wq, &adapter->db_drop_task);
+
+ if (status & (F_HIPRIORITYDBFULL | F_LOPRIORITYDBFULL))
+ queue_work(cxgb3_wq, &adapter->db_full_task);
+
+ if (status & (F_HIPRIORITYDBEMPTY | F_LOPRIORITYDBEMPTY))
+ queue_work(cxgb3_wq, &adapter->db_empty_task);
t3_write_reg(adapter, A_SG_INT_CAUSE, status);
if (status & SGE_FATALERR)
diff --git a/drivers/net/cxgb3/t3_hw.c b/drivers/net/cxgb3/t3_hw.c
index 3ab9f51..95a8ba0 100644
--- a/drivers/net/cxgb3/t3_hw.c
+++ b/drivers/net/cxgb3/t3_hw.c
@@ -1433,7 +1433,10 @@
F_IRPARITYERROR | V_ITPARITYERROR(M_ITPARITYERROR) | \
V_FLPARITYERROR(M_FLPARITYERROR) | F_LODRBPARITYERROR | \
F_HIDRBPARITYERROR | F_LORCQPARITYERROR | \
- F_HIRCQPARITYERROR)
+ F_HIRCQPARITYERROR | F_LOPRIORITYDBFULL | \
+ F_HIPRIORITYDBFULL | F_LOPRIORITYDBEMPTY | \
+ F_HIPRIORITYDBEMPTY | F_HIPIODRBDROPERR | \
+ F_LOPIODRBDROPERR)
#define MC5_INTR_MASK (F_PARITYERR | F_ACTRGNFULL | F_UNKNOWNCMD | \
F_REQQPARERR | F_DISPQPARERR | F_DELACTEMPTY | \
F_NFASRCHFAIL)
diff --git a/drivers/parport/parport_pc.c b/drivers/parport/parport_pc.c
index ad113b0..0950fa4 100644
--- a/drivers/parport/parport_pc.c
+++ b/drivers/parport/parport_pc.c
@@ -2908,6 +2908,7 @@
netmos_9805,
netmos_9815,
netmos_9901,
+ netmos_9865,
quatech_sppxp100,
};
@@ -2989,6 +2990,7 @@
/* netmos_9805 */ { 1, { { 0, -1 }, } },
/* netmos_9815 */ { 2, { { 0, -1 }, { 2, -1 }, } },
/* netmos_9901 */ { 1, { { 0, -1 }, } },
+ /* netmos_9865 */ { 1, { { 0, -1 }, } },
/* quatech_sppxp100 */ { 1, { { 0, 1 }, } },
};
@@ -3092,6 +3094,10 @@
PCI_ANY_ID, PCI_ANY_ID, 0, 0, netmos_9815 },
{ PCI_VENDOR_ID_NETMOS, PCI_DEVICE_ID_NETMOS_9901,
0xA000, 0x2000, 0, 0, netmos_9901 },
+ { PCI_VENDOR_ID_NETMOS, PCI_DEVICE_ID_NETMOS_9865,
+ 0xA000, 0x1000, 0, 0, netmos_9865 },
+ { PCI_VENDOR_ID_NETMOS, PCI_DEVICE_ID_NETMOS_9865,
+ 0xA000, 0x2000, 0, 0, netmos_9865 },
/* Quatech SPPXP-100 Parallel port PCI ExpressCard */
{ PCI_VENDOR_ID_QUATECH, PCI_DEVICE_ID_QUATECH_SPPXP_100,
PCI_ANY_ID, PCI_ANY_ID, 0, 0, quatech_sppxp100 },
diff --git a/drivers/pci/hotplug/ibmphp_res.c b/drivers/pci/hotplug/ibmphp_res.c
index ec73294..e2dc289 100644
--- a/drivers/pci/hotplug/ibmphp_res.c
+++ b/drivers/pci/hotplug/ibmphp_res.c
@@ -40,7 +40,7 @@
static int once_over (void);
static int remove_ranges (struct bus_node *, struct bus_node *);
static int update_bridge_ranges (struct bus_node **);
-static int add_range (int type, struct range_node *, struct bus_node *);
+static int add_bus_range (int type, struct range_node *, struct bus_node *);
static void fix_resources (struct bus_node *);
static struct bus_node *find_bus_wprev (u8, struct bus_node **, u8);
@@ -133,7 +133,7 @@
newrange->rangeno = 1;
else {
/* need to insert our range */
- add_range (flag, newrange, newbus);
+ add_bus_range (flag, newrange, newbus);
debug ("%d resource Primary Bus inserted on bus %x [%x - %x]\n", flag, newbus->busno, newrange->start, newrange->end);
}
@@ -384,7 +384,7 @@
* Input: type of the resource, range to add, current bus
* Output: 0 or -1, bus and range ptrs
********************************************************************************/
-static int add_range (int type, struct range_node *range, struct bus_node *bus_cur)
+static int add_bus_range (int type, struct range_node *range, struct bus_node *bus_cur)
{
struct range_node *range_cur = NULL;
struct range_node *range_prev;
@@ -455,7 +455,7 @@
/*******************************************************************************
* This routine goes through the list of resources of type 'type' and updates
- * the range numbers that they correspond to. It was called from add_range fnc
+ * the range numbers that they correspond to. It was called from add_bus_range fnc
*
* Input: bus, type of the resource, the rangeno starting from which to update
******************************************************************************/
@@ -1999,7 +1999,7 @@
if (bus_sec->noIORanges > 0) {
if (!range_exists_already (range, bus_sec, IO)) {
- add_range (IO, range, bus_sec);
+ add_bus_range (IO, range, bus_sec);
++bus_sec->noIORanges;
} else {
kfree (range);
@@ -2048,7 +2048,7 @@
if (bus_sec->noMemRanges > 0) {
if (!range_exists_already (range, bus_sec, MEM)) {
- add_range (MEM, range, bus_sec);
+ add_bus_range (MEM, range, bus_sec);
++bus_sec->noMemRanges;
} else {
kfree (range);
@@ -2102,7 +2102,7 @@
if (bus_sec->noPFMemRanges > 0) {
if (!range_exists_already (range, bus_sec, PFMEM)) {
- add_range (PFMEM, range, bus_sec);
+ add_bus_range (PFMEM, range, bus_sec);
++bus_sec->noPFMemRanges;
} else {
kfree (range);
diff --git a/drivers/platform/x86/Kconfig b/drivers/platform/x86/Kconfig
index 6848f21..cd2ee6f 100644
--- a/drivers/platform/x86/Kconfig
+++ b/drivers/platform/x86/Kconfig
@@ -59,6 +59,8 @@
select NEW_LEDS
select BACKLIGHT_CLASS_DEVICE
depends on INPUT
+ depends on RFKILL || RFKILL = n
+ select INPUT_SPARSEKMAP
---help---
This is the new Linux driver for Asus laptops. It may also support some
MEDION, JVC or VICTOR laptops. It makes all the extra buttons generate
@@ -177,6 +179,7 @@
tristate "Compal Laptop Extras"
depends on ACPI
depends on BACKLIGHT_CLASS_DEVICE
+ depends on RFKILL
---help---
This is a driver for laptops built by Compal:
@@ -320,9 +323,15 @@
server running, phase of the moon, and the current mood of
Schroedinger's cat. If you can use X.org's RandR to control
your ThinkPad's video output ports instead of this feature,
- don't think twice: do it and say N here to save some memory.
+ don't think twice: do it and say N here to save memory and avoid
+ bad interactions with X.org.
- If you are not sure, say Y here.
+ NOTE: access to this feature is limited to processes with the
+ CAP_SYS_ADMIN capability, to avoid local DoS issues in platforms
+ where it interacts badly with X.org.
+
+ If you are not sure, say Y here but do try to check if you could
+ be using X.org RandR instead.
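To make the CAP_SYS_ADMIN note above concrete, the usual shape of such a restriction is a capability check at the top of the sysfs store handler. This is a hedged sketch only, not the thinkpad-acpi implementation; the handler name is made up for illustration.

#include <linux/capability.h>
#include <linux/device.h>

/* Hedged sketch: a sysfs write handler restricted to CAP_SYS_ADMIN. */
static ssize_t video_output_store(struct device *dev,
				  struct device_attribute *attr,
				  const char *buf, size_t count)
{
	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	/* ... parse buf and program the video output routing ... */
	return count;
}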
config THINKPAD_ACPI_HOTKEY_POLL
bool "Support NVRAM polling for hot keys"
diff --git a/drivers/platform/x86/asus-laptop.c b/drivers/platform/x86/asus-laptop.c
index 61a1c75..791fcf3 100644
--- a/drivers/platform/x86/asus-laptop.c
+++ b/drivers/platform/x86/asus-laptop.c
@@ -45,58 +45,23 @@
#include <linux/fb.h>
#include <linux/leds.h>
#include <linux/platform_device.h>
+#include <linux/uaccess.h>
+#include <linux/input.h>
+#include <linux/input/sparse-keymap.h>
+#include <linux/rfkill.h>
#include <acpi/acpi_drivers.h>
#include <acpi/acpi_bus.h>
-#include <asm/uaccess.h>
-#include <linux/input.h>
-#define ASUS_LAPTOP_VERSION "0.42"
+#define ASUS_LAPTOP_VERSION "0.42"
-#define ASUS_HOTK_NAME "Asus Laptop Support"
-#define ASUS_HOTK_CLASS "hotkey"
-#define ASUS_HOTK_DEVICE_NAME "Hotkey"
-#define ASUS_HOTK_FILE KBUILD_MODNAME
-#define ASUS_HOTK_PREFIX "\\_SB.ATKD."
-
-
-/*
- * Some events we use, same for all Asus
- */
-#define ATKD_BR_UP 0x10
-#define ATKD_BR_DOWN 0x20
-#define ATKD_LCD_ON 0x33
-#define ATKD_LCD_OFF 0x34
-
-/*
- * Known bits returned by \_SB.ATKD.HWRS
- */
-#define WL_HWRS 0x80
-#define BT_HWRS 0x100
-
-/*
- * Flags for hotk status
- * WL_ON and BT_ON are also used for wireless_status()
- */
-#define WL_ON 0x01 /* internal Wifi */
-#define BT_ON 0x02 /* internal Bluetooth */
-#define MLED_ON 0x04 /* mail LED */
-#define TLED_ON 0x08 /* touchpad LED */
-#define RLED_ON 0x10 /* Record LED */
-#define PLED_ON 0x20 /* Phone LED */
-#define GLED_ON 0x40 /* Gaming LED */
-#define LCD_ON 0x80 /* LCD backlight */
-#define GPS_ON 0x100 /* GPS */
-#define KEY_ON 0x200 /* Keyboard backlight */
-
-#define ASUS_LOG ASUS_HOTK_FILE ": "
-#define ASUS_ERR KERN_ERR ASUS_LOG
-#define ASUS_WARNING KERN_WARNING ASUS_LOG
-#define ASUS_NOTICE KERN_NOTICE ASUS_LOG
-#define ASUS_INFO KERN_INFO ASUS_LOG
-#define ASUS_DEBUG KERN_DEBUG ASUS_LOG
+#define ASUS_LAPTOP_NAME "Asus Laptop Support"
+#define ASUS_LAPTOP_CLASS "hotkey"
+#define ASUS_LAPTOP_DEVICE_NAME "Hotkey"
+#define ASUS_LAPTOP_FILE KBUILD_MODNAME
+#define ASUS_LAPTOP_PREFIX "\\_SB.ATKD."
MODULE_AUTHOR("Julien Lerouge, Karol Kozimor, Corentin Chary");
-MODULE_DESCRIPTION(ASUS_HOTK_NAME);
+MODULE_DESCRIPTION(ASUS_LAPTOP_NAME);
MODULE_LICENSE("GPL");
/*
@@ -113,225 +78,209 @@
module_param(wapf, uint, 0644);
MODULE_PARM_DESC(wapf, "WAPF value");
-#define ASUS_HANDLE(object, paths...) \
- static acpi_handle object##_handle = NULL; \
- static char *object##_paths[] = { paths }
+static int wlan_status = 1;
+static int bluetooth_status = 1;
+
+module_param(wlan_status, int, 0644);
+MODULE_PARM_DESC(wlan_status, "Set the wireless status on boot "
+		 "(0 = disabled, 1 = enabled, -1 = don't do anything). "
+		 "default is 1");
+
+module_param(bluetooth_status, int, 0644);
+MODULE_PARM_DESC(bluetooth_status, "Set the bluetooth status on boot "
+		 "(0 = disabled, 1 = enabled, -1 = don't do anything). "
+		 "default is 1");
+
+/*
+ * Some events we use, same for all Asus
+ */
+#define ATKD_BR_UP 0x10 /* (event & ~ATKD_BR_UP) = brightness level */
+#define ATKD_BR_DOWN	0x20	/* (event & ~ATKD_BR_DOWN) = brightness level */
+#define ATKD_BR_MIN ATKD_BR_UP
+#define ATKD_BR_MAX (ATKD_BR_DOWN | 0xF) /* 0x2f */
+#define ATKD_LCD_ON 0x33
+#define ATKD_LCD_OFF 0x34
+
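A quick worked example of the encoding documented above: a brightness event keeps the direction in the 0x10/0x20 bit and the level in the low nibble, so 0x25 decodes as "brightness down, level 5" and 0x2f is the documented maximum. A hedged helper illustrating the decode, using only the defines above; it is not part of the patch.

/* Hedged sketch: decode an ATKD brightness event into direction and level. */
static void example_decode_brightness(int event)
{
	bool down = event & ATKD_BR_DOWN;	/* 0x20 bit set means "down" */
	int level = event & 0x0f;		/* low nibble is the level */

	pr_debug("brightness %s, level %d\n", down ? "down" : "up", level);
}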
+/*
+ * Known bits returned by \_SB.ATKD.HWRS
+ */
+#define WL_HWRS 0x80
+#define BT_HWRS 0x100
+
+/*
+ * Flags for hotk status
+ * WL_ON and BT_ON are also used for wireless_status()
+ */
+#define WL_RSTS 0x01 /* internal Wifi */
+#define BT_RSTS 0x02 /* internal Bluetooth */
/* LED */
-ASUS_HANDLE(mled_set, ASUS_HOTK_PREFIX "MLED");
-ASUS_HANDLE(tled_set, ASUS_HOTK_PREFIX "TLED");
-ASUS_HANDLE(rled_set, ASUS_HOTK_PREFIX "RLED"); /* W1JC */
-ASUS_HANDLE(pled_set, ASUS_HOTK_PREFIX "PLED"); /* A7J */
-ASUS_HANDLE(gled_set, ASUS_HOTK_PREFIX "GLED"); /* G1, G2 (probably) */
+#define METHOD_MLED "MLED"
+#define METHOD_TLED "TLED"
+#define METHOD_RLED "RLED" /* W1JC */
+#define METHOD_PLED "PLED" /* A7J */
+#define METHOD_GLED "GLED" /* G1, G2 (probably) */
/* LEDD */
-ASUS_HANDLE(ledd_set, ASUS_HOTK_PREFIX "SLCM");
+#define METHOD_LEDD "SLCM"
/*
* Bluetooth and WLAN
* WLED and BLED are not handled like other XLED, because in some dsdt
* they also control the WLAN/Bluetooth device.
*/
-ASUS_HANDLE(wl_switch, ASUS_HOTK_PREFIX "WLED");
-ASUS_HANDLE(bt_switch, ASUS_HOTK_PREFIX "BLED");
-ASUS_HANDLE(wireless_status, ASUS_HOTK_PREFIX "RSTS"); /* All new models */
+#define METHOD_WLAN "WLED"
+#define METHOD_BLUETOOTH "BLED"
+#define METHOD_WL_STATUS "RSTS"
/* Brightness */
-ASUS_HANDLE(brightness_set, ASUS_HOTK_PREFIX "SPLV");
-ASUS_HANDLE(brightness_get, ASUS_HOTK_PREFIX "GPLV");
+#define METHOD_BRIGHTNESS_SET "SPLV"
+#define METHOD_BRIGHTNESS_GET "GPLV"
/* Backlight */
-ASUS_HANDLE(lcd_switch, "\\_SB.PCI0.SBRG.EC0._Q10", /* All new models */
- "\\_SB.PCI0.ISA.EC0._Q10", /* A1x */
- "\\_SB.PCI0.PX40.ECD0._Q10", /* L3C */
- "\\_SB.PCI0.PX40.EC0.Q10", /* M1A */
- "\\_SB.PCI0.LPCB.EC0._Q10", /* P30 */
- "\\_SB.PCI0.LPCB.EC0._Q0E", /* P30/P35 */
- "\\_SB.PCI0.PX40.Q10", /* S1x */
- "\\Q10"); /* A2x, L2D, L3D, M2E */
+static acpi_handle lcd_switch_handle;
+static const char *lcd_switch_paths[] = {
+ "\\_SB.PCI0.SBRG.EC0._Q10", /* All new models */
+ "\\_SB.PCI0.ISA.EC0._Q10", /* A1x */
+ "\\_SB.PCI0.PX40.ECD0._Q10", /* L3C */
+ "\\_SB.PCI0.PX40.EC0.Q10", /* M1A */
+ "\\_SB.PCI0.LPCB.EC0._Q10", /* P30 */
+ "\\_SB.PCI0.LPCB.EC0._Q0E", /* P30/P35 */
+ "\\_SB.PCI0.PX40.Q10", /* S1x */
+ "\\Q10"}; /* A2x, L2D, L3D, M2E */
/* Display */
-ASUS_HANDLE(display_set, ASUS_HOTK_PREFIX "SDSP");
-ASUS_HANDLE(display_get,
- /* A6B, A6K A6R A7D F3JM L4R M6R A3G M6A M6V VX-1 V6J V6V W3Z */
- "\\_SB.PCI0.P0P1.VGA.GETD",
- /* A3E A4K, A4D A4L A6J A7J A8J Z71V M9V S5A M5A z33A W1Jc W2V G1 */
- "\\_SB.PCI0.P0P2.VGA.GETD",
- /* A6V A6Q */
- "\\_SB.PCI0.P0P3.VGA.GETD",
- /* A6T, A6M */
- "\\_SB.PCI0.P0PA.VGA.GETD",
- /* L3C */
- "\\_SB.PCI0.PCI1.VGAC.NMAP",
- /* Z96F */
- "\\_SB.PCI0.VGA.GETD",
- /* A2D */
- "\\ACTD",
- /* A4G Z71A W1N W5A W5F M2N M3N M5N M6N S1N S5N */
- "\\ADVG",
- /* P30 */
- "\\DNXT",
- /* A2H D1 L2D L3D L3H L2E L5D L5C M1A M2E L4L W3V */
- "\\INFB",
- /* A3F A6F A3N A3L M6N W3N W6A */
- "\\SSTE");
+#define METHOD_SWITCH_DISPLAY "SDSP"
-ASUS_HANDLE(ls_switch, ASUS_HOTK_PREFIX "ALSC"); /* Z71A Z71V */
-ASUS_HANDLE(ls_level, ASUS_HOTK_PREFIX "ALSL"); /* Z71A Z71V */
+static acpi_handle display_get_handle;
+static const char *display_get_paths[] = {
+ /* A6B, A6K A6R A7D F3JM L4R M6R A3G M6A M6V VX-1 V6J V6V W3Z */
+ "\\_SB.PCI0.P0P1.VGA.GETD",
+ /* A3E A4K, A4D A4L A6J A7J A8J Z71V M9V S5A M5A z33A W1Jc W2V G1 */
+ "\\_SB.PCI0.P0P2.VGA.GETD",
+ /* A6V A6Q */
+ "\\_SB.PCI0.P0P3.VGA.GETD",
+ /* A6T, A6M */
+ "\\_SB.PCI0.P0PA.VGA.GETD",
+ /* L3C */
+ "\\_SB.PCI0.PCI1.VGAC.NMAP",
+ /* Z96F */
+ "\\_SB.PCI0.VGA.GETD",
+ /* A2D */
+ "\\ACTD",
+ /* A4G Z71A W1N W5A W5F M2N M3N M5N M6N S1N S5N */
+ "\\ADVG",
+ /* P30 */
+ "\\DNXT",
+ /* A2H D1 L2D L3D L3H L2E L5D L5C M1A M2E L4L W3V */
+ "\\INFB",
+ /* A3F A6F A3N A3L M6N W3N W6A */
+ "\\SSTE"};
+
+#define METHOD_ALS_CONTROL "ALSC" /* Z71A Z71V */
+#define METHOD_ALS_LEVEL "ALSL" /* Z71A Z71V */
/* GPS */
/* R2H use different handle for GPS on/off */
-ASUS_HANDLE(gps_on, ASUS_HOTK_PREFIX "SDON"); /* R2H */
-ASUS_HANDLE(gps_off, ASUS_HOTK_PREFIX "SDOF"); /* R2H */
-ASUS_HANDLE(gps_status, ASUS_HOTK_PREFIX "GPST");
+#define METHOD_GPS_ON "SDON"
+#define METHOD_GPS_OFF "SDOF"
+#define METHOD_GPS_STATUS "GPST"
/* Keyboard light */
-ASUS_HANDLE(kled_set, ASUS_HOTK_PREFIX "SLKB");
-ASUS_HANDLE(kled_get, ASUS_HOTK_PREFIX "GLKB");
+#define METHOD_KBD_LIGHT_SET "SLKB"
+#define METHOD_KBD_LIGHT_GET "GLKB"
+
+/*
+ * Define a specific led structure to keep the main structure clean
+ */
+struct asus_led {
+ int wk;
+ struct work_struct work;
+ struct led_classdev led;
+ struct asus_laptop *asus;
+ const char *method;
+};
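The reason struct asus_led embeds both the led_classdev and the work item is that the LED core only hands the callbacks a led_classdev pointer; the per-LED and per-laptop context is recovered with container_of(), exactly as the asus_led_cdev_* callbacks further down do. A hedged, stand-alone illustration of that pattern (the function name is illustrative):

#include <linux/kernel.h>
#include <linux/leds.h>

/* Hedged sketch: recovering the wrapper structure from the classdev. */
static void example_brightness_set(struct led_classdev *led_cdev,
				   enum led_brightness value)
{
	struct asus_led *led = container_of(led_cdev, struct asus_led, led);

	led->wk = !!value;
	/* Defer the ACPI call to led->work, as asus_led_cdev_set() does. */
}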
/*
* This is the main structure, we can use it to store anything interesting
* about the hotk device
*/
-struct asus_hotk {
+struct asus_laptop {
char *name; /* laptop name */
- struct acpi_device *device; /* the device we are in */
+
+ struct acpi_table_header *dsdt_info;
+ struct platform_device *platform_device;
+ struct acpi_device *device; /* the device we are in */
+ struct backlight_device *backlight_device;
+
+ struct input_dev *inputdev;
+ struct key_entry *keymap;
+
+ struct asus_led mled;
+ struct asus_led tled;
+ struct asus_led rled;
+ struct asus_led pled;
+ struct asus_led gled;
+ struct asus_led kled;
+ struct workqueue_struct *led_workqueue;
+
+ int wireless_status;
+ bool have_rsts;
+ int lcd_state;
+
+ struct rfkill *gps_rfkill;
+
acpi_handle handle; /* the handle of the hotk device */
- char status; /* status of the hotk, for LEDs, ... */
u32 ledd_status; /* status of the LED display */
u8 light_level; /* light sensor level */
u8 light_switch; /* light sensor switch value */
u16 event_count[128]; /* count for each event TODO make this better */
- struct input_dev *inputdev;
u16 *keycode_map;
};
-/*
- * This header is made available to allow proper configuration given model,
- * revision number , ... this info cannot go in struct asus_hotk because it is
- * available before the hotk
- */
-static struct acpi_table_header *asus_info;
-
-/* The actual device the driver binds to */
-static struct asus_hotk *hotk;
-
-/*
- * The hotkey driver declaration
- */
-static const struct acpi_device_id asus_device_ids[] = {
- {"ATK0100", 0},
- {"ATK0101", 0},
- {"", 0},
-};
-MODULE_DEVICE_TABLE(acpi, asus_device_ids);
-
-static int asus_hotk_add(struct acpi_device *device);
-static int asus_hotk_remove(struct acpi_device *device, int type);
-static void asus_hotk_notify(struct acpi_device *device, u32 event);
-
-static struct acpi_driver asus_hotk_driver = {
- .name = ASUS_HOTK_NAME,
- .class = ASUS_HOTK_CLASS,
- .owner = THIS_MODULE,
- .ids = asus_device_ids,
- .flags = ACPI_DRIVER_ALL_NOTIFY_EVENTS,
- .ops = {
- .add = asus_hotk_add,
- .remove = asus_hotk_remove,
- .notify = asus_hotk_notify,
- },
-};
-
-/* The backlight device /sys/class/backlight */
-static struct backlight_device *asus_backlight_device;
-
-/*
- * The backlight class declaration
- */
-static int read_brightness(struct backlight_device *bd);
-static int update_bl_status(struct backlight_device *bd);
-static struct backlight_ops asusbl_ops = {
- .get_brightness = read_brightness,
- .update_status = update_bl_status,
-};
-
-/*
- * These functions actually update the LED's, and are called from a
- * workqueue. By doing this as separate work rather than when the LED
- * subsystem asks, we avoid messing with the Asus ACPI stuff during a
- * potentially bad time, such as a timer interrupt.
- */
-static struct workqueue_struct *led_workqueue;
-
-#define ASUS_LED(object, ledname, max) \
- static void object##_led_set(struct led_classdev *led_cdev, \
- enum led_brightness value); \
- static enum led_brightness object##_led_get( \
- struct led_classdev *led_cdev); \
- static void object##_led_update(struct work_struct *ignored); \
- static int object##_led_wk; \
- static DECLARE_WORK(object##_led_work, object##_led_update); \
- static struct led_classdev object##_led = { \
- .name = "asus::" ledname, \
- .brightness_set = object##_led_set, \
- .brightness_get = object##_led_get, \
- .max_brightness = max \
- }
-
-ASUS_LED(mled, "mail", 1);
-ASUS_LED(tled, "touchpad", 1);
-ASUS_LED(rled, "record", 1);
-ASUS_LED(pled, "phone", 1);
-ASUS_LED(gled, "gaming", 1);
-ASUS_LED(kled, "kbd_backlight", 3);
-
-struct key_entry {
- char type;
- u8 code;
- u16 keycode;
-};
-
-enum { KE_KEY, KE_END };
-
-static struct key_entry asus_keymap[] = {
- {KE_KEY, 0x02, KEY_SCREENLOCK},
- {KE_KEY, 0x05, KEY_WLAN},
- {KE_KEY, 0x08, KEY_F13},
- {KE_KEY, 0x17, KEY_ZOOM},
- {KE_KEY, 0x1f, KEY_BATTERY},
- {KE_KEY, 0x30, KEY_VOLUMEUP},
- {KE_KEY, 0x31, KEY_VOLUMEDOWN},
- {KE_KEY, 0x32, KEY_MUTE},
- {KE_KEY, 0x33, KEY_SWITCHVIDEOMODE},
- {KE_KEY, 0x34, KEY_SWITCHVIDEOMODE},
- {KE_KEY, 0x40, KEY_PREVIOUSSONG},
- {KE_KEY, 0x41, KEY_NEXTSONG},
- {KE_KEY, 0x43, KEY_STOPCD},
- {KE_KEY, 0x45, KEY_PLAYPAUSE},
- {KE_KEY, 0x4c, KEY_MEDIA},
- {KE_KEY, 0x50, KEY_EMAIL},
- {KE_KEY, 0x51, KEY_WWW},
- {KE_KEY, 0x55, KEY_CALC},
- {KE_KEY, 0x5C, KEY_SCREENLOCK}, /* Screenlock */
- {KE_KEY, 0x5D, KEY_WLAN},
- {KE_KEY, 0x5E, KEY_WLAN},
- {KE_KEY, 0x5F, KEY_WLAN},
- {KE_KEY, 0x60, KEY_SWITCHVIDEOMODE},
- {KE_KEY, 0x61, KEY_SWITCHVIDEOMODE},
- {KE_KEY, 0x62, KEY_SWITCHVIDEOMODE},
- {KE_KEY, 0x63, KEY_SWITCHVIDEOMODE},
- {KE_KEY, 0x6B, KEY_F13}, /* Lock Touchpad */
- {KE_KEY, 0x82, KEY_CAMERA},
- {KE_KEY, 0x88, KEY_WLAN },
- {KE_KEY, 0x8A, KEY_PROG1},
- {KE_KEY, 0x95, KEY_MEDIA},
- {KE_KEY, 0x99, KEY_PHONE},
- {KE_KEY, 0xc4, KEY_KBDILLUMUP},
- {KE_KEY, 0xc5, KEY_KBDILLUMDOWN},
+static const struct key_entry asus_keymap[] = {
+ /* Lenovo SL Specific keycodes */
+ {KE_KEY, 0x02, { KEY_SCREENLOCK } },
+ {KE_KEY, 0x05, { KEY_WLAN } },
+ {KE_KEY, 0x08, { KEY_F13 } },
+ {KE_KEY, 0x17, { KEY_ZOOM } },
+ {KE_KEY, 0x1f, { KEY_BATTERY } },
+ /* End of Lenovo SL Specific keycodes */
+ {KE_KEY, 0x30, { KEY_VOLUMEUP } },
+ {KE_KEY, 0x31, { KEY_VOLUMEDOWN } },
+ {KE_KEY, 0x32, { KEY_MUTE } },
+ {KE_KEY, 0x33, { KEY_SWITCHVIDEOMODE } },
+ {KE_KEY, 0x34, { KEY_SWITCHVIDEOMODE } },
+ {KE_KEY, 0x40, { KEY_PREVIOUSSONG } },
+ {KE_KEY, 0x41, { KEY_NEXTSONG } },
+ {KE_KEY, 0x43, { KEY_STOPCD } },
+ {KE_KEY, 0x45, { KEY_PLAYPAUSE } },
+ {KE_KEY, 0x4c, { KEY_MEDIA } },
+ {KE_KEY, 0x50, { KEY_EMAIL } },
+ {KE_KEY, 0x51, { KEY_WWW } },
+ {KE_KEY, 0x55, { KEY_CALC } },
+ {KE_KEY, 0x5C, { KEY_SCREENLOCK } }, /* Screenlock */
+ {KE_KEY, 0x5D, { KEY_WLAN } },
+ {KE_KEY, 0x5E, { KEY_WLAN } },
+ {KE_KEY, 0x5F, { KEY_WLAN } },
+ {KE_KEY, 0x60, { KEY_SWITCHVIDEOMODE } },
+ {KE_KEY, 0x61, { KEY_SWITCHVIDEOMODE } },
+ {KE_KEY, 0x62, { KEY_SWITCHVIDEOMODE } },
+ {KE_KEY, 0x63, { KEY_SWITCHVIDEOMODE } },
+ {KE_KEY, 0x6B, { KEY_F13 } }, /* Lock Touchpad */
+ {KE_KEY, 0x7E, { KEY_BLUETOOTH } },
+ {KE_KEY, 0x7D, { KEY_BLUETOOTH } },
+ {KE_KEY, 0x82, { KEY_CAMERA } },
+ {KE_KEY, 0x88, { KEY_WLAN } },
+ {KE_KEY, 0x8A, { KEY_PROG1 } },
+ {KE_KEY, 0x95, { KEY_MEDIA } },
+ {KE_KEY, 0x99, { KEY_PHONE } },
+ {KE_KEY, 0xc4, { KEY_KBDILLUMUP } },
+ {KE_KEY, 0xc5, { KEY_KBDILLUMDOWN } },
{KE_END, 0},
};
+
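The keymap now uses the generic sparse-keymap helpers (hence the new select INPUT_SPARSEKMAP in Kconfig above): the table is copied into the input device once at init time, and raw ATKD event codes are then handed to the library for translation. A hedged summary of that flow; the real init and notify functions appear later in this patch, and the example function names here are illustrative.

#include <linux/input.h>
#include <linux/input/sparse-keymap.h>

/* Hedged sketch of the sparse-keymap usage this conversion relies on. */
static int example_keymap_init(struct input_dev *input)
{
	/* Copies asus_keymap into the device and sets the key bits. */
	return sparse_keymap_setup(input, asus_keymap, NULL);
}

static void example_keymap_notify(struct input_dev *input, int event)
{
	/*
	 * Emits a press plus release (autorelease = true) for KE_KEY
	 * entries; event codes missing from the table are ignored.
	 */
	sparse_keymap_report_event(input, event, 1, true);
}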
/*
* This function evaluates an ACPI method, given an int as parameter, the
* method is searched within the scope of the handle, can be NULL. The output
@@ -339,8 +288,8 @@
*
* returns 0 if write is successful, -1 else.
*/
-static int write_acpi_int(acpi_handle handle, const char *method, int val,
- struct acpi_buffer *output)
+static int write_acpi_int_ret(acpi_handle handle, const char *method, int val,
+ struct acpi_buffer *output)
{
struct acpi_object_list params; /* list of input parameters (an int) */
union acpi_object in_obj; /* the only param we use */
@@ -361,102 +310,82 @@
return -1;
}
-static int read_wireless_status(int mask)
+static int write_acpi_int(acpi_handle handle, const char *method, int val)
{
- unsigned long long status;
- acpi_status rv = AE_OK;
-
- if (!wireless_status_handle)
- return (hotk->status & mask) ? 1 : 0;
-
- rv = acpi_evaluate_integer(wireless_status_handle, NULL, NULL, &status);
- if (ACPI_FAILURE(rv))
- pr_warning("Error reading Wireless status\n");
- else
- return (status & mask) ? 1 : 0;
-
- return (hotk->status & mask) ? 1 : 0;
+ return write_acpi_int_ret(handle, method, val, NULL);
}
-static int read_gps_status(void)
+static int acpi_check_handle(acpi_handle handle, const char *method,
+ acpi_handle *ret)
{
- unsigned long long status;
- acpi_status rv = AE_OK;
+ acpi_status status;
- rv = acpi_evaluate_integer(gps_status_handle, NULL, NULL, &status);
- if (ACPI_FAILURE(rv))
- pr_warning("Error reading GPS status\n");
- else
- return status ? 1 : 0;
+ if (method == NULL)
+ return -ENODEV;
- return (hotk->status & GPS_ON) ? 1 : 0;
-}
+ if (ret)
+ status = acpi_get_handle(handle, (char *)method,
+ ret);
+ else {
+ acpi_handle dummy;
-/* Generic LED functions */
-static int read_status(int mask)
-{
- /* There is a special method for both wireless devices */
- if (mask == BT_ON || mask == WL_ON)
- return read_wireless_status(mask);
- else if (mask == GPS_ON)
- return read_gps_status();
-
- return (hotk->status & mask) ? 1 : 0;
-}
-
-static void write_status(acpi_handle handle, int out, int mask)
-{
- hotk->status = (out) ? (hotk->status | mask) : (hotk->status & ~mask);
-
- switch (mask) {
- case MLED_ON:
- out = !(out & 0x1);
- break;
- case GLED_ON:
- out = (out & 0x1) + 1;
- break;
- case GPS_ON:
- handle = (out) ? gps_on_handle : gps_off_handle;
- out = 0x02;
- break;
- default:
- out &= 0x1;
- break;
+ status = acpi_get_handle(handle, (char *)method,
+ &dummy);
}
- if (write_acpi_int(handle, NULL, out, NULL))
- pr_warning(" write failed %x\n", mask);
+ if (status != AE_OK) {
+ if (ret)
+ pr_warning("Error finding %s\n", method);
+ return -ENODEV;
+ }
+ return 0;
}
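Note the return convention of this helper: 0 means the ACPI method exists (and, if ret is non-NULL, its handle is stored), -ENODEV means it does not, which is why the feature checks throughout the rest of the patch read as negations. A hedged usage sketch, reusing the definitions above; the function name is illustrative.

/* Hedged sketch: optional-feature probing with acpi_check_handle(). */
static void example_feature_probe(struct asus_laptop *asus)
{
	/* 0 means "method found", so the negation reads "is present". */
	if (!acpi_check_handle(asus->handle, METHOD_MLED, NULL))
		pr_info("mail LED control method present\n");
}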
-/* /sys/class/led handlers */
-#define ASUS_LED_HANDLER(object, mask) \
- static void object##_led_set(struct led_classdev *led_cdev, \
- enum led_brightness value) \
- { \
- object##_led_wk = (value > 0) ? 1 : 0; \
- queue_work(led_workqueue, &object##_led_work); \
- } \
- static void object##_led_update(struct work_struct *ignored) \
- { \
- int value = object##_led_wk; \
- write_status(object##_set_handle, value, (mask)); \
- } \
- static enum led_brightness object##_led_get( \
- struct led_classdev *led_cdev) \
- { \
- return led_cdev->brightness; \
- }
+/* Generic LED function */
+static int asus_led_set(struct asus_laptop *asus, const char *method,
+ int value)
+{
+ if (!strcmp(method, METHOD_MLED))
+ value = !value;
+ else if (!strcmp(method, METHOD_GLED))
+ value = !value + 1;
+ else
+ value = !!value;
-ASUS_LED_HANDLER(mled, MLED_ON);
-ASUS_LED_HANDLER(pled, PLED_ON);
-ASUS_LED_HANDLER(rled, RLED_ON);
-ASUS_LED_HANDLER(tled, TLED_ON);
-ASUS_LED_HANDLER(gled, GLED_ON);
+ return write_acpi_int(asus->handle, method, value);
+}
/*
- * Keyboard backlight
+ * LEDs
*/
-static int get_kled_lvl(void)
+/* /sys/class/led handlers */
+static void asus_led_cdev_set(struct led_classdev *led_cdev,
+ enum led_brightness value)
+{
+ struct asus_led *led = container_of(led_cdev, struct asus_led, led);
+ struct asus_laptop *asus = led->asus;
+
+ led->wk = !!value;
+ queue_work(asus->led_workqueue, &led->work);
+}
+
+static void asus_led_cdev_update(struct work_struct *work)
+{
+ struct asus_led *led = container_of(work, struct asus_led, work);
+ struct asus_laptop *asus = led->asus;
+
+ asus_led_set(asus, led->method, led->wk);
+}
+
+static enum led_brightness asus_led_cdev_get(struct led_classdev *led_cdev)
+{
+ return led_cdev->brightness;
+}
+
+/*
+ * Keyboard backlight (also a LED)
+ */
+static int asus_kled_lvl(struct asus_laptop *asus)
{
unsigned long long kblv;
struct acpi_object_list params;
@@ -468,75 +397,183 @@
in_obj.type = ACPI_TYPE_INTEGER;
in_obj.integer.value = 2;
- rv = acpi_evaluate_integer(kled_get_handle, NULL, ¶ms, &kblv);
+ rv = acpi_evaluate_integer(asus->handle, METHOD_KBD_LIGHT_GET,
+ ¶ms, &kblv);
if (ACPI_FAILURE(rv)) {
pr_warning("Error reading kled level\n");
- return 0;
+ return -ENODEV;
}
return kblv;
}
-static int set_kled_lvl(int kblv)
+static int asus_kled_set(struct asus_laptop *asus, int kblv)
{
if (kblv > 0)
kblv = (1 << 7) | (kblv & 0x7F);
else
kblv = 0;
- if (write_acpi_int(kled_set_handle, NULL, kblv, NULL)) {
+ if (write_acpi_int(asus->handle, METHOD_KBD_LIGHT_SET, kblv)) {
pr_warning("Keyboard LED display write failed\n");
return -EINVAL;
}
return 0;
}
-static void kled_led_set(struct led_classdev *led_cdev,
- enum led_brightness value)
+static void asus_kled_cdev_set(struct led_classdev *led_cdev,
+ enum led_brightness value)
{
- kled_led_wk = value;
- queue_work(led_workqueue, &kled_led_work);
+ struct asus_led *led = container_of(led_cdev, struct asus_led, led);
+ struct asus_laptop *asus = led->asus;
+
+ led->wk = value;
+ queue_work(asus->led_workqueue, &led->work);
}
-static void kled_led_update(struct work_struct *ignored)
+static void asus_kled_cdev_update(struct work_struct *work)
{
- set_kled_lvl(kled_led_wk);
+ struct asus_led *led = container_of(work, struct asus_led, work);
+ struct asus_laptop *asus = led->asus;
+
+ asus_kled_set(asus, led->wk);
}
-static enum led_brightness kled_led_get(struct led_classdev *led_cdev)
+static enum led_brightness asus_kled_cdev_get(struct led_classdev *led_cdev)
{
- return get_kled_lvl();
+ struct asus_led *led = container_of(led_cdev, struct asus_led, led);
+ struct asus_laptop *asus = led->asus;
+
+ return asus_kled_lvl(asus);
}
-static int get_lcd_state(void)
+static void asus_led_exit(struct asus_laptop *asus)
{
- return read_status(LCD_ON);
+ if (asus->mled.led.dev)
+ led_classdev_unregister(&asus->mled.led);
+ if (asus->tled.led.dev)
+ led_classdev_unregister(&asus->tled.led);
+ if (asus->pled.led.dev)
+ led_classdev_unregister(&asus->pled.led);
+ if (asus->rled.led.dev)
+ led_classdev_unregister(&asus->rled.led);
+ if (asus->gled.led.dev)
+ led_classdev_unregister(&asus->gled.led);
+ if (asus->kled.led.dev)
+ led_classdev_unregister(&asus->kled.led);
+ if (asus->led_workqueue) {
+ destroy_workqueue(asus->led_workqueue);
+ asus->led_workqueue = NULL;
+ }
}
-static int set_lcd_state(int value)
+/* Ugly, need to fix that later */
+static int asus_led_register(struct asus_laptop *asus,
+ struct asus_led *led,
+ const char *name, const char *method)
+{
+ struct led_classdev *led_cdev = &led->led;
+
+ if (!method || acpi_check_handle(asus->handle, method, NULL))
+ return 0; /* Led not present */
+
+ led->asus = asus;
+ led->method = method;
+
+ INIT_WORK(&led->work, asus_led_cdev_update);
+ led_cdev->name = name;
+ led_cdev->brightness_set = asus_led_cdev_set;
+ led_cdev->brightness_get = asus_led_cdev_get;
+ led_cdev->max_brightness = 1;
+ return led_classdev_register(&asus->platform_device->dev, led_cdev);
+}
+
+static int asus_led_init(struct asus_laptop *asus)
+{
+ int r;
+
+ /*
+	 * Functions that actually update the LEDs are called from a
+ * workqueue. By doing this as separate work rather than when the LED
+ * subsystem asks, we avoid messing with the Asus ACPI stuff during a
+ * potentially bad time, such as a timer interrupt.
+ */
+ asus->led_workqueue = create_singlethread_workqueue("led_workqueue");
+ if (!asus->led_workqueue)
+ return -ENOMEM;
+
+ r = asus_led_register(asus, &asus->mled, "asus::mail", METHOD_MLED);
+ if (r)
+ goto error;
+ r = asus_led_register(asus, &asus->tled, "asus::touchpad", METHOD_TLED);
+ if (r)
+ goto error;
+ r = asus_led_register(asus, &asus->rled, "asus::record", METHOD_RLED);
+ if (r)
+ goto error;
+ r = asus_led_register(asus, &asus->pled, "asus::phone", METHOD_PLED);
+ if (r)
+ goto error;
+ r = asus_led_register(asus, &asus->gled, "asus::gaming", METHOD_GLED);
+ if (r)
+ goto error;
+ if (!acpi_check_handle(asus->handle, METHOD_KBD_LIGHT_SET, NULL) &&
+ !acpi_check_handle(asus->handle, METHOD_KBD_LIGHT_GET, NULL)) {
+ struct asus_led *led = &asus->kled;
+ struct led_classdev *cdev = &led->led;
+
+ led->asus = asus;
+
+ INIT_WORK(&led->work, asus_kled_cdev_update);
+ cdev->name = "asus::kbd_backlight";
+ cdev->brightness_set = asus_kled_cdev_set;
+ cdev->brightness_get = asus_kled_cdev_get;
+ cdev->max_brightness = 3;
+ r = led_classdev_register(&asus->platform_device->dev, cdev);
+ }
+error:
+ if (r)
+ asus_led_exit(asus);
+ return r;
+}
+
+/*
+ * Backlight device
+ */
+static int asus_lcd_status(struct asus_laptop *asus)
+{
+ return asus->lcd_state;
+}
+
+static int asus_lcd_set(struct asus_laptop *asus, int value)
{
int lcd = 0;
acpi_status status = 0;
- lcd = value ? 1 : 0;
+ lcd = !!value;
- if (lcd == get_lcd_state())
+ if (lcd == asus_lcd_status(asus))
return 0;
- if (lcd_switch_handle) {
- status = acpi_evaluate_object(lcd_switch_handle,
- NULL, NULL, NULL);
+ if (!lcd_switch_handle)
+ return -ENODEV;
- if (ACPI_FAILURE(status))
- pr_warning("Error switching LCD\n");
+ status = acpi_evaluate_object(lcd_switch_handle,
+ NULL, NULL, NULL);
+
+ if (ACPI_FAILURE(status)) {
+ pr_warning("Error switching LCD\n");
+ return -ENODEV;
}
- write_status(NULL, lcd, LCD_ON);
+ asus->lcd_state = lcd;
return 0;
}
-static void lcd_blank(int blank)
+static void lcd_blank(struct asus_laptop *asus, int blank)
{
- struct backlight_device *bd = asus_backlight_device;
+ struct backlight_device *bd = asus->backlight_device;
+
+ asus->lcd_state = (blank == FB_BLANK_UNBLANK);
if (bd) {
bd->props.power = blank;
@@ -544,44 +581,91 @@
}
}
-static int read_brightness(struct backlight_device *bd)
+static int asus_read_brightness(struct backlight_device *bd)
{
+ struct asus_laptop *asus = bl_get_data(bd);
unsigned long long value;
acpi_status rv = AE_OK;
- rv = acpi_evaluate_integer(brightness_get_handle, NULL, NULL, &value);
+ rv = acpi_evaluate_integer(asus->handle, METHOD_BRIGHTNESS_GET,
+ NULL, &value);
if (ACPI_FAILURE(rv))
pr_warning("Error reading brightness\n");
return value;
}
-static int set_brightness(struct backlight_device *bd, int value)
+static int asus_set_brightness(struct backlight_device *bd, int value)
{
- int ret = 0;
+ struct asus_laptop *asus = bl_get_data(bd);
- value = (0 < value) ? ((15 < value) ? 15 : value) : 0;
- /* 0 <= value <= 15 */
-
- if (write_acpi_int(brightness_set_handle, NULL, value, NULL)) {
+ if (write_acpi_int(asus->handle, METHOD_BRIGHTNESS_SET, value)) {
pr_warning("Error changing brightness\n");
- ret = -EIO;
+ return -EIO;
}
-
- return ret;
+ return 0;
}
static int update_bl_status(struct backlight_device *bd)
{
+ struct asus_laptop *asus = bl_get_data(bd);
int rv;
int value = bd->props.brightness;
- rv = set_brightness(bd, value);
+ rv = asus_set_brightness(bd, value);
if (rv)
return rv;
value = (bd->props.power == FB_BLANK_UNBLANK) ? 1 : 0;
- return set_lcd_state(value);
+ return asus_lcd_set(asus, value);
+}
+
+static struct backlight_ops asusbl_ops = {
+ .get_brightness = asus_read_brightness,
+ .update_status = update_bl_status,
+};
+
+static int asus_backlight_notify(struct asus_laptop *asus)
+{
+ struct backlight_device *bd = asus->backlight_device;
+ int old = bd->props.brightness;
+
+ backlight_force_update(bd, BACKLIGHT_UPDATE_HOTKEY);
+
+ return old;
+}
+
+static int asus_backlight_init(struct asus_laptop *asus)
+{
+ struct backlight_device *bd;
+ struct device *dev = &asus->platform_device->dev;
+
+ if (!acpi_check_handle(asus->handle, METHOD_BRIGHTNESS_GET, NULL) &&
+ !acpi_check_handle(asus->handle, METHOD_BRIGHTNESS_SET, NULL) &&
+ lcd_switch_handle) {
+ bd = backlight_device_register(ASUS_LAPTOP_FILE, dev,
+ asus, &asusbl_ops);
+ if (IS_ERR(bd)) {
+ pr_err("Could not register asus backlight device\n");
+ asus->backlight_device = NULL;
+ return PTR_ERR(bd);
+ }
+
+ asus->backlight_device = bd;
+
+ bd->props.max_brightness = 15;
+ bd->props.power = FB_BLANK_UNBLANK;
+ bd->props.brightness = asus_read_brightness(bd);
+ backlight_update_status(bd);
+ }
+ return 0;
+}
+
+static void asus_backlight_exit(struct asus_laptop *asus)
+{
+ if (asus->backlight_device)
+ backlight_device_unregister(asus->backlight_device);
+ asus->backlight_device = NULL;
}
/*
@@ -596,25 +680,26 @@
static ssize_t show_infos(struct device *dev,
struct device_attribute *attr, char *page)
{
+ struct asus_laptop *asus = dev_get_drvdata(dev);
int len = 0;
unsigned long long temp;
char buf[16]; /* enough for all info */
acpi_status rv = AE_OK;
/*
- * We use the easy way, we don't care of off and count, so we don't set eof
- * to 1
+	 * We use the easy way: we don't care about off and count,
+	 * so we don't set eof to 1
*/
- len += sprintf(page, ASUS_HOTK_NAME " " ASUS_LAPTOP_VERSION "\n");
- len += sprintf(page + len, "Model reference : %s\n", hotk->name);
+ len += sprintf(page, ASUS_LAPTOP_NAME " " ASUS_LAPTOP_VERSION "\n");
+ len += sprintf(page + len, "Model reference : %s\n", asus->name);
/*
* The SFUN method probably allows the original driver to get the list
* of features supported by a given model. For now, 0x0100 or 0x0800
* bit signifies that the laptop is equipped with a Wi-Fi MiniPCI card.
* The significance of others is yet to be found.
*/
- rv = acpi_evaluate_integer(hotk->handle, "SFUN", NULL, &temp);
+ rv = acpi_evaluate_integer(asus->handle, "SFUN", NULL, &temp);
if (!ACPI_FAILURE(rv))
len += sprintf(page + len, "SFUN value : %#x\n",
(uint) temp);
@@ -624,7 +709,7 @@
* The significance of others is yet to be found.
	 * If we don't find the method, we assume the devices are present.
*/
- rv = acpi_evaluate_integer(hotk->handle, "HRWS", NULL, &temp);
+ rv = acpi_evaluate_integer(asus->handle, "HRWS", NULL, &temp);
if (!ACPI_FAILURE(rv))
len += sprintf(page + len, "HRWS value : %#x\n",
(uint) temp);
@@ -635,26 +720,26 @@
* Note: since not all the laptops provide this method, errors are
* silently ignored.
*/
- rv = acpi_evaluate_integer(hotk->handle, "ASYM", NULL, &temp);
+ rv = acpi_evaluate_integer(asus->handle, "ASYM", NULL, &temp);
if (!ACPI_FAILURE(rv))
len += sprintf(page + len, "ASYM value : %#x\n",
(uint) temp);
- if (asus_info) {
- snprintf(buf, 16, "%d", asus_info->length);
+ if (asus->dsdt_info) {
+ snprintf(buf, 16, "%d", asus->dsdt_info->length);
len += sprintf(page + len, "DSDT length : %s\n", buf);
- snprintf(buf, 16, "%d", asus_info->checksum);
+ snprintf(buf, 16, "%d", asus->dsdt_info->checksum);
len += sprintf(page + len, "DSDT checksum : %s\n", buf);
- snprintf(buf, 16, "%d", asus_info->revision);
+ snprintf(buf, 16, "%d", asus->dsdt_info->revision);
len += sprintf(page + len, "DSDT revision : %s\n", buf);
- snprintf(buf, 7, "%s", asus_info->oem_id);
+ snprintf(buf, 7, "%s", asus->dsdt_info->oem_id);
len += sprintf(page + len, "OEM id : %s\n", buf);
- snprintf(buf, 9, "%s", asus_info->oem_table_id);
+ snprintf(buf, 9, "%s", asus->dsdt_info->oem_table_id);
len += sprintf(page + len, "OEM table id : %s\n", buf);
- snprintf(buf, 16, "%x", asus_info->oem_revision);
+ snprintf(buf, 16, "%x", asus->dsdt_info->oem_revision);
len += sprintf(page + len, "OEM revision : 0x%s\n", buf);
- snprintf(buf, 5, "%s", asus_info->asl_compiler_id);
+ snprintf(buf, 5, "%s", asus->dsdt_info->asl_compiler_id);
len += sprintf(page + len, "ASL comp vendor id : %s\n", buf);
- snprintf(buf, 16, "%x", asus_info->asl_compiler_revision);
+ snprintf(buf, 16, "%x", asus->dsdt_info->asl_compiler_revision);
len += sprintf(page + len, "ASL comp revision : 0x%s\n", buf);
}
@@ -672,8 +757,9 @@
return count;
}
-static ssize_t store_status(const char *buf, size_t count,
- acpi_handle handle, int mask)
+static ssize_t sysfs_acpi_set(struct asus_laptop *asus,
+ const char *buf, size_t count,
+ const char *method)
{
int rv, value;
int out = 0;
@@ -682,8 +768,8 @@
if (rv > 0)
out = value ? 1 : 0;
- write_status(handle, out, mask);
-
+	if (write_acpi_int(asus->handle, method, out))
+ return -ENODEV;
return rv;
}
@@ -693,67 +779,116 @@
static ssize_t show_ledd(struct device *dev,
struct device_attribute *attr, char *buf)
{
- return sprintf(buf, "0x%08x\n", hotk->ledd_status);
+ struct asus_laptop *asus = dev_get_drvdata(dev);
+
+ return sprintf(buf, "0x%08x\n", asus->ledd_status);
}
static ssize_t store_ledd(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
+ struct asus_laptop *asus = dev_get_drvdata(dev);
int rv, value;
rv = parse_arg(buf, count, &value);
if (rv > 0) {
- if (write_acpi_int(ledd_set_handle, NULL, value, NULL))
+ if (write_acpi_int(asus->handle, METHOD_LEDD, value))
pr_warning("LED display write failed\n");
else
- hotk->ledd_status = (u32) value;
+ asus->ledd_status = (u32) value;
}
return rv;
}
/*
+ * Wireless
+ */
+static int asus_wireless_status(struct asus_laptop *asus, int mask)
+{
+ unsigned long long status;
+ acpi_status rv = AE_OK;
+
+ if (!asus->have_rsts)
+ return (asus->wireless_status & mask) ? 1 : 0;
+
+ rv = acpi_evaluate_integer(asus->handle, METHOD_WL_STATUS,
+ NULL, &status);
+ if (ACPI_FAILURE(rv)) {
+ pr_warning("Error reading Wireless status\n");
+ return -EINVAL;
+ }
+ return !!(status & mask);
+}
+
+/*
* WLAN
*/
+static int asus_wlan_set(struct asus_laptop *asus, int status)
+{
+ if (write_acpi_int(asus->handle, METHOD_WLAN, !!status)) {
+		pr_warning("Error setting wlan status to %d\n", status);
+ return -EIO;
+ }
+ return 0;
+}
+
static ssize_t show_wlan(struct device *dev,
struct device_attribute *attr, char *buf)
{
- return sprintf(buf, "%d\n", read_status(WL_ON));
+ struct asus_laptop *asus = dev_get_drvdata(dev);
+
+ return sprintf(buf, "%d\n", asus_wireless_status(asus, WL_RSTS));
}
static ssize_t store_wlan(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
- return store_status(buf, count, wl_switch_handle, WL_ON);
+ struct asus_laptop *asus = dev_get_drvdata(dev);
+
+ return sysfs_acpi_set(asus, buf, count, METHOD_WLAN);
}
/*
* Bluetooth
*/
+static int asus_bluetooth_set(struct asus_laptop *asus, int status)
+{
+ if (write_acpi_int(asus->handle, METHOD_BLUETOOTH, !!status)) {
+		pr_warning("Error setting bluetooth status to %d\n", status);
+ return -EIO;
+ }
+ return 0;
+}
+
static ssize_t show_bluetooth(struct device *dev,
struct device_attribute *attr, char *buf)
{
- return sprintf(buf, "%d\n", read_status(BT_ON));
+ struct asus_laptop *asus = dev_get_drvdata(dev);
+
+ return sprintf(buf, "%d\n", asus_wireless_status(asus, BT_RSTS));
}
static ssize_t store_bluetooth(struct device *dev,
struct device_attribute *attr, const char *buf,
size_t count)
{
- return store_status(buf, count, bt_switch_handle, BT_ON);
+ struct asus_laptop *asus = dev_get_drvdata(dev);
+
+ return sysfs_acpi_set(asus, buf, count, METHOD_BLUETOOTH);
}
/*
* Display
*/
-static void set_display(int value)
+static void asus_set_display(struct asus_laptop *asus, int value)
{
/* no sanity check needed for now */
- if (write_acpi_int(display_set_handle, NULL, value, NULL))
+ if (write_acpi_int(asus->handle, METHOD_SWITCH_DISPLAY, value))
pr_warning("Error setting display\n");
return;
}
-static int read_display(void)
+static int read_display(struct asus_laptop *asus)
{
unsigned long long value = 0;
acpi_status rv = AE_OK;
@@ -769,7 +904,7 @@
pr_warning("Error reading display status\n");
}
- value &= 0x0F; /* needed for some models, shouldn't hurt others */
+ value &= 0x0F; /* needed for some models, shouldn't hurt others */
return value;
}
@@ -781,7 +916,11 @@
static ssize_t show_disp(struct device *dev,
struct device_attribute *attr, char *buf)
{
- return sprintf(buf, "%d\n", read_display());
+ struct asus_laptop *asus = dev_get_drvdata(dev);
+
+ if (!display_get_handle)
+ return -ENODEV;
+ return sprintf(buf, "%d\n", read_display(asus));
}
/*
@@ -794,65 +933,72 @@
static ssize_t store_disp(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
+ struct asus_laptop *asus = dev_get_drvdata(dev);
int rv, value;
rv = parse_arg(buf, count, &value);
if (rv > 0)
- set_display(value);
+ asus_set_display(asus, value);
return rv;
}
/*
* Light Sens
*/
-static void set_light_sens_switch(int value)
+static void asus_als_switch(struct asus_laptop *asus, int value)
{
- if (write_acpi_int(ls_switch_handle, NULL, value, NULL))
+ if (write_acpi_int(asus->handle, METHOD_ALS_CONTROL, value))
pr_warning("Error setting light sensor switch\n");
- hotk->light_switch = value;
+ asus->light_switch = value;
}
static ssize_t show_lssw(struct device *dev,
struct device_attribute *attr, char *buf)
{
- return sprintf(buf, "%d\n", hotk->light_switch);
+ struct asus_laptop *asus = dev_get_drvdata(dev);
+
+ return sprintf(buf, "%d\n", asus->light_switch);
}
static ssize_t store_lssw(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
+ struct asus_laptop *asus = dev_get_drvdata(dev);
int rv, value;
rv = parse_arg(buf, count, &value);
if (rv > 0)
- set_light_sens_switch(value ? 1 : 0);
+ asus_als_switch(asus, value ? 1 : 0);
return rv;
}
-static void set_light_sens_level(int value)
+static void asus_als_level(struct asus_laptop *asus, int value)
{
- if (write_acpi_int(ls_level_handle, NULL, value, NULL))
+ if (write_acpi_int(asus->handle, METHOD_ALS_LEVEL, value))
pr_warning("Error setting light sensor level\n");
- hotk->light_level = value;
+ asus->light_level = value;
}
static ssize_t show_lslvl(struct device *dev,
struct device_attribute *attr, char *buf)
{
- return sprintf(buf, "%d\n", hotk->light_level);
+ struct asus_laptop *asus = dev_get_drvdata(dev);
+
+ return sprintf(buf, "%d\n", asus->light_level);
}
static ssize_t store_lslvl(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
+ struct asus_laptop *asus = dev_get_drvdata(dev);
int rv, value;
rv = parse_arg(buf, count, &value);
if (rv > 0) {
value = (0 < value) ? ((15 < value) ? 15 : value) : 0;
/* 0 <= value <= 15 */
- set_light_sens_level(value);
+ asus_als_level(asus, value);
}
return rv;
@@ -861,197 +1007,309 @@
/*
* GPS
*/
+static int asus_gps_status(struct asus_laptop *asus)
+{
+ unsigned long long status;
+ acpi_status rv = AE_OK;
+
+ rv = acpi_evaluate_integer(asus->handle, METHOD_GPS_STATUS,
+ NULL, &status);
+ if (ACPI_FAILURE(rv)) {
+ pr_warning("Error reading GPS status\n");
+ return -ENODEV;
+ }
+ return !!status;
+}
+
+static int asus_gps_switch(struct asus_laptop *asus, int status)
+{
+ const char *meth = status ? METHOD_GPS_ON : METHOD_GPS_OFF;
+
+ if (write_acpi_int(asus->handle, meth, 0x02))
+ return -ENODEV;
+ return 0;
+}
+
static ssize_t show_gps(struct device *dev,
struct device_attribute *attr, char *buf)
{
- return sprintf(buf, "%d\n", read_status(GPS_ON));
+ struct asus_laptop *asus = dev_get_drvdata(dev);
+
+ return sprintf(buf, "%d\n", asus_gps_status(asus));
}
static ssize_t store_gps(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
- return store_status(buf, count, NULL, GPS_ON);
+ struct asus_laptop *asus = dev_get_drvdata(dev);
+ int rv, value;
+ int ret;
+
+ rv = parse_arg(buf, count, &value);
+ if (rv <= 0)
+ return -EINVAL;
+ ret = asus_gps_switch(asus, !!value);
+ if (ret)
+ return ret;
+ rfkill_set_sw_state(asus->gps_rfkill, !value);
+ return rv;
}
/*
- * Hotkey functions
+ * rfkill
*/
-static struct key_entry *asus_get_entry_by_scancode(int code)
+static int asus_gps_rfkill_set(void *data, bool blocked)
{
- struct key_entry *key;
+	struct asus_laptop *asus = data;
- for (key = asus_keymap; key->type != KE_END; key++)
- if (code == key->code)
- return key;
-
- return NULL;
+	return asus_gps_switch(asus, !blocked);
}
-static struct key_entry *asus_get_entry_by_keycode(int code)
+static const struct rfkill_ops asus_gps_rfkill_ops = {
+ .set_block = asus_gps_rfkill_set,
+};
+
+static void asus_rfkill_exit(struct asus_laptop *asus)
{
- struct key_entry *key;
-
- for (key = asus_keymap; key->type != KE_END; key++)
- if (code == key->keycode && key->type == KE_KEY)
- return key;
-
- return NULL;
-}
-
-static int asus_getkeycode(struct input_dev *dev, int scancode, int *keycode)
-{
- struct key_entry *key = asus_get_entry_by_scancode(scancode);
-
- if (key && key->type == KE_KEY) {
- *keycode = key->keycode;
- return 0;
+ if (asus->gps_rfkill) {
+ rfkill_unregister(asus->gps_rfkill);
+ rfkill_destroy(asus->gps_rfkill);
+ asus->gps_rfkill = NULL;
}
-
- return -EINVAL;
}
-static int asus_setkeycode(struct input_dev *dev, int scancode, int keycode)
+static int asus_rfkill_init(struct asus_laptop *asus)
{
- struct key_entry *key;
- int old_keycode;
+ int result;
- if (keycode < 0 || keycode > KEY_MAX)
+ if (acpi_check_handle(asus->handle, METHOD_GPS_ON, NULL) ||
+ acpi_check_handle(asus->handle, METHOD_GPS_OFF, NULL) ||
+ acpi_check_handle(asus->handle, METHOD_GPS_STATUS, NULL))
+ return 0;
+
+ asus->gps_rfkill = rfkill_alloc("asus-gps", &asus->platform_device->dev,
+ RFKILL_TYPE_GPS,
+					&asus_gps_rfkill_ops, asus);
+ if (!asus->gps_rfkill)
return -EINVAL;
- key = asus_get_entry_by_scancode(scancode);
- if (key && key->type == KE_KEY) {
- old_keycode = key->keycode;
- key->keycode = keycode;
- set_bit(keycode, dev->keybit);
- if (!asus_get_entry_by_keycode(old_keycode))
- clear_bit(old_keycode, dev->keybit);
- return 0;
+ result = rfkill_register(asus->gps_rfkill);
+ if (result) {
+ rfkill_destroy(asus->gps_rfkill);
+ asus->gps_rfkill = NULL;
}
- return -EINVAL;
+ return result;
}
-static void asus_hotk_notify(struct acpi_device *device, u32 event)
+/*
+ * Input device (i.e. hotkeys)
+ */
+static void asus_input_notify(struct asus_laptop *asus, int event)
{
- static struct key_entry *key;
- u16 count;
+ if (asus->inputdev)
+ sparse_keymap_report_event(asus->inputdev, event, 1, true);
+}
- /* TODO Find a better way to handle events count. */
- if (!hotk)
- return;
+static int asus_input_init(struct asus_laptop *asus)
+{
+ struct input_dev *input;
+ int error;
+
+ input = input_allocate_device();
+ if (!input) {
+ pr_info("Unable to allocate input device\n");
+ return 0;
+ }
+ input->name = "Asus Laptop extra buttons";
+ input->phys = ASUS_LAPTOP_FILE "/input0";
+ input->id.bustype = BUS_HOST;
+ input->dev.parent = &asus->platform_device->dev;
+ input_set_drvdata(input, asus);
+
+ error = sparse_keymap_setup(input, asus_keymap, NULL);
+ if (error) {
+ pr_err("Unable to setup input device keymap\n");
+		goto err_device;
+ }
+ error = input_register_device(input);
+ if (error) {
+ pr_info("Unable to register input device\n");
+		goto err_keymap;
+ }
+
+ asus->inputdev = input;
+ return 0;
+
+err_keymap:
+ sparse_keymap_free(input);
+err_device:
+ input_free_device(input);
+ return error;
+}
+
+static void asus_input_exit(struct asus_laptop *asus)
+{
+ if (asus->inputdev) {
+ sparse_keymap_free(asus->inputdev);
+ input_unregister_device(asus->inputdev);
+ }
+}
+
+/*
+ * ACPI driver
+ */
+static void asus_acpi_notify(struct acpi_device *device, u32 event)
+{
+ struct asus_laptop *asus = acpi_driver_data(device);
+ u16 count;
/*
* We need to tell the backlight device when the backlight power is
* switched
*/
- if (event == ATKD_LCD_ON) {
- write_status(NULL, 1, LCD_ON);
- lcd_blank(FB_BLANK_UNBLANK);
- } else if (event == ATKD_LCD_OFF) {
- write_status(NULL, 0, LCD_ON);
- lcd_blank(FB_BLANK_POWERDOWN);
- }
+ if (event == ATKD_LCD_ON)
+ lcd_blank(asus, FB_BLANK_UNBLANK);
+ else if (event == ATKD_LCD_OFF)
+ lcd_blank(asus, FB_BLANK_POWERDOWN);
- count = hotk->event_count[event % 128]++;
- acpi_bus_generate_proc_event(hotk->device, event, count);
- acpi_bus_generate_netlink_event(hotk->device->pnp.device_class,
- dev_name(&hotk->device->dev), event,
+ /* TODO Find a better way to handle events count. */
+ count = asus->event_count[event % 128]++;
+ acpi_bus_generate_proc_event(asus->device, event, count);
+ acpi_bus_generate_netlink_event(asus->device->pnp.device_class,
+ dev_name(&asus->device->dev), event,
count);
- if (hotk->inputdev) {
- key = asus_get_entry_by_scancode(event);
- if (!key)
- return ;
+ /* Brightness events are special */
+ if (event >= ATKD_BR_MIN && event <= ATKD_BR_MAX) {
- switch (key->type) {
- case KE_KEY:
- input_report_key(hotk->inputdev, key->keycode, 1);
- input_sync(hotk->inputdev);
- input_report_key(hotk->inputdev, key->keycode, 0);
- input_sync(hotk->inputdev);
- break;
+ /* Ignore them completely if the acpi video driver is used */
+ if (asus->backlight_device != NULL) {
+ /* Update the backlight device. */
+ asus_backlight_notify(asus);
}
+ return ;
}
+ asus_input_notify(asus, event);
}
-#define ASUS_CREATE_DEVICE_ATTR(_name) \
- struct device_attribute dev_attr_##_name = { \
- .attr = { \
- .name = __stringify(_name), \
- .mode = 0 }, \
- .show = NULL, \
- .store = NULL, \
- }
+static DEVICE_ATTR(infos, S_IRUGO, show_infos, NULL);
+static DEVICE_ATTR(wlan, S_IRUGO | S_IWUSR, show_wlan, store_wlan);
+static DEVICE_ATTR(bluetooth, S_IRUGO | S_IWUSR, show_bluetooth,
+ store_bluetooth);
+static DEVICE_ATTR(display, S_IRUGO | S_IWUSR, show_disp, store_disp);
+static DEVICE_ATTR(ledd, S_IRUGO | S_IWUSR, show_ledd, store_ledd);
+static DEVICE_ATTR(ls_level, S_IRUGO | S_IWUSR, show_lslvl, store_lslvl);
+static DEVICE_ATTR(ls_switch, S_IRUGO | S_IWUSR, show_lssw, store_lssw);
+static DEVICE_ATTR(gps, S_IRUGO | S_IWUSR, show_gps, store_gps);
-#define ASUS_SET_DEVICE_ATTR(_name, _mode, _show, _store) \
- do { \
- dev_attr_##_name.attr.mode = _mode; \
- dev_attr_##_name.show = _show; \
- dev_attr_##_name.store = _store; \
- } while(0)
-
-static ASUS_CREATE_DEVICE_ATTR(infos);
-static ASUS_CREATE_DEVICE_ATTR(wlan);
-static ASUS_CREATE_DEVICE_ATTR(bluetooth);
-static ASUS_CREATE_DEVICE_ATTR(display);
-static ASUS_CREATE_DEVICE_ATTR(ledd);
-static ASUS_CREATE_DEVICE_ATTR(ls_switch);
-static ASUS_CREATE_DEVICE_ATTR(ls_level);
-static ASUS_CREATE_DEVICE_ATTR(gps);
-
-static struct attribute *asuspf_attributes[] = {
- &dev_attr_infos.attr,
- &dev_attr_wlan.attr,
- &dev_attr_bluetooth.attr,
- &dev_attr_display.attr,
- &dev_attr_ledd.attr,
- &dev_attr_ls_switch.attr,
- &dev_attr_ls_level.attr,
- &dev_attr_gps.attr,
- NULL
-};
-
-static struct attribute_group asuspf_attribute_group = {
- .attrs = asuspf_attributes
-};
-
-static struct platform_driver asuspf_driver = {
- .driver = {
- .name = ASUS_HOTK_FILE,
- .owner = THIS_MODULE,
- }
-};
-
-static struct platform_device *asuspf_device;
-
-static void asus_hotk_add_fs(void)
+static void asus_sysfs_exit(struct asus_laptop *asus)
{
- ASUS_SET_DEVICE_ATTR(infos, 0444, show_infos, NULL);
+ struct platform_device *device = asus->platform_device;
- if (wl_switch_handle)
- ASUS_SET_DEVICE_ATTR(wlan, 0644, show_wlan, store_wlan);
+ device_remove_file(&device->dev, &dev_attr_infos);
+ device_remove_file(&device->dev, &dev_attr_wlan);
+ device_remove_file(&device->dev, &dev_attr_bluetooth);
+ device_remove_file(&device->dev, &dev_attr_display);
+ device_remove_file(&device->dev, &dev_attr_ledd);
+ device_remove_file(&device->dev, &dev_attr_ls_switch);
+ device_remove_file(&device->dev, &dev_attr_ls_level);
+ device_remove_file(&device->dev, &dev_attr_gps);
+}
- if (bt_switch_handle)
- ASUS_SET_DEVICE_ATTR(bluetooth, 0644,
- show_bluetooth, store_bluetooth);
+static int asus_sysfs_init(struct asus_laptop *asus)
+{
+ struct platform_device *device = asus->platform_device;
+ int err;
- if (display_set_handle && display_get_handle)
- ASUS_SET_DEVICE_ATTR(display, 0644, show_disp, store_disp);
- else if (display_set_handle)
- ASUS_SET_DEVICE_ATTR(display, 0200, NULL, store_disp);
+ err = device_create_file(&device->dev, &dev_attr_infos);
+ if (err)
+ return err;
- if (ledd_set_handle)
- ASUS_SET_DEVICE_ATTR(ledd, 0644, show_ledd, store_ledd);
-
- if (ls_switch_handle && ls_level_handle) {
- ASUS_SET_DEVICE_ATTR(ls_level, 0644, show_lslvl, store_lslvl);
- ASUS_SET_DEVICE_ATTR(ls_switch, 0644, show_lssw, store_lssw);
+ if (!acpi_check_handle(asus->handle, METHOD_WLAN, NULL)) {
+ err = device_create_file(&device->dev, &dev_attr_wlan);
+ if (err)
+ return err;
}
- if (gps_status_handle && gps_on_handle && gps_off_handle)
- ASUS_SET_DEVICE_ATTR(gps, 0644, show_gps, store_gps);
+ if (!acpi_check_handle(asus->handle, METHOD_BLUETOOTH, NULL)) {
+ err = device_create_file(&device->dev, &dev_attr_bluetooth);
+ if (err)
+ return err;
+ }
+
+ if (!acpi_check_handle(asus->handle, METHOD_SWITCH_DISPLAY, NULL)) {
+ err = device_create_file(&device->dev, &dev_attr_display);
+ if (err)
+ return err;
+ }
+
+ if (!acpi_check_handle(asus->handle, METHOD_LEDD, NULL)) {
+ err = device_create_file(&device->dev, &dev_attr_ledd);
+ if (err)
+ return err;
+ }
+
+ if (!acpi_check_handle(asus->handle, METHOD_ALS_CONTROL, NULL) &&
+ !acpi_check_handle(asus->handle, METHOD_ALS_LEVEL, NULL)) {
+ err = device_create_file(&device->dev, &dev_attr_ls_switch);
+ if (err)
+ return err;
+ err = device_create_file(&device->dev, &dev_attr_ls_level);
+ if (err)
+ return err;
+ }
+
+ if (!acpi_check_handle(asus->handle, METHOD_GPS_ON, NULL) &&
+ !acpi_check_handle(asus->handle, METHOD_GPS_OFF, NULL) &&
+ !acpi_check_handle(asus->handle, METHOD_GPS_STATUS, NULL)) {
+ err = device_create_file(&device->dev, &dev_attr_gps);
+ if (err)
+ return err;
+ }
+
+ return err;
}
+static int asus_platform_init(struct asus_laptop *asus)
+{
+ int err;
+
+ asus->platform_device = platform_device_alloc(ASUS_LAPTOP_FILE, -1);
+ if (!asus->platform_device)
+ return -ENOMEM;
+ platform_set_drvdata(asus->platform_device, asus);
+
+ err = platform_device_add(asus->platform_device);
+ if (err)
+ goto fail_platform_device;
+
+ err = asus_sysfs_init(asus);
+ if (err)
+ goto fail_sysfs;
+ return 0;
+
+fail_sysfs:
+ asus_sysfs_exit(asus);
+ platform_device_del(asus->platform_device);
+fail_platform_device:
+ platform_device_put(asus->platform_device);
+ return err;
+}
+
+static void asus_platform_exit(struct asus_laptop *asus)
+{
+ asus_sysfs_exit(asus);
+ platform_device_unregister(asus->platform_device);
+}
+
+static struct platform_driver platform_driver = {
+ .driver = {
+ .name = ASUS_LAPTOP_FILE,
+ .owner = THIS_MODULE,
+ }
+};
+
static int asus_handle_init(char *name, acpi_handle * handle,
char **paths, int num_paths)
{
@@ -1073,10 +1331,11 @@
ARRAY_SIZE(object##_paths))
/*
- * This function is used to initialize the hotk with right values. In this
- * method, we can make all the detection we want, and modify the hotk struct
+ * This function is used to initialize the context with right values. In this
+ * method, we can make all the detection we want, and modify the asus_laptop
+ * struct
*/
-static int asus_hotk_get_info(void)
+static int asus_laptop_get_info(struct asus_laptop *asus)
{
struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
union acpi_object *model = NULL;
@@ -1089,22 +1348,21 @@
* models, but late enough to allow acpi_bus_register_driver() to fail
* before doing anything ACPI-specific. Should we encounter a machine,
* which needs special handling (i.e. its hotkey device has a different
- * HID), this bit will be moved. A global variable asus_info contains
- * the DSDT header.
+ * HID), this bit will be moved.
*/
- status = acpi_get_table(ACPI_SIG_DSDT, 1, &asus_info);
+ status = acpi_get_table(ACPI_SIG_DSDT, 1, &asus->dsdt_info);
if (ACPI_FAILURE(status))
pr_warning("Couldn't get the DSDT table header\n");
/* We have to write 0 on init this far for all ASUS models */
- if (write_acpi_int(hotk->handle, "INIT", 0, &buffer)) {
+ if (write_acpi_int_ret(asus->handle, "INIT", 0, &buffer)) {
pr_err("Hotkey initialization failed\n");
return -ENODEV;
}
/* This needs to be called for some laptops to init properly */
status =
- acpi_evaluate_integer(hotk->handle, "BSTS", NULL, &bsts_result);
+ acpi_evaluate_integer(asus->handle, "BSTS", NULL, &bsts_result);
if (ACPI_FAILURE(status))
pr_warning("Error calling BSTS\n");
else if (bsts_result)
@@ -1112,8 +1370,8 @@
(uint) bsts_result);
/* This too ... */
- write_acpi_int(hotk->handle, "CWAP", wapf, NULL);
-
+ if (write_acpi_int(asus->handle, "CWAP", wapf))
+ pr_err("Error calling CWAP(%d)\n", wapf);
/*
* Try to match the object returned by INIT to the specific model.
* Handle every possible object (or the lack of thereof) the DSDT
@@ -1134,397 +1392,210 @@
break;
}
}
- hotk->name = kstrdup(string, GFP_KERNEL);
- if (!hotk->name)
+ asus->name = kstrdup(string, GFP_KERNEL);
+ if (!asus->name)
return -ENOMEM;
if (*string)
pr_notice(" %s model detected\n", string);
- ASUS_HANDLE_INIT(mled_set);
- ASUS_HANDLE_INIT(tled_set);
- ASUS_HANDLE_INIT(rled_set);
- ASUS_HANDLE_INIT(pled_set);
- ASUS_HANDLE_INIT(gled_set);
-
- ASUS_HANDLE_INIT(ledd_set);
-
- ASUS_HANDLE_INIT(kled_set);
- ASUS_HANDLE_INIT(kled_get);
-
/*
	 * The HWRS method returns information about the hardware.
* 0x80 bit is for WLAN, 0x100 for Bluetooth.
* The significance of others is yet to be found.
- * If we don't find the method, we assume the device are present.
*/
status =
- acpi_evaluate_integer(hotk->handle, "HRWS", NULL, &hwrs_result);
- if (ACPI_FAILURE(status))
- hwrs_result = WL_HWRS | BT_HWRS;
+ acpi_evaluate_integer(asus->handle, "HRWS", NULL, &hwrs_result);
+ if (!ACPI_FAILURE(status))
+		pr_notice(" HRWS returned %x\n", (int)hwrs_result);
- if (hwrs_result & WL_HWRS)
- ASUS_HANDLE_INIT(wl_switch);
- if (hwrs_result & BT_HWRS)
- ASUS_HANDLE_INIT(bt_switch);
+ if (!acpi_check_handle(asus->handle, METHOD_WL_STATUS, NULL))
+ asus->have_rsts = true;
- ASUS_HANDLE_INIT(wireless_status);
-
- ASUS_HANDLE_INIT(brightness_set);
- ASUS_HANDLE_INIT(brightness_get);
-
+ /* Scheduled for removal */
ASUS_HANDLE_INIT(lcd_switch);
-
- ASUS_HANDLE_INIT(display_set);
ASUS_HANDLE_INIT(display_get);
- /*
- * There is a lot of models with "ALSL", but a few get
- * a real light sens, so we need to check it.
- */
- if (!ASUS_HANDLE_INIT(ls_switch))
- ASUS_HANDLE_INIT(ls_level);
-
- ASUS_HANDLE_INIT(gps_on);
- ASUS_HANDLE_INIT(gps_off);
- ASUS_HANDLE_INIT(gps_status);
-
kfree(model);
return AE_OK;
}
-static int asus_input_init(void)
-{
- const struct key_entry *key;
- int result;
+static bool asus_device_present;
- hotk->inputdev = input_allocate_device();
- if (!hotk->inputdev) {
- pr_info("Unable to allocate input device\n");
- return 0;
- }
- hotk->inputdev->name = "Asus Laptop extra buttons";
- hotk->inputdev->phys = ASUS_HOTK_FILE "/input0";
- hotk->inputdev->id.bustype = BUS_HOST;
- hotk->inputdev->getkeycode = asus_getkeycode;
- hotk->inputdev->setkeycode = asus_setkeycode;
-
- for (key = asus_keymap; key->type != KE_END; key++) {
- switch (key->type) {
- case KE_KEY:
- set_bit(EV_KEY, hotk->inputdev->evbit);
- set_bit(key->keycode, hotk->inputdev->keybit);
- break;
- }
- }
- result = input_register_device(hotk->inputdev);
- if (result) {
- pr_info("Unable to register input device\n");
- input_free_device(hotk->inputdev);
- }
- return result;
-}
-
-static int asus_hotk_check(void)
+static int __devinit asus_acpi_init(struct asus_laptop *asus)
{
int result = 0;
- result = acpi_bus_get_status(hotk->device);
+ result = acpi_bus_get_status(asus->device);
+ if (result)
+ return result;
+ if (!asus->device->status.present) {
+ pr_err("Hotkey device not present, aborting\n");
+ return -ENODEV;
+ }
+
+ result = asus_laptop_get_info(asus);
if (result)
return result;
- if (hotk->device->status.present) {
- result = asus_hotk_get_info();
- } else {
- pr_err("Hotkey device not present, aborting\n");
- return -EINVAL;
+ /* WLED and BLED are on by default */
+ if (bluetooth_status >= 0)
+ asus_bluetooth_set(asus, !!bluetooth_status);
+
+ if (wlan_status >= 0)
+ asus_wlan_set(asus, !!wlan_status);
+
+ /* Keyboard Backlight is on by default */
+ if (!acpi_check_handle(asus->handle, METHOD_KBD_LIGHT_SET, NULL))
+ asus_kled_set(asus, 1);
+
+ /* LED display is off by default */
+ asus->ledd_status = 0xFFF;
+
+ /* Set initial values of light sensor and level */
+ asus->light_switch = 0; /* Default to light sensor disabled */
+ asus->light_level = 5; /* level 5 for sensor sensitivity */
+
+ if (!acpi_check_handle(asus->handle, METHOD_ALS_CONTROL, NULL) &&
+ !acpi_check_handle(asus->handle, METHOD_ALS_LEVEL, NULL)) {
+ asus_als_switch(asus, asus->light_switch);
+ asus_als_level(asus, asus->light_level);
}
+	asus->lcd_state = 1; /* LCD should be on when the module loads */
return result;
}
-static int asus_hotk_found;
-
-static int asus_hotk_add(struct acpi_device *device)
+static int __devinit asus_acpi_add(struct acpi_device *device)
{
+ struct asus_laptop *asus;
int result;
pr_notice("Asus Laptop Support version %s\n",
- ASUS_LAPTOP_VERSION);
-
- hotk = kzalloc(sizeof(struct asus_hotk), GFP_KERNEL);
- if (!hotk)
+ ASUS_LAPTOP_VERSION);
+ asus = kzalloc(sizeof(struct asus_laptop), GFP_KERNEL);
+ if (!asus)
return -ENOMEM;
+ asus->handle = device->handle;
+ strcpy(acpi_device_name(device), ASUS_LAPTOP_DEVICE_NAME);
+ strcpy(acpi_device_class(device), ASUS_LAPTOP_CLASS);
+ device->driver_data = asus;
+ asus->device = device;
- hotk->handle = device->handle;
- strcpy(acpi_device_name(device), ASUS_HOTK_DEVICE_NAME);
- strcpy(acpi_device_class(device), ASUS_HOTK_CLASS);
- device->driver_data = hotk;
- hotk->device = device;
-
- result = asus_hotk_check();
+ result = asus_acpi_init(asus);
if (result)
- goto end;
+ goto fail_platform;
- asus_hotk_add_fs();
+ /*
+ * Register the platform device first. It is used as a parent for the
+ * sub-devices below.
+ */
+ result = asus_platform_init(asus);
+ if (result)
+ goto fail_platform;
- asus_hotk_found = 1;
+ if (!acpi_video_backlight_support()) {
+ result = asus_backlight_init(asus);
+ if (result)
+ goto fail_backlight;
+ } else
+ pr_info("Backlight controlled by ACPI video driver\n");
- /* WLED and BLED are on by default */
- write_status(bt_switch_handle, 1, BT_ON);
- write_status(wl_switch_handle, 1, WL_ON);
+ result = asus_input_init(asus);
+ if (result)
+ goto fail_input;
- /* If the h/w switch is off, we need to check the real status */
- write_status(NULL, read_status(BT_ON), BT_ON);
- write_status(NULL, read_status(WL_ON), WL_ON);
+ result = asus_led_init(asus);
+ if (result)
+ goto fail_led;
- /* LCD Backlight is on by default */
- write_status(NULL, 1, LCD_ON);
+ result = asus_rfkill_init(asus);
+ if (result)
+ goto fail_rfkill;
- /* Keyboard Backlight is on by default */
- if (kled_set_handle)
- set_kled_lvl(1);
+ asus_device_present = true;
+ return 0;
- /* LED display is off by default */
- hotk->ledd_status = 0xFFF;
-
- /* Set initial values of light sensor and level */
- hotk->light_switch = 0; /* Default to light sensor disabled */
- hotk->light_level = 5; /* level 5 for sensor sensitivity */
-
- if (ls_switch_handle)
- set_light_sens_switch(hotk->light_switch);
-
- if (ls_level_handle)
- set_light_sens_level(hotk->light_level);
-
- /* GPS is on by default */
- write_status(NULL, 1, GPS_ON);
-
-end:
- if (result) {
- kfree(hotk->name);
- kfree(hotk);
- }
+fail_rfkill:
+ asus_led_exit(asus);
+fail_led:
+ asus_input_exit(asus);
+fail_input:
+ asus_backlight_exit(asus);
+fail_backlight:
+ asus_platform_exit(asus);
+fail_platform:
+ kfree(asus->name);
+ kfree(asus);
return result;
}
-static int asus_hotk_remove(struct acpi_device *device, int type)
+static int asus_acpi_remove(struct acpi_device *device, int type)
{
- kfree(hotk->name);
- kfree(hotk);
+ struct asus_laptop *asus = acpi_driver_data(device);
+ asus_backlight_exit(asus);
+ asus_rfkill_exit(asus);
+ asus_led_exit(asus);
+ asus_input_exit(asus);
+ asus_platform_exit(asus);
+
+ kfree(asus->name);
+ kfree(asus);
return 0;
}
-static void asus_backlight_exit(void)
-{
- if (asus_backlight_device)
- backlight_device_unregister(asus_backlight_device);
-}
+static const struct acpi_device_id asus_device_ids[] = {
+ {"ATK0100", 0},
+ {"ATK0101", 0},
+ {"", 0},
+};
+MODULE_DEVICE_TABLE(acpi, asus_device_ids);
-#define ASUS_LED_UNREGISTER(object) \
- if (object##_led.dev) \
- led_classdev_unregister(&object##_led)
-
-static void asus_led_exit(void)
-{
- destroy_workqueue(led_workqueue);
- ASUS_LED_UNREGISTER(mled);
- ASUS_LED_UNREGISTER(tled);
- ASUS_LED_UNREGISTER(pled);
- ASUS_LED_UNREGISTER(rled);
- ASUS_LED_UNREGISTER(gled);
- ASUS_LED_UNREGISTER(kled);
-}
-
-static void asus_input_exit(void)
-{
- if (hotk->inputdev)
- input_unregister_device(hotk->inputdev);
-}
-
-static void __exit asus_laptop_exit(void)
-{
- asus_backlight_exit();
- asus_led_exit();
- asus_input_exit();
-
- acpi_bus_unregister_driver(&asus_hotk_driver);
- sysfs_remove_group(&asuspf_device->dev.kobj, &asuspf_attribute_group);
- platform_device_unregister(asuspf_device);
- platform_driver_unregister(&asuspf_driver);
-}
-
-static int asus_backlight_init(struct device *dev)
-{
- struct backlight_device *bd;
-
- if (brightness_set_handle && lcd_switch_handle) {
- bd = backlight_device_register(ASUS_HOTK_FILE, dev,
- NULL, &asusbl_ops);
- if (IS_ERR(bd)) {
- pr_err("Could not register asus backlight device\n");
- asus_backlight_device = NULL;
- return PTR_ERR(bd);
- }
-
- asus_backlight_device = bd;
-
- bd->props.max_brightness = 15;
- bd->props.brightness = read_brightness(NULL);
- bd->props.power = FB_BLANK_UNBLANK;
- backlight_update_status(bd);
- }
- return 0;
-}
-
-static int asus_led_register(acpi_handle handle,
- struct led_classdev *ldev, struct device *dev)
-{
- if (!handle)
- return 0;
-
- return led_classdev_register(dev, ldev);
-}
-
-#define ASUS_LED_REGISTER(object, device) \
- asus_led_register(object##_set_handle, &object##_led, device)
-
-static int asus_led_init(struct device *dev)
-{
- int rv;
-
- rv = ASUS_LED_REGISTER(mled, dev);
- if (rv)
- goto out;
-
- rv = ASUS_LED_REGISTER(tled, dev);
- if (rv)
- goto out1;
-
- rv = ASUS_LED_REGISTER(rled, dev);
- if (rv)
- goto out2;
-
- rv = ASUS_LED_REGISTER(pled, dev);
- if (rv)
- goto out3;
-
- rv = ASUS_LED_REGISTER(gled, dev);
- if (rv)
- goto out4;
-
- if (kled_set_handle && kled_get_handle)
- rv = ASUS_LED_REGISTER(kled, dev);
- if (rv)
- goto out5;
-
- led_workqueue = create_singlethread_workqueue("led_workqueue");
- if (!led_workqueue)
- goto out6;
-
- return 0;
-out6:
- rv = -ENOMEM;
- ASUS_LED_UNREGISTER(kled);
-out5:
- ASUS_LED_UNREGISTER(gled);
-out4:
- ASUS_LED_UNREGISTER(pled);
-out3:
- ASUS_LED_UNREGISTER(rled);
-out2:
- ASUS_LED_UNREGISTER(tled);
-out1:
- ASUS_LED_UNREGISTER(mled);
-out:
- return rv;
-}
+static struct acpi_driver asus_acpi_driver = {
+ .name = ASUS_LAPTOP_NAME,
+ .class = ASUS_LAPTOP_CLASS,
+ .owner = THIS_MODULE,
+ .ids = asus_device_ids,
+ .flags = ACPI_DRIVER_ALL_NOTIFY_EVENTS,
+ .ops = {
+ .add = asus_acpi_add,
+ .remove = asus_acpi_remove,
+ .notify = asus_acpi_notify,
+ },
+};
static int __init asus_laptop_init(void)
{
int result;
- result = acpi_bus_register_driver(&asus_hotk_driver);
+ result = platform_driver_register(&platform_driver);
if (result < 0)
return result;
- /*
- * This is a bit of a kludge. We only want this module loaded
- * for ASUS systems, but there's currently no way to probe the
- * ACPI namespace for ASUS HIDs. So we just return failure if
- * we didn't find one, which will cause the module to be
- * unloaded.
- */
- if (!asus_hotk_found) {
- acpi_bus_unregister_driver(&asus_hotk_driver);
- return -ENODEV;
+ result = acpi_bus_register_driver(&asus_acpi_driver);
+ if (result < 0)
+ goto fail_acpi_driver;
+ if (!asus_device_present) {
+ result = -ENODEV;
+ goto fail_no_device;
}
-
- result = asus_input_init();
- if (result)
- goto fail_input;
-
- /* Register platform stuff */
- result = platform_driver_register(&asuspf_driver);
- if (result)
- goto fail_platform_driver;
-
- asuspf_device = platform_device_alloc(ASUS_HOTK_FILE, -1);
- if (!asuspf_device) {
- result = -ENOMEM;
- goto fail_platform_device1;
- }
-
- result = platform_device_add(asuspf_device);
- if (result)
- goto fail_platform_device2;
-
- result = sysfs_create_group(&asuspf_device->dev.kobj,
- &asuspf_attribute_group);
- if (result)
- goto fail_sysfs;
-
- result = asus_led_init(&asuspf_device->dev);
- if (result)
- goto fail_led;
-
- if (!acpi_video_backlight_support()) {
- result = asus_backlight_init(&asuspf_device->dev);
- if (result)
- goto fail_backlight;
- } else
- pr_info("Brightness ignored, must be controlled by "
- "ACPI video driver\n");
-
return 0;
-fail_backlight:
- asus_led_exit();
-
-fail_led:
- sysfs_remove_group(&asuspf_device->dev.kobj,
- &asuspf_attribute_group);
-
-fail_sysfs:
- platform_device_del(asuspf_device);
-
-fail_platform_device2:
- platform_device_put(asuspf_device);
-
-fail_platform_device1:
- platform_driver_unregister(&asuspf_driver);
-
-fail_platform_driver:
- asus_input_exit();
-
-fail_input:
-
+fail_no_device:
+ acpi_bus_unregister_driver(&asus_acpi_driver);
+fail_acpi_driver:
+ platform_driver_unregister(&platform_driver);
return result;
}
+static void __exit asus_laptop_exit(void)
+{
+ acpi_bus_unregister_driver(&asus_acpi_driver);
+ platform_driver_unregister(&platform_driver);
+}
+
module_init(asus_laptop_init);
module_exit(asus_laptop_exit);
diff --git a/drivers/platform/x86/asus_acpi.c b/drivers/platform/x86/asus_acpi.c
index c1d2aee..1381430 100644
--- a/drivers/platform/x86/asus_acpi.c
+++ b/drivers/platform/x86/asus_acpi.c
@@ -1225,9 +1225,8 @@
else if (strncmp(model, "M2N", 3) == 0 ||
strncmp(model, "M3N", 3) == 0 ||
strncmp(model, "M5N", 3) == 0 ||
- strncmp(model, "M6N", 3) == 0 ||
strncmp(model, "S1N", 3) == 0 ||
- strncmp(model, "S5N", 3) == 0 || strncmp(model, "W1N", 3) == 0)
+ strncmp(model, "S5N", 3) == 0)
return xxN;
else if (strncmp(model, "M1", 2) == 0)
return M1A;
diff --git a/drivers/platform/x86/classmate-laptop.c b/drivers/platform/x86/classmate-laptop.c
index 8cb20e4..035a7dd 100644
--- a/drivers/platform/x86/classmate-laptop.c
+++ b/drivers/platform/x86/classmate-laptop.c
@@ -507,6 +507,10 @@
KEY_BRIGHTNESSDOWN,
KEY_BRIGHTNESSUP,
KEY_VENDOR,
+ KEY_UNKNOWN,
+ KEY_CAMERA,
+ KEY_BACK,
+ KEY_FORWARD,
KEY_MAX
};
diff --git a/drivers/platform/x86/dell-laptop.c b/drivers/platform/x86/dell-laptop.c
index b7f4d27..ef61497 100644
--- a/drivers/platform/x86/dell-laptop.c
+++ b/drivers/platform/x86/dell-laptop.c
@@ -132,8 +132,8 @@
};
static struct calling_interface_buffer *buffer;
-struct page *bufferpage;
-DEFINE_MUTEX(buffer_mutex);
+static struct page *bufferpage;
+static DEFINE_MUTEX(buffer_mutex);
static int hwswitch_state;
@@ -580,6 +580,7 @@
fail_backlight:
i8042_remove_filter(dell_laptop_i8042_filter);
+ cancel_delayed_work_sync(&dell_rfkill_work);
fail_filter:
dell_cleanup_rfkill();
fail_rfkill:
@@ -597,12 +598,12 @@
static void __exit dell_exit(void)
{
- cancel_delayed_work_sync(&dell_rfkill_work);
i8042_remove_filter(dell_laptop_i8042_filter);
+ cancel_delayed_work_sync(&dell_rfkill_work);
backlight_device_unregister(dell_backlight_device);
dell_cleanup_rfkill();
if (platform_device) {
- platform_device_del(platform_device);
+ platform_device_unregister(platform_device);
platform_driver_unregister(&platform_driver);
}
kfree(da_tokens);
diff --git a/drivers/platform/x86/eeepc-laptop.c b/drivers/platform/x86/eeepc-laptop.c
index e2be6bb..9a844ca 100644
--- a/drivers/platform/x86/eeepc-laptop.c
+++ b/drivers/platform/x86/eeepc-laptop.c
@@ -578,6 +578,8 @@
struct pci_dev *dev;
struct pci_bus *bus;
bool blocked = eeepc_wlan_rfkill_blocked(eeepc);
+ bool absent;
+ u32 l;
if (eeepc->wlan_rfkill)
rfkill_set_sw_state(eeepc->wlan_rfkill, blocked);
@@ -591,6 +593,22 @@
goto out_unlock;
}
+ if (pci_bus_read_config_dword(bus, 0, PCI_VENDOR_ID, &l)) {
+ pr_err("Unable to read PCI config space?\n");
+ goto out_unlock;
+ }
+ absent = (l == 0xffffffff);
+
+ if (blocked != absent) {
+ pr_warning("BIOS says wireless lan is %s, "
+ "but the pci device is %s\n",
+ blocked ? "blocked" : "unblocked",
+ absent ? "absent" : "present");
+ pr_warning("skipped wireless hotplug as probably "
+ "inappropriate for this model\n");
+ goto out_unlock;
+ }
+
if (!blocked) {
dev = pci_get_slot(bus, 0);
if (dev) {
@@ -1277,7 +1295,8 @@
* hotplug code. In fact, current hotplug code seems to unplug another
* device...
*/
- if (strcmp(model, "1005HA") == 0 || strcmp(model, "1201N") == 0) {
+ if (strcmp(model, "1005HA") == 0 || strcmp(model, "1201N") == 0 ||
+ strcmp(model, "1005PE") == 0) {
eeepc->hotplug_disabled = true;
pr_info("wlan hotplug disabled\n");
}
diff --git a/drivers/platform/x86/thinkpad_acpi.c b/drivers/platform/x86/thinkpad_acpi.c
index eb603f1..e7b0c3b 100644
--- a/drivers/platform/x86/thinkpad_acpi.c
+++ b/drivers/platform/x86/thinkpad_acpi.c
@@ -286,6 +286,7 @@
char param[32];
int (*init) (struct ibm_init_struct *);
+ mode_t base_procfs_mode;
struct ibm_struct *data;
};
@@ -2082,6 +2083,7 @@
static void tpacpi_driver_event(const unsigned int hkey_event);
static void hotkey_driver_event(const unsigned int scancode);
+static void hotkey_poll_setup(const bool may_warn);
/* HKEY.MHKG() return bits */
#define TP_HOTKEY_TABLET_MASK (1 << 3)
@@ -2264,6 +2266,8 @@
rc = hotkey_mask_set((hotkey_acpi_mask | hotkey_driver_mask) &
~hotkey_source_mask);
+ hotkey_poll_setup(true);
+
mutex_unlock(&hotkey_mutex);
return rc;
@@ -2548,7 +2552,7 @@
}
/* call with hotkey_mutex held */
-static void hotkey_poll_setup(bool may_warn)
+static void hotkey_poll_setup(const bool may_warn)
{
const u32 poll_driver_mask = hotkey_driver_mask & hotkey_source_mask;
const u32 poll_user_mask = hotkey_user_mask & hotkey_source_mask;
@@ -2579,7 +2583,7 @@
}
}
-static void hotkey_poll_setup_safe(bool may_warn)
+static void hotkey_poll_setup_safe(const bool may_warn)
{
mutex_lock(&hotkey_mutex);
hotkey_poll_setup(may_warn);
@@ -2597,7 +2601,11 @@
#else /* CONFIG_THINKPAD_ACPI_HOTKEY_POLL */
-static void hotkey_poll_setup_safe(bool __unused)
+static void hotkey_poll_setup(const bool __unused)
+{
+}
+
+static void hotkey_poll_setup_safe(const bool __unused)
{
}
@@ -2607,16 +2615,11 @@
{
switch (tpacpi_lifecycle) {
case TPACPI_LIFE_INIT:
- /*
- * hotkey_init will call hotkey_poll_setup_safe
- * at the appropriate moment
- */
- return 0;
- case TPACPI_LIFE_EXITING:
- return -EBUSY;
case TPACPI_LIFE_RUNNING:
hotkey_poll_setup_safe(false);
return 0;
+ case TPACPI_LIFE_EXITING:
+ return -EBUSY;
}
/* Should only happen if tpacpi_lifecycle is corrupt */
@@ -2627,7 +2630,7 @@
static void hotkey_inputdev_close(struct input_dev *dev)
{
/* disable hotkey polling when possible */
- if (tpacpi_lifecycle == TPACPI_LIFE_RUNNING &&
+ if (tpacpi_lifecycle != TPACPI_LIFE_EXITING &&
!(hotkey_source_mask & hotkey_driver_mask))
hotkey_poll_setup_safe(false);
}
@@ -3655,13 +3658,19 @@
break;
case 3:
/* 0x3000-0x3FFF: bay-related wakeups */
- if (hkey == TP_HKEY_EV_BAYEJ_ACK) {
+ switch (hkey) {
+ case TP_HKEY_EV_BAYEJ_ACK:
hotkey_autosleep_ack = 1;
printk(TPACPI_INFO
"bay ejected\n");
hotkey_wakeup_hotunplug_complete_notify_change();
known_ev = true;
- } else {
+ break;
+ case TP_HKEY_EV_OPTDRV_EJ:
+ /* FIXME: kick libata if SATA link offline */
+ known_ev = true;
+ break;
+ default:
known_ev = false;
}
break;
@@ -3870,7 +3879,7 @@
TP_ACPI_BLUETOOTH_HWPRESENT = 0x01, /* Bluetooth hw available */
TP_ACPI_BLUETOOTH_RADIOSSW = 0x02, /* Bluetooth radio enabled */
TP_ACPI_BLUETOOTH_RESUMECTRL = 0x04, /* Bluetooth state at resume:
- off / last state */
+ 0 = disable, 1 = enable */
};
enum {
@@ -3916,10 +3925,11 @@
}
#endif
- /* We make sure to keep TP_ACPI_BLUETOOTH_RESUMECTRL off */
- status = TP_ACPI_BLUETOOTH_RESUMECTRL;
if (state == TPACPI_RFK_RADIO_ON)
- status |= TP_ACPI_BLUETOOTH_RADIOSSW;
+ status = TP_ACPI_BLUETOOTH_RADIOSSW
+ | TP_ACPI_BLUETOOTH_RESUMECTRL;
+ else
+ status = 0;
if (!acpi_evalf(hkey_handle, NULL, "SBDC", "vd", status))
return -EIO;
@@ -4070,7 +4080,7 @@
TP_ACPI_WANCARD_HWPRESENT = 0x01, /* Wan hw available */
TP_ACPI_WANCARD_RADIOSSW = 0x02, /* Wan radio enabled */
TP_ACPI_WANCARD_RESUMECTRL = 0x04, /* Wan state at resume:
- off / last state */
+ 0 = disable, 1 = enable */
};
#define TPACPI_RFK_WWAN_SW_NAME "tpacpi_wwan_sw"
@@ -4107,10 +4117,11 @@
}
#endif
- /* We make sure to set TP_ACPI_WANCARD_RESUMECTRL */
- status = TP_ACPI_WANCARD_RESUMECTRL;
if (state == TPACPI_RFK_RADIO_ON)
- status |= TP_ACPI_WANCARD_RADIOSSW;
+ status = TP_ACPI_WANCARD_RADIOSSW
+ | TP_ACPI_WANCARD_RESUMECTRL;
+ else
+ status = 0;
if (!acpi_evalf(hkey_handle, NULL, "SWAN", "vd", status))
return -EIO;
@@ -4619,6 +4630,10 @@
return 0;
}
+ /* Even reads can crash X.org, so... */
+ if (!capable(CAP_SYS_ADMIN))
+ return -EPERM;
+
status = video_outputsw_get();
if (status < 0)
return status;
@@ -4652,6 +4667,10 @@
if (video_supported == TPACPI_VIDEO_NONE)
return -ENODEV;
+ /* Even reads can crash X.org, let alone writes... */
+ if (!capable(CAP_SYS_ADMIN))
+ return -EPERM;
+
enable = 0;
disable = 0;
@@ -6133,13 +6152,13 @@
TPACPI_Q_IBM('1', 'Y', TPACPI_BRGHT_Q_EC), /* T43/p ATI */
/* Models with ATI GPUs that can use ECNVRAM */
- TPACPI_Q_IBM('1', 'R', TPACPI_BRGHT_Q_EC),
+ TPACPI_Q_IBM('1', 'R', TPACPI_BRGHT_Q_EC), /* R50,51 T40-42 */
TPACPI_Q_IBM('1', 'Q', TPACPI_BRGHT_Q_ASK|TPACPI_BRGHT_Q_EC),
- TPACPI_Q_IBM('7', '6', TPACPI_BRGHT_Q_ASK|TPACPI_BRGHT_Q_EC),
+ TPACPI_Q_IBM('7', '6', TPACPI_BRGHT_Q_EC), /* R52 */
TPACPI_Q_IBM('7', '8', TPACPI_BRGHT_Q_ASK|TPACPI_BRGHT_Q_EC),
/* Models with Intel Extreme Graphics 2 */
- TPACPI_Q_IBM('1', 'U', TPACPI_BRGHT_Q_NOEC),
+ TPACPI_Q_IBM('1', 'U', TPACPI_BRGHT_Q_NOEC), /* X40 */
TPACPI_Q_IBM('1', 'V', TPACPI_BRGHT_Q_ASK|TPACPI_BRGHT_Q_EC),
TPACPI_Q_IBM('1', 'W', TPACPI_BRGHT_Q_ASK|TPACPI_BRGHT_Q_EC),
@@ -6522,7 +6541,8 @@
return volume_set_status_ec(status);
}
-static int volume_set_mute_ec(const bool mute)
+/* returns < 0 on error, 0 on no change, 1 on change */
+static int __volume_set_mute_ec(const bool mute)
{
int rc;
u8 s, n;
@@ -6537,22 +6557,37 @@
n = (mute) ? s | TP_EC_AUDIO_MUTESW_MSK :
s & ~TP_EC_AUDIO_MUTESW_MSK;
- if (n != s)
+ if (n != s) {
rc = volume_set_status_ec(n);
+ if (!rc)
+ rc = 1;
+ }
unlock:
mutex_unlock(&volume_mutex);
return rc;
}
-static int volume_set_mute(const bool mute)
+static int volume_alsa_set_mute(const bool mute)
{
- dbg_printk(TPACPI_DBG_MIXER, "trying to %smute\n",
+ dbg_printk(TPACPI_DBG_MIXER, "ALSA: trying to %smute\n",
(mute) ? "" : "un");
- return volume_set_mute_ec(mute);
+ return __volume_set_mute_ec(mute);
}
-static int volume_set_volume_ec(const u8 vol)
+static int volume_set_mute(const bool mute)
+{
+ int rc;
+
+ dbg_printk(TPACPI_DBG_MIXER, "trying to %smute\n",
+ (mute) ? "" : "un");
+
+ rc = __volume_set_mute_ec(mute);
+ return (rc < 0) ? rc : 0;
+}
+
+/* returns < 0 on error, 0 on no change, 1 on change */
+static int __volume_set_volume_ec(const u8 vol)
{
int rc;
u8 s, n;
@@ -6569,19 +6604,22 @@
n = (s & ~TP_EC_AUDIO_LVL_MSK) | vol;
- if (n != s)
+ if (n != s) {
rc = volume_set_status_ec(n);
+ if (!rc)
+ rc = 1;
+ }
unlock:
mutex_unlock(&volume_mutex);
return rc;
}
-static int volume_set_volume(const u8 vol)
+static int volume_alsa_set_volume(const u8 vol)
{
dbg_printk(TPACPI_DBG_MIXER,
- "trying to set volume level to %hu\n", vol);
- return volume_set_volume_ec(vol);
+ "ALSA: trying to set volume level to %hu\n", vol);
+ return __volume_set_volume_ec(vol);
}
static void volume_alsa_notify_change(void)
@@ -6628,7 +6666,7 @@
static int volume_alsa_vol_put(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *ucontrol)
{
- return volume_set_volume(ucontrol->value.integer.value[0]);
+ return volume_alsa_set_volume(ucontrol->value.integer.value[0]);
}
#define volume_alsa_mute_info snd_ctl_boolean_mono_info
@@ -6651,7 +6689,7 @@
static int volume_alsa_mute_put(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *ucontrol)
{
- return volume_set_mute(!ucontrol->value.integer.value[0]);
+ return volume_alsa_set_mute(!ucontrol->value.integer.value[0]);
}
static struct snd_kcontrol_new volume_alsa_control_vol __devinitdata = {
@@ -8477,9 +8515,10 @@
"%s installed\n", ibm->name);
if (ibm->read) {
- mode_t mode;
+ mode_t mode = iibm->base_procfs_mode;
- mode = S_IRUGO;
+ if (!mode)
+ mode = S_IRUGO;
if (ibm->write)
mode |= S_IWUSR;
entry = proc_create_data(ibm->name, mode, proc_dir,
@@ -8670,6 +8709,7 @@
#ifdef CONFIG_THINKPAD_ACPI_VIDEO
{
.init = video_init,
+ .base_procfs_mode = S_IRUSR,
.data = &video_driver_data,
},
#endif
@@ -9032,6 +9072,9 @@
return ret;
}
}
+
+ tpacpi_lifecycle = TPACPI_LIFE_RUNNING;
+
ret = input_register_device(tpacpi_inputdev);
if (ret < 0) {
printk(TPACPI_ERR "unable to register input device\n");
@@ -9041,7 +9084,6 @@
tp_features.input_device_registered = 1;
}
- tpacpi_lifecycle = TPACPI_LIFE_RUNNING;
return 0;
}
diff --git a/drivers/platform/x86/toshiba_acpi.c b/drivers/platform/x86/toshiba_acpi.c
index 26c2117..405b969 100644
--- a/drivers/platform/x86/toshiba_acpi.c
+++ b/drivers/platform/x86/toshiba_acpi.c
@@ -814,21 +814,23 @@
if (hci_result == HCI_SUCCESS) {
if (value == 0x100)
continue;
- else if (value & 0x80) {
- key = toshiba_acpi_get_entry_by_scancode
- (value & ~0x80);
- if (!key) {
- printk(MY_INFO "Unknown key %x\n",
- value & ~0x80);
- continue;
- }
- input_report_key(toshiba_acpi.hotkey_dev,
- key->keycode, 1);
- input_sync(toshiba_acpi.hotkey_dev);
- input_report_key(toshiba_acpi.hotkey_dev,
- key->keycode, 0);
- input_sync(toshiba_acpi.hotkey_dev);
+ /* act on key press; ignore key release */
+ if (value & 0x80)
+ continue;
+
+ key = toshiba_acpi_get_entry_by_scancode
+ (value);
+ if (!key) {
+ printk(MY_INFO "Unknown key %x\n",
+ value);
+ continue;
}
+ input_report_key(toshiba_acpi.hotkey_dev,
+ key->keycode, 1);
+ input_sync(toshiba_acpi.hotkey_dev);
+ input_report_key(toshiba_acpi.hotkey_dev,
+ key->keycode, 0);
+ input_sync(toshiba_acpi.hotkey_dev);
} else if (hci_result == HCI_NOT_SUPPORTED) {
/* This is a workaround for an unresolved issue on
* some machines where system events sporadically
diff --git a/drivers/serial/68328serial.c b/drivers/serial/68328serial.c
index d935b2d..ae0251e 100644
--- a/drivers/serial/68328serial.c
+++ b/drivers/serial/68328serial.c
@@ -153,8 +153,6 @@
0, 50, 75, 110, 134, 150, 200, 300, 600, 1200, 1800, 2400, 4800,
9600, 19200, 38400, 57600, 115200, 0 };
-#define BAUD_TABLE_SIZE (sizeof(baud_table)/sizeof(baud_table[0]))
-
/* Sets or clears DTR/RTS on the requested line */
static inline void m68k_rtsdtr(struct m68k_serial *ss, int set)
{
@@ -1406,10 +1404,10 @@
USTCNT = ustcnt & ~USTCNT_TXEN;
again:
- for (i = 0; i < sizeof(baud_table) / sizeof(baud_table[0]); i++)
+ for (i = 0; i < ARRAY_SIZE(baud_table); i++)
if (baud_table[i] == m68328_console_baud)
break;
- if (i >= sizeof(baud_table) / sizeof(baud_table[0])) {
+ if (i >= ARRAY_SIZE(baud_table)) {
m68328_console_baud = 9600;
goto again;
}
@@ -1435,7 +1433,7 @@
if (arg)
n = simple_strtoul(arg,NULL,0);
- for (i = 0; i < BAUD_TABLE_SIZE; i++)
+ for (i = 0; i < ARRAY_SIZE(baud_table); i++)
if (baud_table[i] == n)
break;
- if (i < BAUD_TABLE_SIZE) {
+ if (i < ARRAY_SIZE(baud_table)) {
diff --git a/drivers/serial/8250.c b/drivers/serial/8250.c
index a81ff7b..7c4ebe6 100644
--- a/drivers/serial/8250.c
+++ b/drivers/serial/8250.c
@@ -2690,6 +2690,15 @@
}
}
+static void
+serial8250_init_fixed_type_port(struct uart_8250_port *up, unsigned int type)
+{
+ up->port.type = type;
+ up->port.fifosize = uart_config[type].fifo_size;
+ up->capabilities = uart_config[type].flags;
+ up->tx_loadsz = uart_config[type].tx_loadsz;
+}
+
static void __init
serial8250_register_ports(struct uart_driver *drv, struct device *dev)
{
@@ -2706,6 +2715,10 @@
struct uart_8250_port *up = &serial8250_ports[i];
up->port.dev = dev;
+
+ if (up->port.flags & UPF_FIXED_TYPE)
+ serial8250_init_fixed_type_port(up, up->port.type);
+
uart_add_one_port(drv, &up->port);
}
}
@@ -3118,12 +3131,8 @@
if (port->dev)
uart->port.dev = port->dev;
- if (port->flags & UPF_FIXED_TYPE) {
- uart->port.type = port->type;
- uart->port.fifosize = uart_config[port->type].fifo_size;
- uart->capabilities = uart_config[port->type].flags;
- uart->tx_loadsz = uart_config[port->type].tx_loadsz;
- }
+ if (port->flags & UPF_FIXED_TYPE)
+ serial8250_init_fixed_type_port(uart, port->type);
set_io_from_upio(&uart->port);
/* Possibly override default I/O functions. */
diff --git a/drivers/serial/8250_pci.c b/drivers/serial/8250_pci.c
index b28af13..01c012d 100644
--- a/drivers/serial/8250_pci.c
+++ b/drivers/serial/8250_pci.c
@@ -760,7 +760,8 @@
/* subdevice 0x00PS means <P> parallel, <S> serial */
unsigned int num_serial = dev->subsystem_device & 0xf;
- if (dev->device == PCI_DEVICE_ID_NETMOS_9901)
+ if ((dev->device == PCI_DEVICE_ID_NETMOS_9901) ||
+ (dev->device == PCI_DEVICE_ID_NETMOS_9865))
return 0;
if (dev->subsystem_vendor == PCI_VENDOR_ID_IBM &&
dev->subsystem_device == 0x0299)
@@ -1479,6 +1480,7 @@
pbn_b0_bt_1_115200,
pbn_b0_bt_2_115200,
+ pbn_b0_bt_4_115200,
pbn_b0_bt_8_115200,
pbn_b0_bt_1_460800,
@@ -1703,6 +1705,12 @@
.base_baud = 115200,
.uart_offset = 8,
},
+ [pbn_b0_bt_4_115200] = {
+ .flags = FL_BASE0|FL_BASE_BARS,
+ .num_ports = 4,
+ .base_baud = 115200,
+ .uart_offset = 8,
+ },
[pbn_b0_bt_8_115200] = {
.flags = FL_BASE0|FL_BASE_BARS,
.num_ports = 8,
@@ -3191,6 +3199,15 @@
0x1208, 0x0004, 0, 0,
pbn_b0_4_921600 },
+ { PCI_VENDOR_ID_KORENIX, PCI_DEVICE_ID_KORENIX_JETCARDF2,
+ 0x1204, 0x0004, 0, 0,
+ pbn_b0_4_921600 },
+ { PCI_VENDOR_ID_KORENIX, PCI_DEVICE_ID_KORENIX_JETCARDF2,
+ 0x1208, 0x0004, 0, 0,
+ pbn_b0_4_921600 },
+ { PCI_VENDOR_ID_KORENIX, PCI_DEVICE_ID_KORENIX_JETCARDF3,
+ 0x1208, 0x0004, 0, 0,
+ pbn_b0_4_921600 },
/*
* Dell Remote Access Card 4 - Tim_T_Murphy@Dell.com
*/
@@ -3649,6 +3666,18 @@
0, 0, pbn_b0_1_115200 },
/*
+ * Best Connectivity PCI Multi I/O cards
+ */
+
+ { PCI_VENDOR_ID_NETMOS, PCI_DEVICE_ID_NETMOS_9865,
+ 0xA000, 0x1000,
+ 0, 0, pbn_b0_1_115200 },
+
+ { PCI_VENDOR_ID_NETMOS, PCI_DEVICE_ID_NETMOS_9865,
+ 0xA000, 0x3004,
+ 0, 0, pbn_b0_bt_4_115200 },
+
+ /*
* These entries match devices with class COMMUNICATION_SERIAL,
* COMMUNICATION_MODEM or COMMUNICATION_MULTISERIAL
*/
diff --git a/drivers/serial/Kconfig b/drivers/serial/Kconfig
index 888a0ce..746e070 100644
--- a/drivers/serial/Kconfig
+++ b/drivers/serial/Kconfig
@@ -1418,42 +1418,37 @@
To compile this driver as a module, choose M here: the
module will be called bfin_sport_uart.
-choice
- prompt "Baud rate for Blackfin SPORT UART"
- depends on SERIAL_BFIN_SPORT
- default SERIAL_SPORT_BAUD_RATE_57600
+config SERIAL_BFIN_SPORT_CONSOLE
+ bool "Console on Blackfin sport emulated uart"
+ depends on SERIAL_BFIN_SPORT=y
+ select SERIAL_CORE_CONSOLE
+
+config SERIAL_BFIN_SPORT0_UART
+ bool "Enable UART over SPORT0"
+ depends on SERIAL_BFIN_SPORT && !(BF542 || BF542M || BF544 || BF544M)
help
- Choose a baud rate for the SPORT UART, other uart settings are
- 8 bit, 1 stop bit, no parity, no flow control.
+ Enable UART over SPORT0
-config SERIAL_SPORT_BAUD_RATE_115200
- bool "115200"
-
-config SERIAL_SPORT_BAUD_RATE_57600
- bool "57600"
-
-config SERIAL_SPORT_BAUD_RATE_38400
- bool "38400"
-
-config SERIAL_SPORT_BAUD_RATE_19200
- bool "19200"
-
-config SERIAL_SPORT_BAUD_RATE_9600
- bool "9600"
-endchoice
-
-config SPORT_BAUD_RATE
- int
+config SERIAL_BFIN_SPORT1_UART
+ bool "Enable UART over SPORT1"
depends on SERIAL_BFIN_SPORT
- default 115200 if (SERIAL_SPORT_BAUD_RATE_115200)
- default 57600 if (SERIAL_SPORT_BAUD_RATE_57600)
- default 38400 if (SERIAL_SPORT_BAUD_RATE_38400)
- default 19200 if (SERIAL_SPORT_BAUD_RATE_19200)
- default 9600 if (SERIAL_SPORT_BAUD_RATE_9600)
+ help
+ Enable UART over SPORT1
+
+config SERIAL_BFIN_SPORT2_UART
+ bool "Enable UART over SPORT2"
+ depends on SERIAL_BFIN_SPORT && (BF54x || BF538 || BF539)
+ help
+ Enable UART over SPORT2
+
+config SERIAL_BFIN_SPORT3_UART
+ bool "Enable UART over SPORT3"
+ depends on SERIAL_BFIN_SPORT && (BF54x || BF538 || BF539)
+ help
+ Enable UART over SPORT3
config SERIAL_TIMBERDALE
tristate "Support for timberdale UART"
- depends on MFD_TIMBERDALE
select SERIAL_CORE
---help---
Add support for UART controller on timberdale.
diff --git a/drivers/serial/atmel_serial.c b/drivers/serial/atmel_serial.c
index 9d948bc..2c9bf9b 100644
--- a/drivers/serial/atmel_serial.c
+++ b/drivers/serial/atmel_serial.c
@@ -1213,6 +1213,24 @@
return ret;
}
+#ifdef CONFIG_CONSOLE_POLL
+static int atmel_poll_get_char(struct uart_port *port)
+{
+ while (!(UART_GET_CSR(port) & ATMEL_US_RXRDY))
+ cpu_relax();
+
+ return UART_GET_CHAR(port);
+}
+
+static void atmel_poll_put_char(struct uart_port *port, unsigned char ch)
+{
+ while (!(UART_GET_CSR(port) & ATMEL_US_TXRDY))
+ cpu_relax();
+
+ UART_PUT_CHAR(port, ch);
+}
+#endif
+
static struct uart_ops atmel_pops = {
.tx_empty = atmel_tx_empty,
.set_mctrl = atmel_set_mctrl,
@@ -1232,6 +1250,10 @@
.config_port = atmel_config_port,
.verify_port = atmel_verify_port,
.pm = atmel_serial_pm,
+#ifdef CONFIG_CONSOLE_POLL
+ .poll_get_char = atmel_poll_get_char,
+ .poll_put_char = atmel_poll_put_char,
+#endif
};
/*
diff --git a/drivers/serial/bcm63xx_uart.c b/drivers/serial/bcm63xx_uart.c
index 37ad0c4..a1a0e55 100644
--- a/drivers/serial/bcm63xx_uart.c
+++ b/drivers/serial/bcm63xx_uart.c
@@ -35,7 +35,7 @@
#include <bcm63xx_regs.h>
#include <bcm63xx_io.h>
-#define BCM63XX_NR_UARTS 1
+#define BCM63XX_NR_UARTS 2
static struct uart_port ports[BCM63XX_NR_UARTS];
@@ -784,7 +784,7 @@
.dev_name = "ttyS",
.major = TTY_MAJOR,
.minor = 64,
- .nr = 1,
+ .nr = BCM63XX_NR_UARTS,
.cons = BCM63XX_CONSOLE,
};
@@ -826,11 +826,12 @@
port->dev = &pdev->dev;
port->fifosize = 16;
port->uartclk = clk_get_rate(clk) / 2;
+ port->line = pdev->id;
clk_put(clk);
ret = uart_add_one_port(&bcm_uart_driver, port);
if (ret) {
- kfree(port);
+ ports[pdev->id].membase = 0;
return ret;
}
platform_set_drvdata(pdev, port);
diff --git a/drivers/serial/bfin_5xx.c b/drivers/serial/bfin_5xx.c
index 50abb7e..fcf273e 100644
--- a/drivers/serial/bfin_5xx.c
+++ b/drivers/serial/bfin_5xx.c
@@ -14,6 +14,7 @@
#include <linux/module.h>
#include <linux/ioport.h>
+#include <linux/io.h>
#include <linux/init.h>
#include <linux/console.h>
#include <linux/sysrq.h>
@@ -237,7 +238,8 @@
#if defined(CONFIG_KGDB_SERIAL_CONSOLE) || \
defined(CONFIG_KGDB_SERIAL_CONSOLE_MODULE)
- if (kgdb_connected && kgdboc_port_line == uart->port.line)
+ if (kgdb_connected && kgdboc_port_line == uart->port.line
+ && kgdboc_break_enabled)
if (ch == 0x3) {/* Ctrl + C */
kgdb_breakpoint();
return;
@@ -488,6 +490,7 @@
{
int x_pos, pos;
+ dma_disable_irq(uart->tx_dma_channel);
dma_disable_irq(uart->rx_dma_channel);
spin_lock_bh(&uart->port.lock);
@@ -521,6 +524,7 @@
}
spin_unlock_bh(&uart->port.lock);
+ dma_enable_irq(uart->tx_dma_channel);
dma_enable_irq(uart->rx_dma_channel);
mod_timer(&(uart->rx_dma_timer), jiffies + DMA_RX_FLUSH_JIFFIES);
@@ -746,15 +750,6 @@
Status interrupt.\n");
}
- if (uart->cts_pin >= 0) {
- gpio_request(uart->cts_pin, DRIVER_NAME);
- gpio_direction_output(uart->cts_pin, 1);
- }
- if (uart->rts_pin >= 0) {
- gpio_request(uart->rts_pin, DRIVER_NAME);
- gpio_direction_output(uart->rts_pin, 0);
- }
-
/* CTS RTS PINs are negative assertive. */
UART_PUT_MCR(uart, ACTS);
UART_SET_IER(uart, EDSSI);
@@ -801,10 +796,6 @@
gpio_free(uart->rts_pin);
#endif
#ifdef CONFIG_SERIAL_BFIN_HARD_CTSRTS
- if (uart->cts_pin >= 0)
- gpio_free(uart->cts_pin);
- if (uart->rts_pin >= 0)
- gpio_free(uart->rts_pin);
if (UART_GET_IER(uart) && EDSSI)
free_irq(uart->status_irq, uart);
#endif
@@ -1409,8 +1400,7 @@
continue;
uart_remove_one_port(&bfin_serial_reg, &bfin_serial_ports[i].port);
bfin_serial_ports[i].port.dev = NULL;
-#if defined(CONFIG_SERIAL_BFIN_CTSRTS) || \
- defined(CONFIG_SERIAL_BFIN_HARD_CTSRTS)
+#if defined(CONFIG_SERIAL_BFIN_CTSRTS)
gpio_free(bfin_serial_ports[i].cts_pin);
gpio_free(bfin_serial_ports[i].rts_pin);
#endif
diff --git a/drivers/serial/bfin_sport_uart.c b/drivers/serial/bfin_sport_uart.c
index 088bb35..7c72888 100644
--- a/drivers/serial/bfin_sport_uart.c
+++ b/drivers/serial/bfin_sport_uart.c
@@ -1,27 +1,11 @@
/*
- * File: linux/drivers/serial/bfin_sport_uart.c
+ * Blackfin On-Chip Sport Emulated UART Driver
*
- * Based on: drivers/serial/bfin_5xx.c by Aubrey Li.
- * Author: Roy Huang <roy.huang@analog.com>
+ * Copyright 2006-2009 Analog Devices Inc.
*
- * Created: Nov 22, 2006
- * Copyright: (c) 2006-2007 Analog Devices Inc.
- * Description: this driver enable SPORTs on Blackfin emulate UART.
+ * Enter bugs at http://blackfin.uclinux.org/
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, see the file COPYING, or write
- * to the Free Software Foundation, Inc.,
- * 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ * Licensed under the GPL-2 or later.
*/
/*
@@ -29,39 +13,18 @@
* http://www.analog.com/UploadedFiles/Application_Notes/399447663EE191.pdf
* This application note describe how to implement a UART on a Sharc DSP,
* but this driver is implemented on Blackfin Processor.
+ * Transmit Frame Sync is not used by this driver to transfer data out.
*/
-/* After reset, there is a prelude of low level pulse when transmit data first
- * time. No addtional pulse in following transmit.
- * According to document:
- * The SPORTs are ready to start transmitting or receiving data no later than
- * three serial clock cycles after they are enabled in the SPORTx_TCR1 or
- * SPORTx_RCR1 register. No serial clock cycles are lost from this point on.
- * The first internal frame sync will occur one frame sync delay after the
- * SPORTs are ready. External frame syncs can occur as soon as the SPORT is
- * ready.
- */
+/* #define DEBUG */
-/* Thanks to Axel Alatalo <axel@rubico.se> for fixing sport rx bug. Sometimes
- * sport receives data incorrectly. The following is Axel's words.
- * As EE-191, sport rx samples 3 times of the UART baudrate and takes the
- * middle smaple of every 3 samples as the data bit. For a 8-N-1 UART setting,
- * 30 samples will be required for a byte. If transmitter sends a 1/3 bit short
- * byte due to buadrate drift, then the 30th sample of a byte, this sample is
- * also the third sample of the stop bit, will happens on the immediately
- * following start bit which will be thrown away and missed. Thus since parts
- * of the startbit will be missed and the receiver will begin to drift, the
- * effect accumulates over time until synchronization is lost.
- * If only require 2 samples of the stopbit (by sampling in total 29 samples),
- * then a to short byte as in the case above will be tolerated. Then the 1/3
- * early startbit will trigger a framesync since the last read is complete
- * after only 2/3 stopbit and framesync is active during the last 1/3 looking
- * for a possible early startbit. */
-
-//#define DEBUG
+#define DRV_NAME "bfin-sport-uart"
+#define DEVICE_NAME "ttySS"
+#define pr_fmt(fmt) DRV_NAME ": " fmt
#include <linux/module.h>
#include <linux/ioport.h>
+#include <linux/io.h>
#include <linux/init.h>
#include <linux/console.h>
#include <linux/sysrq.h>
@@ -75,23 +38,36 @@
#include "bfin_sport_uart.h"
+#ifdef CONFIG_SERIAL_BFIN_SPORT0_UART
unsigned short bfin_uart_pin_req_sport0[] =
{P_SPORT0_TFS, P_SPORT0_DTPRI, P_SPORT0_TSCLK, P_SPORT0_RFS, \
P_SPORT0_DRPRI, P_SPORT0_RSCLK, P_SPORT0_DRSEC, P_SPORT0_DTSEC, 0};
-
+#endif
+#ifdef CONFIG_SERIAL_BFIN_SPORT1_UART
unsigned short bfin_uart_pin_req_sport1[] =
{P_SPORT1_TFS, P_SPORT1_DTPRI, P_SPORT1_TSCLK, P_SPORT1_RFS, \
P_SPORT1_DRPRI, P_SPORT1_RSCLK, P_SPORT1_DRSEC, P_SPORT1_DTSEC, 0};
-
-#define DRV_NAME "bfin-sport-uart"
+#endif
+#ifdef CONFIG_SERIAL_BFIN_SPORT2_UART
+unsigned short bfin_uart_pin_req_sport2[] =
+ {P_SPORT2_TFS, P_SPORT2_DTPRI, P_SPORT2_TSCLK, P_SPORT2_RFS, \
+ P_SPORT2_DRPRI, P_SPORT2_RSCLK, P_SPORT2_DRSEC, P_SPORT2_DTSEC, 0};
+#endif
+#ifdef CONFIG_SERIAL_BFIN_SPORT3_UART
+unsigned short bfin_uart_pin_req_sport3[] =
+ {P_SPORT3_TFS, P_SPORT3_DTPRI, P_SPORT3_TSCLK, P_SPORT3_RFS, \
+ P_SPORT3_DRPRI, P_SPORT3_RSCLK, P_SPORT3_DRSEC, P_SPORT3_DTSEC, 0};
+#endif
struct sport_uart_port {
struct uart_port port;
- char *name;
-
- int tx_irq;
- int rx_irq;
int err_irq;
+ unsigned short csize;
+ unsigned short rxmask;
+ unsigned short txmask1;
+ unsigned short txmask2;
+ unsigned char stopb;
+/* unsigned char parib; */
};
static void sport_uart_tx_chars(struct sport_uart_port *up);
@@ -99,36 +75,42 @@
static inline void tx_one_byte(struct sport_uart_port *up, unsigned int value)
{
- pr_debug("%s value:%x\n", __func__, value);
- /* Place a Start and Stop bit */
+ pr_debug("%s value:%x, mask1=0x%x, mask2=0x%x\n", __func__, value,
+ up->txmask1, up->txmask2);
+
+ /* Place Start and Stop bits */
__asm__ __volatile__ (
- "R2 = b#01111111100;"
- "R3 = b#10000000001;"
- "%0 <<= 2;"
- "%0 = %0 & R2;"
- "%0 = %0 | R3;"
- : "=d"(value)
- : "d"(value)
- : "ASTAT", "R2", "R3"
+ "%[val] <<= 1;"
+ "%[val] = %[val] & %[mask1];"
+ "%[val] = %[val] | %[mask2];"
+ : [val]"+d"(value)
+ : [mask1]"d"(up->txmask1), [mask2]"d"(up->txmask2)
+ : "ASTAT"
);
pr_debug("%s value:%x\n", __func__, value);
SPORT_PUT_TX(up, value);
}
-static inline unsigned int rx_one_byte(struct sport_uart_port *up)
+static inline unsigned char rx_one_byte(struct sport_uart_port *up)
{
- unsigned int value, extract;
+ unsigned int value;
+ unsigned char extract;
u32 tmp_mask1, tmp_mask2, tmp_shift, tmp;
- value = SPORT_GET_RX32(up);
- pr_debug("%s value:%x\n", __func__, value);
+ if ((up->csize + up->stopb) > 7)
+ value = SPORT_GET_RX32(up);
+ else
+ value = SPORT_GET_RX(up);
- /* Extract 8 bits data */
+ pr_debug("%s value:%x, cs=%d, mask=0x%x\n", __func__, value,
+ up->csize, up->rxmask);
+
+ /* Extract data */
__asm__ __volatile__ (
"%[extr] = 0;"
- "%[mask1] = 0x1801(Z);"
- "%[mask2] = 0x0300(Z);"
+ "%[mask1] = %[rxmask];"
+ "%[mask2] = 0x0200(Z);"
"%[shift] = 0;"
"LSETUP(.Lloop_s, .Lloop_e) LC0 = %[lc];"
".Lloop_s:"
@@ -138,9 +120,9 @@
"%[mask1] = %[mask1] - %[mask2];"
".Lloop_e:"
"%[shift] += 1;"
- : [val]"=d"(value), [extr]"=d"(extract), [shift]"=d"(tmp_shift), [tmp]"=d"(tmp),
- [mask1]"=d"(tmp_mask1), [mask2]"=d"(tmp_mask2)
- : "d"(value), [lc]"a"(8)
+ : [extr]"=&d"(extract), [shift]"=&d"(tmp_shift), [tmp]"=&d"(tmp),
+ [mask1]"=&d"(tmp_mask1), [mask2]"=&d"(tmp_mask2)
+ : [val]"d"(value), [rxmask]"d"(up->rxmask), [lc]"a"(up->csize)
: "ASTAT", "LB0", "LC0", "LT0"
);
@@ -148,29 +130,28 @@
return extract;
}
-static int sport_uart_setup(struct sport_uart_port *up, int sclk, int baud_rate)
+static int sport_uart_setup(struct sport_uart_port *up, int size, int baud_rate)
{
- int tclkdiv, tfsdiv, rclkdiv;
+ int tclkdiv, rclkdiv;
+ unsigned int sclk = get_sclk();
- /* Set TCR1 and TCR2 */
- SPORT_PUT_TCR1(up, (LATFS | ITFS | TFSR | TLSBIT | ITCLK));
- SPORT_PUT_TCR2(up, 10);
+ /* Set TCR1 and TCR2, TFSR is not enabled for uart */
+ SPORT_PUT_TCR1(up, (ITFS | TLSBIT | ITCLK));
+ SPORT_PUT_TCR2(up, size + 1);
pr_debug("%s TCR1:%x, TCR2:%x\n", __func__, SPORT_GET_TCR1(up), SPORT_GET_TCR2(up));
/* Set RCR1 and RCR2 */
SPORT_PUT_RCR1(up, (RCKFE | LARFS | LRFS | RFSR | IRCLK));
- SPORT_PUT_RCR2(up, 28);
+ SPORT_PUT_RCR2(up, (size + 1) * 2 - 1);
pr_debug("%s RCR1:%x, RCR2:%x\n", __func__, SPORT_GET_RCR1(up), SPORT_GET_RCR2(up));
- tclkdiv = sclk/(2 * baud_rate) - 1;
- tfsdiv = 12;
- rclkdiv = sclk/(2 * baud_rate * 3) - 1;
+ tclkdiv = sclk / (2 * baud_rate) - 1;
+ rclkdiv = sclk / (2 * baud_rate * 2) - 1;
SPORT_PUT_TCLKDIV(up, tclkdiv);
- SPORT_PUT_TFSDIV(up, tfsdiv);
SPORT_PUT_RCLKDIV(up, rclkdiv);
SSYNC();
- pr_debug("%s sclk:%d, baud_rate:%d, tclkdiv:%d, tfsdiv:%d, rclkdiv:%d\n",
- __func__, sclk, baud_rate, tclkdiv, tfsdiv, rclkdiv);
+ pr_debug("%s sclk:%d, baud_rate:%d, tclkdiv:%d, rclkdiv:%d\n",
+ __func__, sclk, baud_rate, tclkdiv, rclkdiv);
return 0;
}
@@ -181,23 +162,29 @@
struct tty_struct *tty = up->port.state->port.tty;
unsigned int ch;
- do {
+ spin_lock(&up->port.lock);
+
+ while (SPORT_GET_STAT(up) & RXNE) {
ch = rx_one_byte(up);
up->port.icount.rx++;
- if (uart_handle_sysrq_char(&up->port, ch))
- ;
- else
+ if (!uart_handle_sysrq_char(&up->port, ch))
tty_insert_flip_char(tty, ch, TTY_NORMAL);
- } while (SPORT_GET_STAT(up) & RXNE);
+ }
tty_flip_buffer_push(tty);
+ spin_unlock(&up->port.lock);
+
return IRQ_HANDLED;
}
static irqreturn_t sport_uart_tx_irq(int irq, void *dev_id)
{
- sport_uart_tx_chars(dev_id);
+ struct sport_uart_port *up = dev_id;
+
+ spin_lock(&up->port.lock);
+ sport_uart_tx_chars(up);
+ spin_unlock(&up->port.lock);
return IRQ_HANDLED;
}
@@ -208,6 +195,8 @@
struct tty_struct *tty = up->port.state->port.tty;
unsigned int stat = SPORT_GET_STAT(up);
+ spin_lock(&up->port.lock);
+
/* Overflow in RX FIFO */
if (stat & ROVF) {
up->port.icount.overrun++;
@@ -216,15 +205,16 @@
}
/* These should not happen */
if (stat & (TOVF | TUVF | RUVF)) {
- printk(KERN_ERR "SPORT Error:%s %s %s\n",
- (stat & TOVF)?"TX overflow":"",
- (stat & TUVF)?"TX underflow":"",
- (stat & RUVF)?"RX underflow":"");
+ pr_err("SPORT Error:%s %s %s\n",
+ (stat & TOVF) ? "TX overflow" : "",
+ (stat & TUVF) ? "TX underflow" : "",
+ (stat & RUVF) ? "RX underflow" : "");
SPORT_PUT_TCR1(up, SPORT_GET_TCR1(up) & ~TSPEN);
SPORT_PUT_RCR1(up, SPORT_GET_RCR1(up) & ~RSPEN);
}
SSYNC();
+ spin_unlock(&up->port.lock);
return IRQ_HANDLED;
}
@@ -232,60 +222,37 @@
static int sport_startup(struct uart_port *port)
{
struct sport_uart_port *up = (struct sport_uart_port *)port;
- char buffer[20];
- int retval;
+ int ret;
pr_debug("%s enter\n", __func__);
- snprintf(buffer, 20, "%s rx", up->name);
- retval = request_irq(up->rx_irq, sport_uart_rx_irq, IRQF_SAMPLE_RANDOM, buffer, up);
- if (retval) {
- printk(KERN_ERR "Unable to request interrupt %s\n", buffer);
- return retval;
+ ret = request_irq(up->port.irq, sport_uart_rx_irq, 0,
+ "SPORT_UART_RX", up);
+ if (ret) {
+ dev_err(port->dev, "unable to request SPORT RX interrupt\n");
+ return ret;
}
- snprintf(buffer, 20, "%s tx", up->name);
- retval = request_irq(up->tx_irq, sport_uart_tx_irq, IRQF_SAMPLE_RANDOM, buffer, up);
- if (retval) {
- printk(KERN_ERR "Unable to request interrupt %s\n", buffer);
+ ret = request_irq(up->port.irq+1, sport_uart_tx_irq, 0,
+ "SPORT_UART_TX", up);
+ if (ret) {
+ dev_err(port->dev, "unable to request SPORT TX interrupt\n");
goto fail1;
}
- snprintf(buffer, 20, "%s err", up->name);
- retval = request_irq(up->err_irq, sport_uart_err_irq, IRQF_SAMPLE_RANDOM, buffer, up);
- if (retval) {
- printk(KERN_ERR "Unable to request interrupt %s\n", buffer);
+ ret = request_irq(up->err_irq, sport_uart_err_irq, 0,
+ "SPORT_UART_STATUS", up);
+ if (ret) {
+ dev_err(port->dev, "unable to request SPORT status interrupt\n");
goto fail2;
}
- if (port->line) {
- if (peripheral_request_list(bfin_uart_pin_req_sport1, DRV_NAME))
- goto fail3;
- } else {
- if (peripheral_request_list(bfin_uart_pin_req_sport0, DRV_NAME))
- goto fail3;
- }
-
- sport_uart_setup(up, get_sclk(), port->uartclk);
-
- /* Enable receive interrupt */
- SPORT_PUT_RCR1(up, (SPORT_GET_RCR1(up) | RSPEN));
- SSYNC();
-
return 0;
+ fail2:
+ free_irq(up->port.irq+1, up);
+ fail1:
+ free_irq(up->port.irq, up);
-
-fail3:
- printk(KERN_ERR DRV_NAME
- ": Requesting Peripherals failed\n");
-
- free_irq(up->err_irq, up);
-fail2:
- free_irq(up->tx_irq, up);
-fail1:
- free_irq(up->rx_irq, up);
-
- return retval;
-
+ return ret;
}
static void sport_uart_tx_chars(struct sport_uart_port *up)
@@ -344,20 +311,17 @@
static void sport_stop_tx(struct uart_port *port)
{
struct sport_uart_port *up = (struct sport_uart_port *)port;
- unsigned int stat;
pr_debug("%s enter\n", __func__);
- stat = SPORT_GET_STAT(up);
- while(!(stat & TXHRE)) {
- udelay(1);
- stat = SPORT_GET_STAT(up);
- }
/* Although the hold register is empty, last byte is still in shift
- * register and not sent out yet. If baud rate is lower than default,
- * delay should be longer. For example, if the baud rate is 9600,
- * the delay must be at least 2ms by experience */
- udelay(500);
+ * register and not sent out yet. So, put dummy data into the TX FIFO.
+ * Then, sport tx stops when the last byte is shifted out and the dummy
+ * data has moved into the shift register.
+ */
+ SPORT_PUT_TX(up, 0xffff);
+ while (!(SPORT_GET_STAT(up) & TXHRE))
+ cpu_relax();
SPORT_PUT_TCR1(up, (SPORT_GET_TCR1(up) & ~TSPEN));
SSYNC();
@@ -370,6 +334,7 @@
struct sport_uart_port *up = (struct sport_uart_port *)port;
pr_debug("%s enter\n", __func__);
+
/* Write data into SPORT FIFO before enable SPROT to transmit */
sport_uart_tx_chars(up);
@@ -403,37 +368,24 @@
{
struct sport_uart_port *up = (struct sport_uart_port *)port;
- pr_debug("%s enter\n", __func__);
+ dev_dbg(port->dev, "%s enter\n", __func__);
/* Disable sport */
SPORT_PUT_TCR1(up, (SPORT_GET_TCR1(up) & ~TSPEN));
SPORT_PUT_RCR1(up, (SPORT_GET_RCR1(up) & ~RSPEN));
SSYNC();
- if (port->line) {
- peripheral_free_list(bfin_uart_pin_req_sport1);
- } else {
- peripheral_free_list(bfin_uart_pin_req_sport0);
- }
-
- free_irq(up->rx_irq, up);
- free_irq(up->tx_irq, up);
+ free_irq(up->port.irq, up);
+ free_irq(up->port.irq+1, up);
free_irq(up->err_irq, up);
}
-static void sport_set_termios(struct uart_port *port,
- struct ktermios *termios, struct ktermios *old)
-{
- pr_debug("%s enter, c_cflag:%08x\n", __func__, termios->c_cflag);
- uart_update_timeout(port, CS8 ,port->uartclk);
-}
-
static const char *sport_type(struct uart_port *port)
{
struct sport_uart_port *up = (struct sport_uart_port *)port;
pr_debug("%s enter\n", __func__);
- return up->name;
+ return up->port.type == PORT_BFIN_SPORT ? "BFIN-SPORT-UART" : NULL;
}
static void sport_release_port(struct uart_port *port)
@@ -461,6 +413,110 @@
return 0;
}
+static void sport_set_termios(struct uart_port *port,
+ struct ktermios *termios, struct ktermios *old)
+{
+ struct sport_uart_port *up = (struct sport_uart_port *)port;
+ unsigned long flags;
+ int i;
+
+ pr_debug("%s enter, c_cflag:%08x\n", __func__, termios->c_cflag);
+
+ switch (termios->c_cflag & CSIZE) {
+ case CS8:
+ up->csize = 8;
+ break;
+ case CS7:
+ up->csize = 7;
+ break;
+ case CS6:
+ up->csize = 6;
+ break;
+ case CS5:
+ up->csize = 5;
+ break;
+ default:
+ pr_warning("requested word length not supported\n");
+ }
+
+ if (termios->c_cflag & CSTOPB) {
+ up->stopb = 1;
+ }
+ if (termios->c_cflag & PARENB) {
+ pr_warning("PAREN bits is not supported yet\n");
+ /* up->parib = 1; */
+ }
+
+ port->read_status_mask = OE;
+ if (termios->c_iflag & INPCK)
+ port->read_status_mask |= (FE | PE);
+ if (termios->c_iflag & (BRKINT | PARMRK))
+ port->read_status_mask |= BI;
+
+ /*
+ * Characters to ignore
+ */
+ port->ignore_status_mask = 0;
+ if (termios->c_iflag & IGNPAR)
+ port->ignore_status_mask |= FE | PE;
+ if (termios->c_iflag & IGNBRK) {
+ port->ignore_status_mask |= BI;
+ /*
+ * If we're ignoring parity and break indicators,
+ * ignore overruns too (for real raw support).
+ */
+ if (termios->c_iflag & IGNPAR)
+ port->ignore_status_mask |= OE;
+ }
+
+ /* RX extract mask */
+ up->rxmask = 0x01 | (((up->csize + up->stopb) * 2 - 1) << 0x8);
+ /* TX masks, 8 bit data and 1 bit stop for example:
+ * mask1 = b#0111111110
+ * mask2 = b#1000000000
+ */
+ for (i = 0, up->txmask1 = 0; i < up->csize; i++)
+ up->txmask1 |= (1<<i);
+ up->txmask2 = (1<<i);
+ if (up->stopb) {
+ ++i;
+ up->txmask2 |= (1<<i);
+ }
+ up->txmask1 <<= 1;
+ up->txmask2 <<= 1;
+ /* uart baud rate */
+ port->uartclk = uart_get_baud_rate(port, termios, old, 0, get_sclk()/16);
+
+ spin_lock_irqsave(&up->port.lock, flags);
+
+ /* Disable UART */
+ SPORT_PUT_TCR1(up, SPORT_GET_TCR1(up) & ~TSPEN);
+ SPORT_PUT_RCR1(up, SPORT_GET_RCR1(up) & ~RSPEN);
+
+ sport_uart_setup(up, up->csize + up->stopb, port->uartclk);
+
+ /* Drive the TX line high after config; one dummy word is
+ * necessary to stop the sport after shifting out one byte.
+ */
+ SPORT_PUT_TX(up, 0xffff);
+ SPORT_PUT_TX(up, 0xffff);
+ SPORT_PUT_TCR1(up, (SPORT_GET_TCR1(up) | TSPEN));
+ SSYNC();
+ while (!(SPORT_GET_STAT(up) & TXHRE))
+ cpu_relax();
+ SPORT_PUT_TCR1(up, SPORT_GET_TCR1(up) & ~TSPEN);
+ SSYNC();
+
+ /* Port speed changed, update the per-port timeout. */
+ uart_update_timeout(port, termios->c_cflag, port->uartclk);
+
+ /* Enable sport rx */
+ SPORT_PUT_RCR1(up, SPORT_GET_RCR1(up) | RSPEN);
+ SSYNC();
+
+ spin_unlock_irqrestore(&up->port.lock, flags);
+}
+
struct uart_ops sport_uart_ops = {
.tx_empty = sport_tx_empty,
.set_mctrl = sport_set_mctrl,
@@ -480,138 +536,319 @@
.verify_port = sport_verify_port,
};
-static struct sport_uart_port sport_uart_ports[] = {
- { /* SPORT 0 */
- .name = "SPORT0",
- .tx_irq = IRQ_SPORT0_TX,
- .rx_irq = IRQ_SPORT0_RX,
- .err_irq= IRQ_SPORT0_ERROR,
- .port = {
- .type = PORT_BFIN_SPORT,
- .iotype = UPIO_MEM,
- .membase = (void __iomem *)SPORT0_TCR1,
- .mapbase = SPORT0_TCR1,
- .irq = IRQ_SPORT0_RX,
- .uartclk = CONFIG_SPORT_BAUD_RATE,
- .fifosize = 8,
- .ops = &sport_uart_ops,
- .line = 0,
- },
- }, { /* SPORT 1 */
- .name = "SPORT1",
- .tx_irq = IRQ_SPORT1_TX,
- .rx_irq = IRQ_SPORT1_RX,
- .err_irq= IRQ_SPORT1_ERROR,
- .port = {
- .type = PORT_BFIN_SPORT,
- .iotype = UPIO_MEM,
- .membase = (void __iomem *)SPORT1_TCR1,
- .mapbase = SPORT1_TCR1,
- .irq = IRQ_SPORT1_RX,
- .uartclk = CONFIG_SPORT_BAUD_RATE,
- .fifosize = 8,
- .ops = &sport_uart_ops,
- .line = 1,
- },
+#define BFIN_SPORT_UART_MAX_PORTS 4
+
+static struct sport_uart_port *bfin_sport_uart_ports[BFIN_SPORT_UART_MAX_PORTS];
+
+#ifdef CONFIG_SERIAL_BFIN_SPORT_CONSOLE
+static int __init
+sport_uart_console_setup(struct console *co, char *options)
+{
+ struct sport_uart_port *up;
+ int baud = 57600;
+ int bits = 8;
+ int parity = 'n';
+ int flow = 'n';
+
+ /* Check whether an invalid uart number has been specified */
+ if (co->index < 0 || co->index >= BFIN_SPORT_UART_MAX_PORTS)
+ return -ENODEV;
+
+ up = bfin_sport_uart_ports[co->index];
+ if (!up)
+ return -ENODEV;
+
+ if (options)
+ uart_parse_options(options, &baud, &parity, &bits, &flow);
+
+ return uart_set_options(&up->port, co, baud, parity, bits, flow);
+}
+
+static void sport_uart_console_putchar(struct uart_port *port, int ch)
+{
+ struct sport_uart_port *up = (struct sport_uart_port *)port;
+
+ while (SPORT_GET_STAT(up) & TXF)
+ barrier();
+
+ tx_one_byte(up, ch);
+}
+
+/*
+ * Interrupts are disabled on entering
+ */
+static void
+sport_uart_console_write(struct console *co, const char *s, unsigned int count)
+{
+ struct sport_uart_port *up = bfin_sport_uart_ports[co->index];
+ unsigned long flags;
+
+ spin_lock_irqsave(&up->port.lock, flags);
+
+ if (SPORT_GET_TCR1(up) & TSPEN)
+ uart_console_write(&up->port, s, count, sport_uart_console_putchar);
+ else {
+ /* dummy data to start sport */
+ while (SPORT_GET_STAT(up) & TXF)
+ barrier();
+ SPORT_PUT_TX(up, 0xffff);
+ /* Enable transmit, then an interrupt will be generated */
+ SPORT_PUT_TCR1(up, (SPORT_GET_TCR1(up) | TSPEN));
+ SSYNC();
+
+ uart_console_write(&up->port, s, count, sport_uart_console_putchar);
+
+ /* Although the hold register is empty, the last byte is still in the shift
+ * register and not sent out yet. So, put dummy data into the TX FIFO.
+ * Then, sport tx stops when the last byte is shifted out and the dummy
+ * data has moved into the shift register.
+ */
+ while (SPORT_GET_STAT(up) & TXF)
+ barrier();
+ SPORT_PUT_TX(up, 0xffff);
+ while (!(SPORT_GET_STAT(up) & TXHRE))
+ barrier();
+
+ /* Stop sport tx transfer */
+ SPORT_PUT_TCR1(up, (SPORT_GET_TCR1(up) & ~TSPEN));
+ SSYNC();
}
+
+ spin_unlock_irqrestore(&up->port.lock, flags);
+}
+
+static struct uart_driver sport_uart_reg;
+
+static struct console sport_uart_console = {
+ .name = DEVICE_NAME,
+ .write = sport_uart_console_write,
+ .device = uart_console_device,
+ .setup = sport_uart_console_setup,
+ .flags = CON_PRINTBUFFER,
+ .index = -1,
+ .data = &sport_uart_reg,
};
+#define SPORT_UART_CONSOLE (&sport_uart_console)
+#else
+#define SPORT_UART_CONSOLE NULL
+#endif /* CONFIG_SERIAL_BFIN_SPORT_CONSOLE */
+
+
static struct uart_driver sport_uart_reg = {
.owner = THIS_MODULE,
- .driver_name = "SPORT-UART",
- .dev_name = "ttySS",
+ .driver_name = DRV_NAME,
+ .dev_name = DEVICE_NAME,
.major = 204,
.minor = 84,
- .nr = ARRAY_SIZE(sport_uart_ports),
- .cons = NULL,
+ .nr = BFIN_SPORT_UART_MAX_PORTS,
+ .cons = SPORT_UART_CONSOLE,
};
-static int sport_uart_suspend(struct platform_device *dev, pm_message_t state)
+#ifdef CONFIG_PM
+static int sport_uart_suspend(struct device *dev)
{
- struct sport_uart_port *sport = platform_get_drvdata(dev);
+ struct sport_uart_port *sport = dev_get_drvdata(dev);
- pr_debug("%s enter\n", __func__);
+ dev_dbg(dev, "%s enter\n", __func__);
if (sport)
uart_suspend_port(&sport_uart_reg, &sport->port);
return 0;
}
-static int sport_uart_resume(struct platform_device *dev)
+static int sport_uart_resume(struct device *dev)
{
- struct sport_uart_port *sport = platform_get_drvdata(dev);
+ struct sport_uart_port *sport = dev_get_drvdata(dev);
- pr_debug("%s enter\n", __func__);
+ dev_dbg(dev, "%s enter\n", __func__);
if (sport)
uart_resume_port(&sport_uart_reg, &sport->port);
return 0;
}
-static int sport_uart_probe(struct platform_device *dev)
-{
- pr_debug("%s enter\n", __func__);
- sport_uart_ports[dev->id].port.dev = &dev->dev;
- uart_add_one_port(&sport_uart_reg, &sport_uart_ports[dev->id].port);
- platform_set_drvdata(dev, &sport_uart_ports[dev->id]);
+static struct dev_pm_ops bfin_sport_uart_dev_pm_ops = {
+ .suspend = sport_uart_suspend,
+ .resume = sport_uart_resume,
+};
+#endif
- return 0;
+static int __devinit sport_uart_probe(struct platform_device *pdev)
+{
+ struct resource *res;
+ struct sport_uart_port *sport;
+ int ret = 0;
+
+ dev_dbg(&pdev->dev, "%s enter\n", __func__);
+
+ if (pdev->id < 0 || pdev->id >= BFIN_SPORT_UART_MAX_PORTS) {
+ dev_err(&pdev->dev, "Wrong sport uart platform device id.\n");
+ return -ENOENT;
+ }
+
+ if (bfin_sport_uart_ports[pdev->id] == NULL) {
+ bfin_sport_uart_ports[pdev->id] =
+ kmalloc(sizeof(struct sport_uart_port), GFP_KERNEL);
+ sport = bfin_sport_uart_ports[pdev->id];
+ if (!sport) {
+ dev_err(&pdev->dev,
+ "Fail to kmalloc sport_uart_port\n");
+ return -ENOMEM;
+ }
+
+ ret = peripheral_request_list(
+ (unsigned short *)pdev->dev.platform_data, DRV_NAME);
+ if (ret) {
+ dev_err(&pdev->dev,
+ "Fail to request SPORT peripherals\n");
+ goto out_error_free_mem;
+ }
+
+ spin_lock_init(&sport->port.lock);
+ sport->port.fifosize = SPORT_TX_FIFO_SIZE;
+ sport->port.ops = &sport_uart_ops;
+ sport->port.line = pdev->id;
+ sport->port.iotype = UPIO_MEM;
+ sport->port.flags = UPF_BOOT_AUTOCONF;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (res == NULL) {
+ dev_err(&pdev->dev, "Cannot get IORESOURCE_MEM\n");
+ ret = -ENOENT;
+ goto out_error_free_peripherals;
+ }
+
+ sport->port.membase = ioremap(res->start,
+ resource_size(res));
+ if (!sport->port.membase) {
+ dev_err(&pdev->dev, "Cannot map sport IO\n");
+ ret = -ENXIO;
+ goto out_error_free_peripherals;
+ }
+
+ sport->port.irq = platform_get_irq(pdev, 0);
+ if (sport->port.irq < 0) {
+ dev_err(&pdev->dev, "No sport RX/TX IRQ specified\n");
+ ret = -ENOENT;
+ goto out_error_unmap;
+ }
+
+ sport->err_irq = platform_get_irq(pdev, 1);
+ if (sport->err_irq < 0) {
+ dev_err(&pdev->dev, "No sport status IRQ specified\n");
+ ret = -ENOENT;
+ goto out_error_unmap;
+ }
+ }
+
+#ifdef CONFIG_SERIAL_BFIN_SPORT_CONSOLE
+ if (!is_early_platform_device(pdev)) {
+#endif
+ sport = bfin_sport_uart_ports[pdev->id];
+ sport->port.dev = &pdev->dev;
+ dev_set_drvdata(&pdev->dev, sport);
+ ret = uart_add_one_port(&sport_uart_reg, &sport->port);
+#ifdef CONFIG_SERIAL_BFIN_SPORT_CONSOLE
+ }
+#endif
+ if (!ret)
+ return 0;
+
+ if (sport) {
+out_error_unmap:
+ iounmap(sport->port.membase);
+out_error_free_peripherals:
+ peripheral_free_list(
+ (unsigned short *)pdev->dev.platform_data);
+out_error_free_mem:
+ kfree(sport);
+ bfin_sport_uart_ports[pdev->id] = NULL;
+ }
+
+ return ret;
}
-static int sport_uart_remove(struct platform_device *dev)
+static int __devexit sport_uart_remove(struct platform_device *pdev)
{
- struct sport_uart_port *sport = platform_get_drvdata(dev);
+ struct sport_uart_port *sport = platform_get_drvdata(pdev);
- pr_debug("%s enter\n", __func__);
- platform_set_drvdata(dev, NULL);
+ dev_dbg(&pdev->dev, "%s enter\n", __func__);
+ dev_set_drvdata(&pdev->dev, NULL);
- if (sport)
+ if (sport) {
uart_remove_one_port(&sport_uart_reg, &sport->port);
+ iounmap(sport->port.membase);
+ peripheral_free_list(
+ (unsigned short *)pdev->dev.platform_data);
+ kfree(sport);
+ bfin_sport_uart_ports[pdev->id] = NULL;
+ }
return 0;
}
static struct platform_driver sport_uart_driver = {
.probe = sport_uart_probe,
- .remove = sport_uart_remove,
- .suspend = sport_uart_suspend,
- .resume = sport_uart_resume,
+ .remove = __devexit_p(sport_uart_remove),
.driver = {
.name = DRV_NAME,
+#ifdef CONFIG_PM
+ .pm = &bfin_sport_uart_dev_pm_ops,
+#endif
},
};
+#ifdef CONFIG_SERIAL_BFIN_SPORT_CONSOLE
+static __initdata struct early_platform_driver early_sport_uart_driver = {
+ .class_str = DRV_NAME,
+ .pdrv = &sport_uart_driver,
+ .requested_id = EARLY_PLATFORM_ID_UNSET,
+};
+
+static int __init sport_uart_rs_console_init(void)
+{
+ early_platform_driver_register(&early_sport_uart_driver, DRV_NAME);
+
+ early_platform_driver_probe(DRV_NAME, BFIN_SPORT_UART_MAX_PORTS, 0);
+
+ register_console(&sport_uart_console);
+
+ return 0;
+}
+console_initcall(sport_uart_rs_console_init);
+#endif
+
static int __init sport_uart_init(void)
{
int ret;
- pr_debug("%s enter\n", __func__);
+ pr_info("Serial: Blackfin uart over sport driver\n");
+
ret = uart_register_driver(&sport_uart_reg);
- if (ret != 0) {
- printk(KERN_ERR "Failed to register %s:%d\n",
+ if (ret) {
+ pr_err("failed to register %s:%d\n",
sport_uart_reg.driver_name, ret);
return ret;
}
ret = platform_driver_register(&sport_uart_driver);
- if (ret != 0) {
- printk(KERN_ERR "Failed to register sport uart driver:%d\n", ret);
+ if (ret) {
+ pr_err("failed to register sport uart driver:%d\n", ret);
uart_unregister_driver(&sport_uart_reg);
}
-
- pr_debug("%s exit\n", __func__);
return ret;
}
+module_init(sport_uart_init);
static void __exit sport_uart_exit(void)
{
- pr_debug("%s enter\n", __func__);
platform_driver_unregister(&sport_uart_driver);
uart_unregister_driver(&sport_uart_reg);
}
-
-module_init(sport_uart_init);
module_exit(sport_uart_exit);
+MODULE_AUTHOR("Sonic Zhang, Roy Huang");
+MODULE_DESCRIPTION("Blackfin serial over SPORT driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/serial/bfin_sport_uart.h b/drivers/serial/bfin_sport_uart.h
index 671d41c..abe0361 100644
--- a/drivers/serial/bfin_sport_uart.h
+++ b/drivers/serial/bfin_sport_uart.h
@@ -1,29 +1,23 @@
/*
- * File: linux/drivers/serial/bfin_sport_uart.h
+ * Blackfin On-Chip Sport Emulated UART Driver
*
- * Based on: include/asm-blackfin/mach-533/bfin_serial_5xx.h
- * Author: Roy Huang <roy.huang>analog.com>
+ * Copyright 2006-2008 Analog Devices Inc.
*
- * Created: Nov 22, 2006
- * Copyright: (C) Analog Device Inc.
- * Description: this driver enable SPORTs on Blackfin emulate UART.
+ * Enter bugs at http://blackfin.uclinux.org/
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, see the file COPYING, or write
- * to the Free Software Foundation, Inc.,
- * 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ * Licensed under the GPL-2 or later.
*/
+/*
+ * This driver and the hardware it supports are described in ADI application note EE-191.
+ * http://www.analog.com/UploadedFiles/Application_Notes/399447663EE191.pdf
+ * The application note describes how to implement a UART on a SHARC DSP,
+ * but this driver is implemented for the Blackfin processor.
+ * Transmit Frame Sync is not used by this driver to transfer data out.
+ */
+
+#ifndef _BFIN_SPORT_UART_H
+#define _BFIN_SPORT_UART_H
#define OFFSET_TCR1 0x00 /* Transmit Configuration 1 Register */
#define OFFSET_TCR2 0x04 /* Transmit Configuration 2 Register */
@@ -61,3 +55,7 @@
#define SPORT_PUT_RCLKDIV(sport, v) bfin_write16(((sport)->port.membase + OFFSET_RCLKDIV), v)
#define SPORT_PUT_RFSDIV(sport, v) bfin_write16(((sport)->port.membase + OFFSET_RFSDIV), v)
#define SPORT_PUT_STAT(sport, v) bfin_write16(((sport)->port.membase + OFFSET_STAT), v)
+
+#define SPORT_TX_FIFO_SIZE 8
+
+#endif /* _BFIN_SPORT_UART_H */
diff --git a/drivers/serial/icom.c b/drivers/serial/icom.c
index 0028b6f..53a4682 100644
--- a/drivers/serial/icom.c
+++ b/drivers/serial/icom.c
@@ -751,7 +751,6 @@
trace(icom_port, "FID_STATUS", status);
count = cpu_to_le16(icom_port->statStg->rcv[rcv_buff].leLength);
- count = tty_buffer_request_room(tty, count);
trace(icom_port, "RCV_COUNT", count);
trace(icom_port, "REAL_COUNT", count);
@@ -1654,4 +1653,6 @@
MODULE_SUPPORTED_DEVICE
("IBM iSeries 2745, 2771, 2772, 2742, 2793 and 2805 Communications adapters");
MODULE_LICENSE("GPL");
-
+MODULE_FIRMWARE("icom_call_setup.bin");
+MODULE_FIRMWARE("icom_res_dce.bin");
+MODULE_FIRMWARE("icom_asc.bin");
diff --git a/drivers/serial/imx.c b/drivers/serial/imx.c
index 60d665a..d00fcf8 100644
--- a/drivers/serial/imx.c
+++ b/drivers/serial/imx.c
@@ -1279,7 +1279,7 @@
sport->use_irda = 1;
#endif
- if (pdata->init) {
+ if (pdata && pdata->init) {
ret = pdata->init(pdev);
if (ret)
goto clkput;
@@ -1292,7 +1292,7 @@
return 0;
deinit:
- if (pdata->exit)
+ if (pdata && pdata->exit)
pdata->exit(pdev);
clkput:
clk_put(sport->clk);
@@ -1321,7 +1321,7 @@
clk_disable(sport->clk);
- if (pdata->exit)
+ if (pdata && pdata->exit)
pdata->exit(pdev);
iounmap(sport->port.membase);
diff --git a/drivers/serial/ioc3_serial.c b/drivers/serial/ioc3_serial.c
index 85dc041..23ba6b4 100644
--- a/drivers/serial/ioc3_serial.c
+++ b/drivers/serial/ioc3_serial.c
@@ -1411,8 +1411,7 @@
read_count = do_read(the_port, ch, MAX_CHARS);
if (read_count > 0) {
flip = 1;
- read_room = tty_buffer_request_room(tty, read_count);
- tty_insert_flip_string(tty, ch, read_room);
+ read_room = tty_insert_flip_string(tty, ch, read_count);
the_port->icount.rx += read_count;
}
spin_unlock_irqrestore(&the_port->lock, pflags);
diff --git a/drivers/serial/jsm/jsm_driver.c b/drivers/serial/jsm/jsm_driver.c
index 108c3e0..12cb5e4 100644
--- a/drivers/serial/jsm/jsm_driver.c
+++ b/drivers/serial/jsm/jsm_driver.c
@@ -179,6 +179,7 @@
return 0;
out_free_irq:
+ jsm_remove_uart_port(brd);
free_irq(brd->irq, brd);
out_iounmap:
iounmap(brd->re_map_membase);
diff --git a/drivers/serial/jsm/jsm_tty.c b/drivers/serial/jsm/jsm_tty.c
index cd95e21..5673ca9 100644
--- a/drivers/serial/jsm/jsm_tty.c
+++ b/drivers/serial/jsm/jsm_tty.c
@@ -432,7 +432,7 @@
int jsm_uart_port_init(struct jsm_board *brd)
{
- int i;
+ int i, rc;
unsigned int line;
struct jsm_channel *ch;
@@ -467,8 +467,11 @@
} else
set_bit(line, linemap);
brd->channels[i]->uart_port.line = line;
- if (uart_add_one_port (&jsm_uart_driver, &brd->channels[i]->uart_port))
- printk(KERN_INFO "jsm: add device failed\n");
+ rc = uart_add_one_port (&jsm_uart_driver, &brd->channels[i]->uart_port);
+ if (rc) {
+ printk(KERN_INFO "jsm: Port %d failed. Aborting...\n", i);
+ return rc;
+ }
else
printk(KERN_INFO "jsm: Port %d added\n", i);
}
diff --git a/drivers/serial/msm_serial.c b/drivers/serial/msm_serial.c
index b05c5aa..ecdc0fa 100644
--- a/drivers/serial/msm_serial.c
+++ b/drivers/serial/msm_serial.c
@@ -691,6 +691,7 @@
struct msm_port *msm_port;
struct resource *resource;
struct uart_port *port;
+ int irq;
if (unlikely(pdev->id < 0 || pdev->id >= UART_NR))
return -ENXIO;
@@ -711,9 +712,10 @@
return -ENXIO;
port->mapbase = resource->start;
- port->irq = platform_get_irq(pdev, 0);
- if (unlikely(port->irq < 0))
+ irq = platform_get_irq(pdev, 0);
+ if (unlikely(irq < 0))
return -ENXIO;
+ port->irq = irq;
platform_set_drvdata(pdev, port);
diff --git a/drivers/serial/timbuart.c b/drivers/serial/timbuart.c
index 34b31da..7bf1026 100644
--- a/drivers/serial/timbuart.c
+++ b/drivers/serial/timbuart.c
@@ -421,7 +421,7 @@
static int timbuart_probe(struct platform_device *dev)
{
- int err;
+ int err, irq;
struct timbuart_port *uart;
struct resource *iomem;
@@ -453,11 +453,12 @@
uart->port.mapbase = iomem->start;
uart->port.membase = NULL;
- uart->port.irq = platform_get_irq(dev, 0);
- if (uart->port.irq < 0) {
+ irq = platform_get_irq(dev, 0);
+ if (irq < 0) {
err = -EINVAL;
goto err_register;
}
+ uart->port.irq = irq;
tasklet_init(&uart->tasklet, timbuart_tasklet, (unsigned long)uart);
diff --git a/drivers/staging/usbip/vhci_sysfs.c b/drivers/staging/usbip/vhci_sysfs.c
index d8992d1..f6e34e0 100644
--- a/drivers/staging/usbip/vhci_sysfs.c
+++ b/drivers/staging/usbip/vhci_sysfs.c
@@ -144,7 +144,7 @@
case USB_SPEED_LOW:
case USB_SPEED_FULL:
case USB_SPEED_HIGH:
- case USB_SPEED_VARIABLE:
+ case USB_SPEED_WIRELESS:
break;
default:
usbip_uerr("speed %d\n", speed);
diff --git a/drivers/usb/Kconfig b/drivers/usb/Kconfig
index 4f5bb56..6a58cb1 100644
--- a/drivers/usb/Kconfig
+++ b/drivers/usb/Kconfig
@@ -21,6 +21,7 @@
default y if USB_ARCH_HAS_EHCI
default y if PCMCIA && !M32R # sl811_cs
default y if ARM # SL-811
+ default y if BLACKFIN # SL-811
default y if SUPERH # r8a66597-hcd
default PCI
@@ -39,6 +40,7 @@
default y if ARCH_PNX4008 && I2C
default y if MFD_TC6393XB
default y if ARCH_W90X900
+ default y if ARCH_DAVINCI_DA8XX
# PPC:
default y if STB03xxx
default y if PPC_MPC52xx
diff --git a/drivers/usb/Makefile b/drivers/usb/Makefile
index be3c9b8..80b4008 100644
--- a/drivers/usb/Makefile
+++ b/drivers/usb/Makefile
@@ -21,6 +21,7 @@
obj-$(CONFIG_USB_R8A66597_HCD) += host/
obj-$(CONFIG_USB_HWA_HCD) += host/
obj-$(CONFIG_USB_ISP1760_HCD) += host/
+obj-$(CONFIG_USB_IMX21_HCD) += host/
obj-$(CONFIG_USB_C67X00_HCD) += c67x00/
diff --git a/drivers/usb/atm/cxacru.c b/drivers/usb/atm/cxacru.c
index 56802d2..c89990f 100644
--- a/drivers/usb/atm/cxacru.c
+++ b/drivers/usb/atm/cxacru.c
@@ -5,6 +5,7 @@
* Copyright (C) 2004 David Woodhouse, Duncan Sands, Roman Kagan
* Copyright (C) 2005 Duncan Sands, Roman Kagan (rkagan % mail ! ru)
* Copyright (C) 2007 Simon Arlott
+ * Copyright (C) 2009 Simon Arlott
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the Free
@@ -43,7 +44,7 @@
#include "usbatm.h"
#define DRIVER_AUTHOR "Roman Kagan, David Woodhouse, Duncan Sands, Simon Arlott"
-#define DRIVER_VERSION "0.3"
+#define DRIVER_VERSION "0.4"
#define DRIVER_DESC "Conexant AccessRunner ADSL USB modem driver"
static const char cxacru_driver_name[] = "cxacru";
@@ -52,6 +53,7 @@
#define CXACRU_EP_DATA 0x02 /* Bulk in/out */
#define CMD_PACKET_SIZE 64 /* Should be maxpacket(ep)? */
+#define CMD_MAX_CONFIG ((CMD_PACKET_SIZE / 4 - 1) / 2)
/* Addresses */
#define PLLFCLK_ADDR 0x00350068
@@ -105,6 +107,26 @@
CM_REQUEST_MAX,
};
+/* commands for interaction with the flash memory
+ *
+ * read: response is the contents of the first 60 bytes of flash memory
+ * write: request contains the 60 bytes of data to write to flash memory
+ * response is the contents of the first 60 bytes of flash memory
+ *
+ * layout: PP PP VV VV MM MM MM MM MM MM ?? ?? SS SS SS SS SS SS SS SS
+ * SS SS SS SS SS SS SS SS 00 00 00 00 00 00 00 00 00 00 00 00
+ * 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00
+ *
+ * P: le16 USB Product ID
+ * V: le16 USB Vendor ID
+ * M: be48 MAC Address
+ * S: le16 ASCII Serial Number
+ */
+enum cxacru_cm_flash {
+ CM_FLASH_READ = 0xa1,
+ CM_FLASH_WRITE = 0xa2
+};
+
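Editorial note: the flash layout documented above maps onto a small parsing helper. The sketch below is not part of this patch; the struct, the function name, and the assumption that each le16 word of the serial number holds one ASCII character are all illustrative.

	/* Editorial sketch, not part of the patch: unpack the 60-byte flash
	 * image described in the layout comment above.  Field offsets follow
	 * that comment; the serial-number handling assumes one ASCII
	 * character per le16 word.
	 */
	struct cxacru_flash_id {
		u16 product_id;		/* le16 at offset 0 */
		u16 vendor_id;		/* le16 at offset 2 */
		u8 mac[6];		/* be48 at offset 4 */
		char serial[9];		/* 8 le16 ASCII words at offset 12 */
	};

	static void cxacru_parse_flash_id(const u8 *buf, struct cxacru_flash_id *id)
	{
		int i;

		id->product_id = le16_to_cpup((const __le16 *)(buf + 0));
		id->vendor_id = le16_to_cpup((const __le16 *)(buf + 2));
		memcpy(id->mac, buf + 4, 6);
		for (i = 0; i < 8; i++)
			id->serial[i] = le16_to_cpup((const __le16 *)(buf + 12 + i * 2));
		id->serial[8] = '\0';
	}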
/* reply codes to the commands above */
enum cxacru_cm_status {
CM_STATUS_UNDEFINED,
@@ -196,23 +218,32 @@
static DEVICE_ATTR(_name, S_IWUSR | S_IRUGO, \
cxacru_sysfs_show_##_name, cxacru_sysfs_store_##_name)
+#define CXACRU_SET_INIT(_name) \
+static DEVICE_ATTR(_name, S_IWUSR, \
+ NULL, cxacru_sysfs_store_##_name)
+
#define CXACRU_ATTR_INIT(_value, _type, _name) \
static ssize_t cxacru_sysfs_show_##_name(struct device *dev, \
struct device_attribute *attr, char *buf) \
{ \
- struct usb_interface *intf = to_usb_interface(dev); \
- struct usbatm_data *usbatm_instance = usb_get_intfdata(intf); \
- struct cxacru_data *instance = usbatm_instance->driver_data; \
+ struct cxacru_data *instance = to_usbatm_driver_data(\
+ to_usb_interface(dev)); \
+\
+ if (instance == NULL) \
+ return -ENODEV; \
+\
return cxacru_sysfs_showattr_##_type(instance->card_info[_value], buf); \
} \
CXACRU__ATTR_INIT(_name)
#define CXACRU_ATTR_CREATE(_v, _t, _name) CXACRU_DEVICE_CREATE_FILE(_name)
#define CXACRU_CMD_CREATE(_name) CXACRU_DEVICE_CREATE_FILE(_name)
+#define CXACRU_SET_CREATE(_name) CXACRU_DEVICE_CREATE_FILE(_name)
#define CXACRU__ATTR_CREATE(_name) CXACRU_DEVICE_CREATE_FILE(_name)
#define CXACRU_ATTR_REMOVE(_v, _t, _name) CXACRU_DEVICE_REMOVE_FILE(_name)
#define CXACRU_CMD_REMOVE(_name) CXACRU_DEVICE_REMOVE_FILE(_name)
+#define CXACRU_SET_REMOVE(_name) CXACRU_DEVICE_REMOVE_FILE(_name)
#define CXACRU__ATTR_REMOVE(_name) CXACRU_DEVICE_REMOVE_FILE(_name)
static ssize_t cxacru_sysfs_showattr_u32(u32 value, char *buf)
@@ -267,12 +298,12 @@
static ssize_t cxacru_sysfs_showattr_MODU(u32 value, char *buf)
{
static char *str[] = {
- NULL,
+ "",
"ANSI T1.413",
"ITU-T G.992.1 (G.DMT)",
"ITU-T G.992.2 (G.LITE)"
};
- if (unlikely(value >= ARRAY_SIZE(str) || str[value] == NULL))
+ if (unlikely(value >= ARRAY_SIZE(str)))
return snprintf(buf, PAGE_SIZE, "%u\n", value);
return snprintf(buf, PAGE_SIZE, "%s\n", str[value]);
}
@@ -288,22 +319,28 @@
static ssize_t cxacru_sysfs_show_mac_address(struct device *dev,
struct device_attribute *attr, char *buf)
{
- struct usb_interface *intf = to_usb_interface(dev);
- struct usbatm_data *usbatm_instance = usb_get_intfdata(intf);
- struct atm_dev *atm_dev = usbatm_instance->atm_dev;
+ struct cxacru_data *instance = to_usbatm_driver_data(
+ to_usb_interface(dev));
- return snprintf(buf, PAGE_SIZE, "%pM\n", atm_dev->esi);
+ if (instance == NULL || instance->usbatm->atm_dev == NULL)
+ return -ENODEV;
+
+ return snprintf(buf, PAGE_SIZE, "%pM\n",
+ instance->usbatm->atm_dev->esi);
}
static ssize_t cxacru_sysfs_show_adsl_state(struct device *dev,
struct device_attribute *attr, char *buf)
{
- struct usb_interface *intf = to_usb_interface(dev);
- struct usbatm_data *usbatm_instance = usb_get_intfdata(intf);
- struct cxacru_data *instance = usbatm_instance->driver_data;
- u32 value = instance->card_info[CXINF_LINE_STARTABLE];
-
static char *str[] = { "running", "stopped" };
+ struct cxacru_data *instance = to_usbatm_driver_data(
+ to_usb_interface(dev));
+ u32 value;
+
+ if (instance == NULL)
+ return -ENODEV;
+
+ value = instance->card_info[CXINF_LINE_STARTABLE];
if (unlikely(value >= ARRAY_SIZE(str)))
return snprintf(buf, PAGE_SIZE, "%u\n", value);
return snprintf(buf, PAGE_SIZE, "%s\n", str[value]);
@@ -312,9 +349,8 @@
static ssize_t cxacru_sysfs_store_adsl_state(struct device *dev,
struct device_attribute *attr, const char *buf, size_t count)
{
- struct usb_interface *intf = to_usb_interface(dev);
- struct usbatm_data *usbatm_instance = usb_get_intfdata(intf);
- struct cxacru_data *instance = usbatm_instance->driver_data;
+ struct cxacru_data *instance = to_usbatm_driver_data(
+ to_usb_interface(dev));
int ret;
int poll = -1;
char str_cmd[8];
@@ -328,13 +364,16 @@
return -EINVAL;
ret = 0;
+ if (instance == NULL)
+ return -ENODEV;
+
if (mutex_lock_interruptible(&instance->adsl_state_serialize))
return -ERESTARTSYS;
if (!strcmp(str_cmd, "stop") || !strcmp(str_cmd, "restart")) {
ret = cxacru_cm(instance, CM_REQUEST_CHIP_ADSL_LINE_STOP, NULL, 0, NULL, 0);
if (ret < 0) {
- atm_err(usbatm_instance, "change adsl state:"
+ atm_err(instance->usbatm, "change adsl state:"
" CHIP_ADSL_LINE_STOP returned %d\n", ret);
ret = -EIO;
@@ -354,7 +393,7 @@
if (!strcmp(str_cmd, "start") || !strcmp(str_cmd, "restart")) {
ret = cxacru_cm(instance, CM_REQUEST_CHIP_ADSL_LINE_START, NULL, 0, NULL, 0);
if (ret < 0) {
- atm_err(usbatm_instance, "change adsl state:"
+ atm_err(instance->usbatm, "change adsl state:"
" CHIP_ADSL_LINE_START returned %d\n", ret);
ret = -EIO;
@@ -407,6 +446,72 @@
return ret;
}
+/* CM_REQUEST_CARD_DATA_GET times out, so no show attribute */
+
+static ssize_t cxacru_sysfs_store_adsl_config(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ struct cxacru_data *instance = to_usbatm_driver_data(
+ to_usb_interface(dev));
+ int len = strlen(buf);
+ int ret, pos, num;
+ __le32 data[CMD_PACKET_SIZE / 4];
+
+ if (!capable(CAP_NET_ADMIN))
+ return -EACCES;
+
+ if (instance == NULL)
+ return -ENODEV;
+
+ pos = 0;
+ num = 0;
+ while (pos < len) {
+ int tmp;
+ u32 index;
+ u32 value;
+
+ ret = sscanf(buf + pos, "%x=%x%n", &index, &value, &tmp);
+ if (ret < 2)
+ return -EINVAL;
+ if (index > 0x7f)
+ return -EINVAL;
+ pos += tmp;
+
+ /* skip trailing newline */
+ if (buf[pos] == '\n' && pos == len-1)
+ pos++;
+
+ data[num * 2 + 1] = cpu_to_le32(index);
+ data[num * 2 + 2] = cpu_to_le32(value);
+ num++;
+
+ /* send config values when data buffer is full
+ * or no more data
+ */
+ if (pos >= len || num >= CMD_MAX_CONFIG) {
+ char log[CMD_MAX_CONFIG * 12 + 1]; /* %02x=%08x */
+
+ data[0] = cpu_to_le32(num);
+ ret = cxacru_cm(instance, CM_REQUEST_CARD_DATA_SET,
+ (u8 *) data, 4 + num * 8, NULL, 0);
+ if (ret < 0) {
+ atm_err(instance->usbatm,
+ "set card data returned %d\n", ret);
+ return -EIO;
+ }
+
+ for (tmp = 0; tmp < num; tmp++)
+ snprintf(log + tmp*12, 13, " %02x=%08x",
+ le32_to_cpu(data[tmp * 2 + 1]),
+ le32_to_cpu(data[tmp * 2 + 2]));
+ atm_info(instance->usbatm, "config%s\n", log);
+ num = 0;
+ }
+ }
+
+ return len;
+}
+
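Usage note (editorial, not part of the patch): as the sscanf("%x=%x") loop above shows, adsl_config accepts whitespace-separated index=value pairs in hex, with indices limited to 0x00-0x7f, batched into CARD_DATA_SET packets of at most CMD_MAX_CONFIG entries each. A write from userspace might look like the following, where the sysfs path and the particular index/value pairs are placeholders only:

	# echo "02=1 0b=2000" > /sys/bus/usb/devices/.../adsl_config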
/*
* All device attributes are included in CXACRU_ALL_FILES
* so that the same list can be used multiple times:
@@ -442,7 +547,8 @@
CXACRU_ATTR_##_action(CXINF_ADSL_HEADEND, u32, adsl_headend); \
CXACRU_ATTR_##_action(CXINF_ADSL_HEADEND_ENVIRONMENT, u32, adsl_headend_environment); \
CXACRU_ATTR_##_action(CXINF_CONTROLLER_VERSION, u32, adsl_controller_version); \
-CXACRU_CMD_##_action( adsl_state);
+CXACRU_CMD_##_action( adsl_state); \
+CXACRU_SET_##_action( adsl_config);
CXACRU_ALL_FILES(INIT);
@@ -596,7 +702,7 @@
len = ret / 4;
for (offb = 0; offb < len; ) {
int l = le32_to_cpu(buf[offb++]);
- if (l > stride || l > (len - offb) / 2) {
+ if (l < 0 || l > stride || l > (len - offb) / 2) {
if (printk_ratelimit())
usb_err(instance->usbatm, "invalid data length from cm %#x: %d\n",
cm, l);
@@ -649,9 +755,6 @@
{
struct cxacru_data *instance = usbatm_instance->driver_data;
struct usb_interface *intf = usbatm_instance->usb_intf;
- /*
- struct atm_dev *atm_dev = usbatm_instance->atm_dev;
- */
int ret;
int start_polling = 1;
@@ -697,6 +800,9 @@
mutex_unlock(&instance->poll_state_serialize);
mutex_unlock(&instance->adsl_state_serialize);
+ printk(KERN_INFO "%s%d: %s %pM\n", atm_dev->type, atm_dev->number,
+ usbatm_instance->description, atm_dev->esi);
+
if (start_polling)
cxacru_poll_status(&instance->poll_work.work);
return 0;
@@ -873,11 +979,9 @@
static void cxacru_upload_firmware(struct cxacru_data *instance,
const struct firmware *fw,
- const struct firmware *bp,
- const struct firmware *cf)
+ const struct firmware *bp)
{
int ret;
- int off;
struct usbatm_data *usbatm = instance->usbatm;
struct usb_device *usb_dev = usbatm->usb_dev;
__le16 signature[] = { usb_dev->descriptor.idVendor,
@@ -911,6 +1015,7 @@
}
/* Firmware */
+ usb_info(usbatm, "loading firmware\n");
ret = cxacru_fw(usb_dev, FW_WRITE_MEM, 0x2, 0x0, FW_ADDR, fw->data, fw->size);
if (ret) {
usb_err(usbatm, "Firmware upload failed: %d\n", ret);
@@ -919,6 +1024,7 @@
/* Boot ROM patch */
if (instance->modem_type->boot_rom_patch) {
+ usb_info(usbatm, "loading boot ROM patch\n");
ret = cxacru_fw(usb_dev, FW_WRITE_MEM, 0x2, 0x0, BR_ADDR, bp->data, bp->size);
if (ret) {
usb_err(usbatm, "Boot ROM patching failed: %d\n", ret);
@@ -933,6 +1039,7 @@
return;
}
+ usb_info(usbatm, "starting device\n");
if (instance->modem_type->boot_rom_patch) {
val = cpu_to_le32(BR_ADDR);
ret = cxacru_fw(usb_dev, FW_WRITE_MEM, 0x2, 0x0, BR_STACK_ADDR, (u8 *) &val, 4);
@@ -958,26 +1065,6 @@
usb_err(usbatm, "modem failed to initialize: %d\n", ret);
return;
}
-
- /* Load config data (le32), doing one packet at a time */
- if (cf)
- for (off = 0; off < cf->size / 4; ) {
- __le32 buf[CMD_PACKET_SIZE / 4 - 1];
- int i, len = min_t(int, cf->size / 4 - off, CMD_PACKET_SIZE / 4 / 2 - 1);
- buf[0] = cpu_to_le32(len);
- for (i = 0; i < len; i++, off++) {
- buf[i * 2 + 1] = cpu_to_le32(off);
- memcpy(buf + i * 2 + 2, cf->data + off * 4, 4);
- }
- ret = cxacru_cm(instance, CM_REQUEST_CARD_DATA_SET,
- (u8 *) buf, len, NULL, 0);
- if (ret < 0) {
- usb_err(usbatm, "load config data failed: %d\n", ret);
- return;
- }
- }
-
- msleep_interruptible(4000);
}
static int cxacru_find_firmware(struct cxacru_data *instance,
@@ -1003,7 +1090,7 @@
static int cxacru_heavy_init(struct usbatm_data *usbatm_instance,
struct usb_interface *usb_intf)
{
- const struct firmware *fw, *bp, *cf;
+ const struct firmware *fw, *bp;
struct cxacru_data *instance = usbatm_instance->driver_data;
int ret = cxacru_find_firmware(instance, "fw", &fw);
@@ -1021,13 +1108,8 @@
}
}
- if (cxacru_find_firmware(instance, "cf", &cf)) /* optional */
- cf = NULL;
+ cxacru_upload_firmware(instance, fw, bp);
- cxacru_upload_firmware(instance, fw, bp, cf);
-
- if (cf)
- release_firmware(cf);
if (instance->modem_type->boot_rom_patch)
release_firmware(bp);
release_firmware(fw);
diff --git a/drivers/usb/atm/usbatm.c b/drivers/usb/atm/usbatm.c
index fbea856..9b53e8d 100644
--- a/drivers/usb/atm/usbatm.c
+++ b/drivers/usb/atm/usbatm.c
@@ -1333,6 +1333,7 @@
if (instance->atm_dev) {
sysfs_remove_link(&instance->atm_dev->class_dev.kobj, "device");
atm_dev_deregister(instance->atm_dev);
+ instance->atm_dev = NULL;
}
usbatm_put_instance(instance); /* taken in usbatm_usb_probe */
@@ -1348,7 +1349,7 @@
{
dbg("%s: driver version %s", __func__, DRIVER_VERSION);
- if (sizeof(struct usbatm_control) > sizeof(((struct sk_buff *) 0)->cb)) {
+ if (sizeof(struct usbatm_control) > FIELD_SIZEOF(struct sk_buff, cb)) {
printk(KERN_ERR "%s unusable with this kernel!\n", usbatm_driver_name);
return -EIO;
}
diff --git a/drivers/usb/atm/usbatm.h b/drivers/usb/atm/usbatm.h
index f6f4508..0863f85 100644
--- a/drivers/usb/atm/usbatm.h
+++ b/drivers/usb/atm/usbatm.h
@@ -204,4 +204,19 @@
struct urb *urbs[0];
};
+static inline void *to_usbatm_driver_data(struct usb_interface *intf)
+{
+ struct usbatm_data *usbatm_instance;
+
+ if (intf == NULL)
+ return NULL;
+
+ usbatm_instance = usb_get_intfdata(intf);
+
+ if (usbatm_instance == NULL) /* set NULL before unbind() */
+ return NULL;
+
+ return usbatm_instance->driver_data; /* set NULL after unbind() */
+}
+
#endif /* _USBATM_H_ */
diff --git a/drivers/usb/c67x00/c67x00-drv.c b/drivers/usb/c67x00/c67x00-drv.c
index 5633bc5..029ee4a 100644
--- a/drivers/usb/c67x00/c67x00-drv.c
+++ b/drivers/usb/c67x00/c67x00-drv.c
@@ -137,13 +137,13 @@
if (!c67x00)
return -ENOMEM;
- if (!request_mem_region(res->start, res->end - res->start + 1,
+ if (!request_mem_region(res->start, resource_size(res),
pdev->name)) {
dev_err(&pdev->dev, "Memory region busy\n");
ret = -EBUSY;
goto request_mem_failed;
}
- c67x00->hpi.base = ioremap(res->start, res->end - res->start + 1);
+ c67x00->hpi.base = ioremap(res->start, resource_size(res));
if (!c67x00->hpi.base) {
dev_err(&pdev->dev, "Unable to map HPI registers\n");
ret = -EIO;
@@ -182,7 +182,7 @@
request_irq_failed:
iounmap(c67x00->hpi.base);
map_failed:
- release_mem_region(res->start, res->end - res->start + 1);
+ release_mem_region(res->start, resource_size(res));
request_mem_failed:
kfree(c67x00);
@@ -208,7 +208,7 @@
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (res)
- release_mem_region(res->start, res->end - res->start + 1);
+ release_mem_region(res->start, resource_size(res));
kfree(c67x00);
diff --git a/drivers/usb/class/cdc-acm.c b/drivers/usb/class/cdc-acm.c
index 34d4eb9..975d556 100644
--- a/drivers/usb/class/cdc-acm.c
+++ b/drivers/usb/class/cdc-acm.c
@@ -170,6 +170,7 @@
{
wb->use = 0;
acm->transmitting--;
+ usb_autopm_put_interface_async(acm->control);
}
/*
@@ -211,9 +212,12 @@
}
dbg("%s susp_count: %d", __func__, acm->susp_count);
+ usb_autopm_get_interface_async(acm->control);
if (acm->susp_count) {
- acm->delayed_wb = wb;
- schedule_work(&acm->waker);
+ if (!acm->delayed_wb)
+ acm->delayed_wb = wb;
+ else
+ usb_autopm_put_interface_async(acm->control);
spin_unlock_irqrestore(&acm->write_lock, flags);
return 0; /* A white lie */
}
@@ -424,7 +428,6 @@
throttled = acm->throttle;
spin_unlock_irqrestore(&acm->throttle_lock, flags);
if (!throttled) {
- tty_buffer_request_room(tty, buf->size);
tty_insert_flip_string(tty, buf->base, buf->size);
tty_flip_buffer_push(tty);
} else {
@@ -534,23 +537,6 @@
tty_kref_put(tty);
}
-static void acm_waker(struct work_struct *waker)
-{
- struct acm *acm = container_of(waker, struct acm, waker);
- int rv;
-
- rv = usb_autopm_get_interface(acm->control);
- if (rv < 0) {
- dev_err(&acm->dev->dev, "Autopm failure in %s\n", __func__);
- return;
- }
- if (acm->delayed_wb) {
- acm_start_wb(acm, acm->delayed_wb);
- acm->delayed_wb = NULL;
- }
- usb_autopm_put_interface(acm->control);
-}
-
/*
* TTY handlers
*/
@@ -566,7 +552,7 @@
acm = acm_table[tty->index];
if (!acm || !acm->dev)
- goto err_out;
+ goto out;
else
rv = 0;
@@ -582,8 +568,9 @@
mutex_lock(&acm->mutex);
if (acm->port.count++) {
+ mutex_unlock(&acm->mutex);
usb_autopm_put_interface(acm->control);
- goto done;
+ goto out;
}
acm->ctrlurb->dev = acm->dev;
@@ -612,18 +599,18 @@
set_bit(ASYNCB_INITIALIZED, &acm->port.flags);
rv = tty_port_block_til_ready(&acm->port, tty, filp);
tasklet_schedule(&acm->urb_task);
-done:
+
mutex_unlock(&acm->mutex);
-err_out:
+out:
mutex_unlock(&open_mutex);
return rv;
full_bailout:
usb_kill_urb(acm->ctrlurb);
bail_out:
- usb_autopm_put_interface(acm->control);
acm->port.count--;
mutex_unlock(&acm->mutex);
+ usb_autopm_put_interface(acm->control);
early_bail:
mutex_unlock(&open_mutex);
tty_port_tty_set(&acm->port, NULL);
@@ -1023,7 +1010,7 @@
case USB_CDC_CALL_MANAGEMENT_TYPE:
call_management_function = buffer[3];
call_interface_num = buffer[4];
- if ((call_management_function & 3) != 3)
+ if ((quirks & NOT_A_MODEM) == 0 && (call_management_function & 3) != 3)
dev_err(&intf->dev, "This device cannot do calls on its own. It is not a modem.\n");
break;
default:
@@ -1178,7 +1165,6 @@
acm->urb_task.func = acm_rx_tasklet;
acm->urb_task.data = (unsigned long) acm;
INIT_WORK(&acm->work, acm_softint);
- INIT_WORK(&acm->waker, acm_waker);
init_waitqueue_head(&acm->drain_wait);
spin_lock_init(&acm->throttle_lock);
spin_lock_init(&acm->write_lock);
@@ -1343,7 +1329,6 @@
tasklet_enable(&acm->urb_task);
cancel_work_sync(&acm->work);
- cancel_work_sync(&acm->waker);
}
static void acm_disconnect(struct usb_interface *intf)
@@ -1435,6 +1420,7 @@
static int acm_resume(struct usb_interface *intf)
{
struct acm *acm = usb_get_intfdata(intf);
+ struct acm_wb *wb;
int rv = 0;
int cnt;
@@ -1449,6 +1435,21 @@
mutex_lock(&acm->mutex);
if (acm->port.count) {
rv = usb_submit_urb(acm->ctrlurb, GFP_NOIO);
+
+ spin_lock_irq(&acm->write_lock);
+ if (acm->delayed_wb) {
+ wb = acm->delayed_wb;
+ acm->delayed_wb = NULL;
+ spin_unlock_irq(&acm->write_lock);
+ acm_start_wb(acm, wb);
+ } else {
+ spin_unlock_irq(&acm->write_lock);
+ }
+
+ /*
+ * error checking is delayed because the write
+ * path must be handled at all costs
+ */
if (rv < 0)
goto err_out;
@@ -1460,6 +1461,23 @@
return rv;
}
+static int acm_reset_resume(struct usb_interface *intf)
+{
+ struct acm *acm = usb_get_intfdata(intf);
+ struct tty_struct *tty;
+
+ mutex_lock(&acm->mutex);
+ if (acm->port.count) {
+ tty = tty_port_tty_get(&acm->port);
+ if (tty) {
+ tty_hangup(tty);
+ tty_kref_put(tty);
+ }
+ }
+ mutex_unlock(&acm->mutex);
+ return acm_resume(intf);
+}
+
#endif /* CONFIG_PM */
#define NOKIA_PCSUITE_ACM_INFO(x) \
@@ -1471,7 +1489,7 @@
* USB driver structure.
*/
-static struct usb_device_id acm_ids[] = {
+static const struct usb_device_id acm_ids[] = {
/* quirky and broken devices */
{ USB_DEVICE(0x0870, 0x0001), /* Metricom GS Modem */
.driver_info = NO_UNION_NORMAL, /* has no union descriptor */
@@ -1576,6 +1594,11 @@
/* NOTE: non-Nokia COMM/ACM/0xff is likely MSFT RNDIS... NOT a modem! */
+ /* Support Lego NXT using pbLua firmware */
+ { USB_DEVICE(0x0694, 0xff00),
+ .driver_info = NOT_A_MODEM,
+ },
+
/* control interfaces with various AT-command sets */
{ USB_INTERFACE_INFO(USB_CLASS_COMM, USB_CDC_SUBCLASS_ACM,
USB_CDC_ACM_PROTO_AT_V25TER) },
@@ -1602,6 +1625,7 @@
#ifdef CONFIG_PM
.suspend = acm_suspend,
.resume = acm_resume,
+ .reset_resume = acm_reset_resume,
#endif
.id_table = acm_ids,
#ifdef CONFIG_PM
diff --git a/drivers/usb/class/cdc-acm.h b/drivers/usb/class/cdc-acm.h
index c4a0ee8..4a8e87e 100644
--- a/drivers/usb/class/cdc-acm.h
+++ b/drivers/usb/class/cdc-acm.h
@@ -112,7 +112,6 @@
struct mutex mutex;
struct usb_cdc_line_coding line; /* bits, stop, parity */
struct work_struct work; /* work queue entry for line discipline waking up */
- struct work_struct waker;
wait_queue_head_t drain_wait; /* close processing */
struct tasklet_struct urb_task; /* rx processing */
spinlock_t throttle_lock; /* synchronize throtteling and read callback */
@@ -137,3 +136,4 @@
#define NO_UNION_NORMAL 1
#define SINGLE_RX_URB 2
#define NO_CAP_LINE 4
+#define NOT_A_MODEM 8
diff --git a/drivers/usb/class/cdc-wdm.c b/drivers/usb/class/cdc-wdm.c
index 3e564bf..18aafcb 100644
--- a/drivers/usb/class/cdc-wdm.c
+++ b/drivers/usb/class/cdc-wdm.c
@@ -31,7 +31,7 @@
#define DRIVER_AUTHOR "Oliver Neukum"
#define DRIVER_DESC "USB Abstract Control Model driver for USB WCM Device Management"
-static struct usb_device_id wdm_ids[] = {
+static const struct usb_device_id wdm_ids[] = {
{
.match_flags = USB_DEVICE_ID_MATCH_INT_CLASS |
USB_DEVICE_ID_MATCH_INT_SUBCLASS,
diff --git a/drivers/usb/class/usblp.c b/drivers/usb/class/usblp.c
index 9bc112e..93b5f85 100644
--- a/drivers/usb/class/usblp.c
+++ b/drivers/usb/class/usblp.c
@@ -163,7 +163,6 @@
unsigned char used; /* True if open */
unsigned char present; /* True if not disconnected */
unsigned char bidir; /* interface is bidirectional */
- unsigned char sleeping; /* interface is suspended */
unsigned char no_paper; /* Paper Out happened */
unsigned char *device_id_string; /* IEEE 1284 DEVICE ID string (ptr) */
/* first 2 bytes are (big-endian) length */
@@ -191,7 +190,6 @@
dbg("quirks=%d", usblp->quirks);
dbg("used=%d", usblp->used);
dbg("bidir=%d", usblp->bidir);
- dbg("sleeping=%d", usblp->sleeping);
dbg("device_id_string=\"%s\"",
usblp->device_id_string ?
usblp->device_id_string + 2 :
@@ -376,7 +374,7 @@
static int handle_bidir (struct usblp *usblp)
{
- if (usblp->bidir && usblp->used && !usblp->sleeping) {
+ if (usblp->bidir && usblp->used) {
if (usblp_submit_read(usblp) < 0)
return -EIO;
}
@@ -503,11 +501,6 @@
goto done;
}
- if (usblp->sleeping) {
- retval = -ENODEV;
- goto done;
- }
-
dbg("usblp_ioctl: cmd=0x%x (%c nr=%d len=%d dir=%d)", cmd, _IOC_TYPE(cmd),
_IOC_NR(cmd), _IOC_SIZE(cmd), _IOC_DIR(cmd) );
@@ -914,8 +907,6 @@
return 0;
}
spin_unlock_irqrestore(&usblp->lock, flags);
- if (usblp->sleeping)
- return -ENODEV;
if (nonblock)
return -EAGAIN;
return 1;
@@ -968,8 +959,6 @@
return 0;
}
spin_unlock_irqrestore(&usblp->lock, flags);
- if (usblp->sleeping)
- return -ENODEV;
if (nonblock)
return -EAGAIN;
return 1;
@@ -1377,12 +1366,10 @@
mutex_unlock (&usblp_mutex);
}
-static int usblp_suspend (struct usb_interface *intf, pm_message_t message)
+static int usblp_suspend(struct usb_interface *intf, pm_message_t message)
{
struct usblp *usblp = usb_get_intfdata (intf);
- /* we take no more IO */
- usblp->sleeping = 1;
usblp_unlink_urbs(usblp);
#if 0 /* XXX Do we want this? What if someone is reading, should we fail? */
/* not strictly necessary, but just in case */
@@ -1393,18 +1380,17 @@
return 0;
}
-static int usblp_resume (struct usb_interface *intf)
+static int usblp_resume(struct usb_interface *intf)
{
struct usblp *usblp = usb_get_intfdata (intf);
int r;
- usblp->sleeping = 0;
r = handle_bidir (usblp);
return r;
}
-static struct usb_device_id usblp_ids [] = {
+static const struct usb_device_id usblp_ids[] = {
{ USB_DEVICE_INFO(7, 1, 1) },
{ USB_DEVICE_INFO(7, 1, 2) },
{ USB_DEVICE_INFO(7, 1, 3) },
diff --git a/drivers/usb/class/usbtmc.c b/drivers/usb/class/usbtmc.c
index 7c5f4e3..8588c09 100644
--- a/drivers/usb/class/usbtmc.c
+++ b/drivers/usb/class/usbtmc.c
@@ -48,7 +48,7 @@
*/
#define USBTMC_MAX_READS_TO_CLEAR_BULK_IN 100
-static struct usb_device_id usbtmc_devices[] = {
+static const struct usb_device_id usbtmc_devices[] = {
{ USB_INTERFACE_INFO(USB_CLASS_APP_SPEC, 3, 0), },
{ USB_INTERFACE_INFO(USB_CLASS_APP_SPEC, 3, 1), },
{ 0, } /* terminating entry */
diff --git a/drivers/usb/core/Kconfig b/drivers/usb/core/Kconfig
index ad92594..97a819c 100644
--- a/drivers/usb/core/Kconfig
+++ b/drivers/usb/core/Kconfig
@@ -91,8 +91,8 @@
If you are unsure about this, say N here.
config USB_SUSPEND
- bool "USB selective suspend/resume and wakeup"
- depends on USB && PM
+ bool "USB runtime power management (suspend/resume and wakeup)"
+ depends on USB && PM_RUNTIME
help
If you say Y here, you can use driver calls or the sysfs
"power/level" file to suspend or resume individual USB
diff --git a/drivers/usb/core/devices.c b/drivers/usb/core/devices.c
index 355dffc..c83c975 100644
--- a/drivers/usb/core/devices.c
+++ b/drivers/usb/core/devices.c
@@ -118,6 +118,7 @@
*/
static DECLARE_WAIT_QUEUE_HEAD(deviceconndiscwq);
+/* guarded by usbfs_mutex */
static unsigned int conndiscevcnt;
/* this struct stores the poll state for <mountpoint>/devices pollers */
@@ -156,7 +157,9 @@
void usbfs_conn_disc_event(void)
{
+ mutex_lock(&usbfs_mutex);
conndiscevcnt++;
+ mutex_unlock(&usbfs_mutex);
wake_up(&deviceconndiscwq);
}
@@ -629,42 +632,29 @@
static unsigned int usb_device_poll(struct file *file,
struct poll_table_struct *wait)
{
- struct usb_device_status *st = file->private_data;
+ struct usb_device_status *st;
unsigned int mask = 0;
- lock_kernel();
+ mutex_lock(&usbfs_mutex);
+ st = file->private_data;
if (!st) {
st = kmalloc(sizeof(struct usb_device_status), GFP_KERNEL);
-
- /* we may have dropped BKL -
- * need to check for having lost the race */
- if (file->private_data) {
- kfree(st);
- st = file->private_data;
- goto lost_race;
- }
- /* we haven't lost - check for allocation failure now */
if (!st) {
- unlock_kernel();
+ mutex_unlock(&usbfs_mutex);
return POLLIN;
}
- /*
- * need to prevent the module from being unloaded, since
- * proc_unregister does not call the release method and
- * we would have a memory leak
- */
st->lastev = conndiscevcnt;
file->private_data = st;
mask = POLLIN;
}
-lost_race:
+
if (file->f_mode & FMODE_READ)
poll_wait(file, &deviceconndiscwq, wait);
if (st->lastev != conndiscevcnt)
mask |= POLLIN;
st->lastev = conndiscevcnt;
- unlock_kernel();
+ mutex_unlock(&usbfs_mutex);
return mask;
}
@@ -685,7 +675,7 @@
{
loff_t ret;
- lock_kernel();
+ mutex_lock(&file->f_dentry->d_inode->i_mutex);
switch (orig) {
case 0:
@@ -701,7 +691,7 @@
ret = -EINVAL;
}
- unlock_kernel();
+ mutex_unlock(&file->f_dentry->d_inode->i_mutex);
return ret;
}
diff --git a/drivers/usb/core/devio.c b/drivers/usb/core/devio.c
index a678186..e909ff7 100644
--- a/drivers/usb/core/devio.c
+++ b/drivers/usb/core/devio.c
@@ -122,7 +122,7 @@
{
loff_t ret;
- lock_kernel();
+ mutex_lock(&file->f_dentry->d_inode->i_mutex);
switch (orig) {
case 0:
@@ -138,7 +138,7 @@
ret = -EINVAL;
}
- unlock_kernel();
+ mutex_unlock(&file->f_dentry->d_inode->i_mutex);
return ret;
}
@@ -310,7 +310,8 @@
static void snoop_urb(struct usb_device *udev,
void __user *userurb, int pipe, unsigned length,
- int timeout_or_status, enum snoop_when when)
+ int timeout_or_status, enum snoop_when when,
+ unsigned char *data, unsigned data_len)
{
static const char *types[] = {"isoc", "int", "ctrl", "bulk"};
static const char *dirs[] = {"out", "in"};
@@ -344,6 +345,11 @@
"status %d\n",
ep, t, d, length, timeout_or_status);
}
+
+ if (data && data_len > 0) {
+ print_hex_dump(KERN_DEBUG, "data: ", DUMP_PREFIX_NONE, 32, 1,
+ data, data_len, 1);
+ }
}
#define AS_CONTINUATION 1
@@ -410,7 +416,9 @@
}
snoop(&urb->dev->dev, "urb complete\n");
snoop_urb(urb->dev, as->userurb, urb->pipe, urb->actual_length,
- as->status, COMPLETE);
+ as->status, COMPLETE,
+ ((urb->transfer_flags & URB_DIR_MASK) == USB_DIR_OUT) ?
+ NULL : urb->transfer_buffer, urb->actual_length);
if (as->status < 0 && as->bulk_addr && as->status != -ECONNRESET &&
as->status != -ENOENT)
cancel_bulk_urbs(ps, as->bulk_addr);
@@ -653,20 +661,20 @@
const struct cred *cred = current_cred();
int ret;
- lock_kernel();
- /* Protect against simultaneous removal or release */
- mutex_lock(&usbfs_mutex);
-
ret = -ENOMEM;
ps = kmalloc(sizeof(struct dev_state), GFP_KERNEL);
if (!ps)
- goto out;
+ goto out_free_ps;
ret = -ENODEV;
+ /* Protect against simultaneous removal or release */
+ mutex_lock(&usbfs_mutex);
+
/* usbdev device-node */
if (imajor(inode) == USB_DEVICE_MAJOR)
dev = usbdev_lookup_by_devt(inode->i_rdev);
+
#ifdef CONFIG_USB_DEVICEFS
/* procfs file */
if (!dev) {
@@ -678,13 +686,19 @@
dev = NULL;
}
#endif
- if (!dev || dev->state == USB_STATE_NOTATTACHED)
- goto out;
+ mutex_unlock(&usbfs_mutex);
+
+ if (!dev)
+ goto out_free_ps;
+
+ usb_lock_device(dev);
+ if (dev->state == USB_STATE_NOTATTACHED)
+ goto out_unlock_device;
+
ret = usb_autoresume_device(dev);
if (ret)
- goto out;
+ goto out_unlock_device;
- ret = 0;
ps->dev = dev;
ps->file = file;
spin_lock_init(&ps->lock);
@@ -702,15 +716,16 @@
smp_wmb();
list_add_tail(&ps->list, &dev->filelist);
file->private_data = ps;
+ usb_unlock_device(dev);
snoop(&dev->dev, "opened by process %d: %s\n", task_pid_nr(current),
current->comm);
- out:
- if (ret) {
- kfree(ps);
- usb_put_dev(dev);
- }
- mutex_unlock(&usbfs_mutex);
- unlock_kernel();
+ return ret;
+
+ out_unlock_device:
+ usb_unlock_device(dev);
+ usb_put_dev(dev);
+ out_free_ps:
+ kfree(ps);
return ret;
}
@@ -724,10 +739,7 @@
usb_lock_device(dev);
usb_hub_release_all_ports(dev, ps);
- /* Protect against simultaneous open */
- mutex_lock(&usbfs_mutex);
list_del_init(&ps->list);
- mutex_unlock(&usbfs_mutex);
for (ifnum = 0; ps->ifclaimed && ifnum < 8*sizeof(ps->ifclaimed);
ifnum++) {
@@ -770,6 +782,13 @@
if (!tbuf)
return -ENOMEM;
tmo = ctrl.timeout;
+ snoop(&dev->dev, "control urb: bRequestType=%02x "
+ "bRequest=%02x wValue=%04x "
+ "wIndex=%04x wLength=%04x\n",
+ ctrl.bRequestType, ctrl.bRequest,
+ __le16_to_cpup(&ctrl.wValue),
+ __le16_to_cpup(&ctrl.wIndex),
+ __le16_to_cpup(&ctrl.wLength));
if (ctrl.bRequestType & 0x80) {
if (ctrl.wLength && !access_ok(VERIFY_WRITE, ctrl.data,
ctrl.wLength)) {
@@ -777,15 +796,15 @@
return -EINVAL;
}
pipe = usb_rcvctrlpipe(dev, 0);
- snoop_urb(dev, NULL, pipe, ctrl.wLength, tmo, SUBMIT);
+ snoop_urb(dev, NULL, pipe, ctrl.wLength, tmo, SUBMIT, NULL, 0);
usb_unlock_device(dev);
i = usb_control_msg(dev, pipe, ctrl.bRequest,
ctrl.bRequestType, ctrl.wValue, ctrl.wIndex,
tbuf, ctrl.wLength, tmo);
usb_lock_device(dev);
- snoop_urb(dev, NULL, pipe, max(i, 0), min(i, 0), COMPLETE);
-
+ snoop_urb(dev, NULL, pipe, max(i, 0), min(i, 0), COMPLETE,
+ tbuf, i);
if ((i > 0) && ctrl.wLength) {
if (copy_to_user(ctrl.data, tbuf, i)) {
free_page((unsigned long)tbuf);
@@ -800,14 +819,15 @@
}
}
pipe = usb_sndctrlpipe(dev, 0);
- snoop_urb(dev, NULL, pipe, ctrl.wLength, tmo, SUBMIT);
+ snoop_urb(dev, NULL, pipe, ctrl.wLength, tmo, SUBMIT,
+ tbuf, ctrl.wLength);
usb_unlock_device(dev);
i = usb_control_msg(dev, usb_sndctrlpipe(dev, 0), ctrl.bRequest,
ctrl.bRequestType, ctrl.wValue, ctrl.wIndex,
tbuf, ctrl.wLength, tmo);
usb_lock_device(dev);
- snoop_urb(dev, NULL, pipe, max(i, 0), min(i, 0), COMPLETE);
+ snoop_urb(dev, NULL, pipe, max(i, 0), min(i, 0), COMPLETE, NULL, 0);
}
free_page((unsigned long)tbuf);
if (i < 0 && i != -EPIPE) {
@@ -853,12 +873,12 @@
kfree(tbuf);
return -EINVAL;
}
- snoop_urb(dev, NULL, pipe, len1, tmo, SUBMIT);
+ snoop_urb(dev, NULL, pipe, len1, tmo, SUBMIT, NULL, 0);
usb_unlock_device(dev);
i = usb_bulk_msg(dev, pipe, tbuf, len1, &len2, tmo);
usb_lock_device(dev);
- snoop_urb(dev, NULL, pipe, len2, i, COMPLETE);
+ snoop_urb(dev, NULL, pipe, len2, i, COMPLETE, tbuf, len2);
if (!i && len2) {
if (copy_to_user(bulk.data, tbuf, len2)) {
@@ -873,12 +893,12 @@
return -EFAULT;
}
}
- snoop_urb(dev, NULL, pipe, len1, tmo, SUBMIT);
+ snoop_urb(dev, NULL, pipe, len1, tmo, SUBMIT, tbuf, len1);
usb_unlock_device(dev);
i = usb_bulk_msg(dev, pipe, tbuf, len1, &len2, tmo);
usb_lock_device(dev);
- snoop_urb(dev, NULL, pipe, len2, i, COMPLETE);
+ snoop_urb(dev, NULL, pipe, len2, i, COMPLETE, NULL, 0);
}
kfree(tbuf);
if (i < 0)
@@ -1097,6 +1117,13 @@
is_in = 0;
uurb->endpoint &= ~USB_DIR_IN;
}
+ snoop(&ps->dev->dev, "control urb: bRequestType=%02x "
+ "bRequest=%02x wValue=%04x "
+ "wIndex=%04x wLength=%04x\n",
+ dr->bRequestType, dr->bRequest,
+ __le16_to_cpup(&dr->wValue),
+ __le16_to_cpup(&dr->wIndex),
+ __le16_to_cpup(&dr->wLength));
break;
case USBDEVFS_URB_TYPE_BULK:
@@ -1104,13 +1131,25 @@
case USB_ENDPOINT_XFER_CONTROL:
case USB_ENDPOINT_XFER_ISOC:
return -EINVAL;
- /* allow single-shot interrupt transfers, at bogus rates */
+ case USB_ENDPOINT_XFER_INT:
+ /* allow single-shot interrupt transfers */
+ uurb->type = USBDEVFS_URB_TYPE_INTERRUPT;
+ goto interrupt_urb;
}
uurb->number_of_packets = 0;
if (uurb->buffer_length > MAX_USBFS_BUFFER_SIZE)
return -EINVAL;
break;
+ case USBDEVFS_URB_TYPE_INTERRUPT:
+ if (!usb_endpoint_xfer_int(&ep->desc))
+ return -EINVAL;
+ interrupt_urb:
+ uurb->number_of_packets = 0;
+ if (uurb->buffer_length > MAX_USBFS_BUFFER_SIZE)
+ return -EINVAL;
+ break;
+
case USBDEVFS_URB_TYPE_ISO:
/* arbitrary limit */
if (uurb->number_of_packets < 1 ||
@@ -1143,14 +1182,6 @@
uurb->buffer_length = totlen;
break;
- case USBDEVFS_URB_TYPE_INTERRUPT:
- uurb->number_of_packets = 0;
- if (!usb_endpoint_xfer_int(&ep->desc))
- return -EINVAL;
- if (uurb->buffer_length > MAX_USBFS_BUFFER_SIZE)
- return -EINVAL;
- break;
-
default:
return -EINVAL;
}
@@ -1236,7 +1267,9 @@
}
}
snoop_urb(ps->dev, as->userurb, as->urb->pipe,
- as->urb->transfer_buffer_length, 0, SUBMIT);
+ as->urb->transfer_buffer_length, 0, SUBMIT,
+ is_in ? NULL : as->urb->transfer_buffer,
+ uurb->buffer_length);
async_newpending(as);
if (usb_endpoint_xfer_bulk(&ep->desc)) {
@@ -1274,7 +1307,7 @@
dev_printk(KERN_DEBUG, &ps->dev->dev,
"usbfs: usb_submit_urb returned %d\n", ret);
snoop_urb(ps->dev, as->userurb, as->urb->pipe,
- 0, ret, COMPLETE);
+ 0, ret, COMPLETE, NULL, 0);
async_removepending(as);
free_async(as);
return ret;
@@ -1628,7 +1661,10 @@
if (driver == NULL || driver->ioctl == NULL) {
retval = -ENOTTY;
} else {
+ /* keep API that guarantees BKL */
+ lock_kernel();
retval = driver->ioctl(intf, ctl->ioctl_code, buf);
+ unlock_kernel();
if (retval == -ENOIOCTLCMD)
retval = -ENOTTY;
}
@@ -1711,6 +1747,7 @@
if (!(file->f_mode & FMODE_WRITE))
return -EPERM;
+
usb_lock_device(dev);
if (!connected(ps)) {
usb_unlock_device(dev);
@@ -1877,9 +1914,7 @@
{
int ret;
- lock_kernel();
ret = usbdev_do_ioctl(file, cmd, (void __user *)arg);
- unlock_kernel();
return ret;
}
@@ -1890,9 +1925,7 @@
{
int ret;
- lock_kernel();
ret = usbdev_do_ioctl(file, cmd, compat_ptr(arg));
- unlock_kernel();
return ret;
}
diff --git a/drivers/usb/core/driver.c b/drivers/usb/core/driver.c
index f2f055e..a7037bf 100644
--- a/drivers/usb/core/driver.c
+++ b/drivers/usb/core/driver.c
@@ -25,7 +25,7 @@
#include <linux/device.h>
#include <linux/usb.h>
#include <linux/usb/quirks.h>
-#include <linux/workqueue.h>
+#include <linux/pm_runtime.h>
#include "hcd.h"
#include "usb.h"
@@ -221,7 +221,7 @@
{
struct usb_device_driver *udriver = to_usb_device_driver(dev->driver);
struct usb_device *udev = to_usb_device(dev);
- int error = -ENODEV;
+ int error = 0;
dev_dbg(dev, "%s\n", __func__);
@@ -230,18 +230,23 @@
/* The device should always appear to be in use
* unless the driver suports autosuspend.
*/
- udev->pm_usage_cnt = !(udriver->supports_autosuspend);
+ if (!udriver->supports_autosuspend)
+ error = usb_autoresume_device(udev);
- error = udriver->probe(udev);
+ if (!error)
+ error = udriver->probe(udev);
return error;
}
/* called from driver core with dev locked */
static int usb_unbind_device(struct device *dev)
{
+ struct usb_device *udev = to_usb_device(dev);
struct usb_device_driver *udriver = to_usb_device_driver(dev->driver);
- udriver->disconnect(to_usb_device(dev));
+ udriver->disconnect(udev);
+ if (!udriver->supports_autosuspend)
+ usb_autosuspend_device(udev);
return 0;
}
@@ -274,60 +279,62 @@
intf->needs_binding = 0;
if (usb_device_is_owned(udev))
- return -ENODEV;
+ return error;
if (udev->authorized == 0) {
dev_err(&intf->dev, "Device is not authorized for usage\n");
- return -ENODEV;
+ return error;
}
id = usb_match_id(intf, driver->id_table);
if (!id)
id = usb_match_dynamic_id(intf, driver);
- if (id) {
- dev_dbg(dev, "%s - got id\n", __func__);
+ if (!id)
+ return error;
- error = usb_autoresume_device(udev);
- if (error)
- return error;
+ dev_dbg(dev, "%s - got id\n", __func__);
- /* Interface "power state" doesn't correspond to any hardware
- * state whatsoever. We use it to record when it's bound to
- * a driver that may start I/0: it's not frozen/quiesced.
- */
- mark_active(intf);
- intf->condition = USB_INTERFACE_BINDING;
+ error = usb_autoresume_device(udev);
+ if (error)
+ return error;
- /* The interface should always appear to be in use
- * unless the driver suports autosuspend.
- */
- atomic_set(&intf->pm_usage_cnt, !driver->supports_autosuspend);
+ intf->condition = USB_INTERFACE_BINDING;
- /* Carry out a deferred switch to altsetting 0 */
- if (intf->needs_altsetting0) {
- error = usb_set_interface(udev, intf->altsetting[0].
- desc.bInterfaceNumber, 0);
- if (error < 0)
- goto err;
+ /* Bound interfaces are initially active. They are
+ * runtime-PM-enabled only if the driver has autosuspend support.
+ * They are sensitive to their children's power states.
+ */
+ pm_runtime_set_active(dev);
+ pm_suspend_ignore_children(dev, false);
+ if (driver->supports_autosuspend)
+ pm_runtime_enable(dev);
- intf->needs_altsetting0 = 0;
- }
-
- error = driver->probe(intf, id);
- if (error)
+ /* Carry out a deferred switch to altsetting 0 */
+ if (intf->needs_altsetting0) {
+ error = usb_set_interface(udev, intf->altsetting[0].
+ desc.bInterfaceNumber, 0);
+ if (error < 0)
goto err;
-
- intf->condition = USB_INTERFACE_BOUND;
- usb_autosuspend_device(udev);
+ intf->needs_altsetting0 = 0;
}
+ error = driver->probe(intf, id);
+ if (error)
+ goto err;
+
+ intf->condition = USB_INTERFACE_BOUND;
+ usb_autosuspend_device(udev);
return error;
-err:
- mark_quiesced(intf);
+ err:
intf->needs_remote_wakeup = 0;
intf->condition = USB_INTERFACE_UNBOUND;
usb_cancel_queued_reset(intf);
+
+ /* Unbound interfaces are always runtime-PM-disabled and -suspended */
+ pm_runtime_disable(dev);
+ pm_runtime_set_suspended(dev);
+
usb_autosuspend_device(udev);
return error;
}
@@ -377,9 +384,17 @@
usb_set_intfdata(intf, NULL);
intf->condition = USB_INTERFACE_UNBOUND;
- mark_quiesced(intf);
intf->needs_remote_wakeup = 0;
+ /* Unbound interfaces are always runtime-PM-disabled and -suspended */
+ pm_runtime_disable(dev);
+ pm_runtime_set_suspended(dev);
+
+ /* Undo any residual usb_autopm_get_interface_* calls */
+ for (r = atomic_read(&intf->pm_usage_cnt); r > 0; --r)
+ usb_autopm_put_interface_no_suspend(intf);
+ atomic_set(&intf->pm_usage_cnt, 0);
+
if (!error)
usb_autosuspend_device(udev);
@@ -410,7 +425,6 @@
struct usb_interface *iface, void *priv)
{
struct device *dev = &iface->dev;
- struct usb_device *udev = interface_to_usbdev(iface);
int retval = 0;
if (dev->driver)
@@ -420,11 +434,16 @@
usb_set_intfdata(iface, priv);
iface->needs_binding = 0;
- usb_pm_lock(udev);
iface->condition = USB_INTERFACE_BOUND;
- mark_active(iface);
- atomic_set(&iface->pm_usage_cnt, !driver->supports_autosuspend);
- usb_pm_unlock(udev);
+
+ /* Bound interfaces are initially active. They are
+ * runtime-PM-enabled only if the driver has autosuspend support.
+ * They are sensitive to their children's power states.
+ */
+ pm_runtime_set_active(dev);
+ pm_suspend_ignore_children(dev, false);
+ if (driver->supports_autosuspend)
+ pm_runtime_enable(dev);
/* if interface was already added, bind now; else let
* the future device_add() bind it, bypassing probe()
@@ -691,9 +710,6 @@
{
struct usb_device *usb_dev;
- /* driver is often null here; dev_dbg() would oops */
- pr_debug("usb %s: uevent\n", dev_name(dev));
-
if (is_usb_device(dev)) {
usb_dev = to_usb_device(dev);
} else if (is_usb_interface(dev)) {
@@ -705,6 +721,7 @@
}
if (usb_dev->devnum < 0) {
+ /* driver is often null here; dev_dbg() would oops */
pr_debug("usb %s: already deleted?\n", dev_name(dev));
return -ENODEV;
}
@@ -983,7 +1000,6 @@
}
}
-/* Caller has locked udev's pm_mutex */
static int usb_suspend_device(struct usb_device *udev, pm_message_t msg)
{
struct usb_device_driver *udriver;
@@ -1007,7 +1023,6 @@
return status;
}
-/* Caller has locked udev's pm_mutex */
static int usb_resume_device(struct usb_device *udev, pm_message_t msg)
{
struct usb_device_driver *udriver;
@@ -1041,27 +1056,20 @@
return status;
}
-/* Caller has locked intf's usb_device's pm mutex */
static int usb_suspend_interface(struct usb_device *udev,
struct usb_interface *intf, pm_message_t msg)
{
struct usb_driver *driver;
int status = 0;
- /* with no hardware, USB interfaces only use FREEZE and ON states */
- if (udev->state == USB_STATE_NOTATTACHED || !is_active(intf))
- goto done;
-
- /* This can happen; see usb_driver_release_interface() */
- if (intf->condition == USB_INTERFACE_UNBOUND)
+ if (udev->state == USB_STATE_NOTATTACHED ||
+ intf->condition == USB_INTERFACE_UNBOUND)
goto done;
driver = to_usb_driver(intf->dev.driver);
if (driver->suspend) {
status = driver->suspend(intf, msg);
- if (status == 0)
- mark_quiesced(intf);
- else if (!(msg.event & PM_EVENT_AUTO))
+ if (status && !(msg.event & PM_EVENT_AUTO))
dev_err(&intf->dev, "%s error %d\n",
"suspend", status);
} else {
@@ -1069,7 +1077,6 @@
intf->needs_binding = 1;
dev_warn(&intf->dev, "no %s for driver %s?\n",
"suspend", driver->name);
- mark_quiesced(intf);
}
done:
@@ -1077,14 +1084,13 @@
return status;
}
-/* Caller has locked intf's usb_device's pm_mutex */
static int usb_resume_interface(struct usb_device *udev,
struct usb_interface *intf, pm_message_t msg, int reset_resume)
{
struct usb_driver *driver;
int status = 0;
- if (udev->state == USB_STATE_NOTATTACHED || is_active(intf))
+ if (udev->state == USB_STATE_NOTATTACHED)
goto done;
/* Don't let autoresume interfere with unbinding */
@@ -1135,90 +1141,11 @@
done:
dev_vdbg(&intf->dev, "%s: status %d\n", __func__, status);
- if (status == 0 && intf->condition == USB_INTERFACE_BOUND)
- mark_active(intf);
/* Later we will unbind the driver and/or reprobe, if necessary */
return status;
}
-#ifdef CONFIG_USB_SUSPEND
-
-/* Internal routine to check whether we may autosuspend a device. */
-static int autosuspend_check(struct usb_device *udev, int reschedule)
-{
- int i;
- struct usb_interface *intf;
- unsigned long suspend_time, j;
-
- /* For autosuspend, fail fast if anything is in use or autosuspend
- * is disabled. Also fail if any interfaces require remote wakeup
- * but it isn't available.
- */
- if (udev->pm_usage_cnt > 0)
- return -EBUSY;
- if (udev->autosuspend_delay < 0 || udev->autosuspend_disabled)
- return -EPERM;
-
- suspend_time = udev->last_busy + udev->autosuspend_delay;
- if (udev->actconfig) {
- for (i = 0; i < udev->actconfig->desc.bNumInterfaces; i++) {
- intf = udev->actconfig->interface[i];
- if (!is_active(intf))
- continue;
- if (atomic_read(&intf->pm_usage_cnt) > 0)
- return -EBUSY;
- if (intf->needs_remote_wakeup &&
- !udev->do_remote_wakeup) {
- dev_dbg(&udev->dev, "remote wakeup needed "
- "for autosuspend\n");
- return -EOPNOTSUPP;
- }
-
- /* Don't allow autosuspend if the device will need
- * a reset-resume and any of its interface drivers
- * doesn't include support.
- */
- if (udev->quirks & USB_QUIRK_RESET_RESUME) {
- struct usb_driver *driver;
-
- driver = to_usb_driver(intf->dev.driver);
- if (!driver->reset_resume ||
- intf->needs_remote_wakeup)
- return -EOPNOTSUPP;
- }
- }
- }
-
- /* If everything is okay but the device hasn't been idle for long
- * enough, queue a delayed autosuspend request. If the device
- * _has_ been idle for long enough and the reschedule flag is set,
- * likewise queue a delayed (1 second) autosuspend request.
- */
- j = jiffies;
- if (time_before(j, suspend_time))
- reschedule = 1;
- else
- suspend_time = j + HZ;
- if (reschedule) {
- if (!timer_pending(&udev->autosuspend.timer)) {
- queue_delayed_work(ksuspend_usb_wq, &udev->autosuspend,
- round_jiffies_up_relative(suspend_time - j));
- }
- return -EAGAIN;
- }
- return 0;
-}
-
-#else
-
-static inline int autosuspend_check(struct usb_device *udev, int reschedule)
-{
- return 0;
-}
-
-#endif /* CONFIG_USB_SUSPEND */
-
/**
* usb_suspend_both - suspend a USB device and its interfaces
* @udev: the usb_device to suspend
@@ -1230,27 +1157,12 @@
* all the interfaces which were suspended are resumed so that they remain
* in the same state as the device.
*
- * If an autosuspend is in progress the routine checks first to make sure
- * that neither the device itself or any of its active interfaces is in use
- * (pm_usage_cnt is greater than 0). If they are, the autosuspend fails.
- *
- * If the suspend succeeds, the routine recursively queues an autosuspend
- * request for @udev's parent device, thereby propagating the change up
- * the device tree. If all of the parent's children are now suspended,
- * the parent will autosuspend in turn.
- *
- * The suspend method calls are subject to mutual exclusion under control
- * of @udev's pm_mutex. Many of these calls are also under the protection
- * of @udev's device lock (including all requests originating outside the
- * USB subsystem), but autosuspend requests generated by a child device or
- * interface driver may not be. Usbcore will insure that the method calls
- * do not arrive during bind, unbind, or reset operations. However, drivers
- * must be prepared to handle suspend calls arriving at unpredictable times.
- * The only way to block such calls is to do an autoresume (preventing
- * autosuspends) while holding @udev's device lock (preventing outside
- * suspends).
- *
- * The caller must hold @udev->pm_mutex.
+ * Autosuspend requests originating from a child device or an interface
+ * driver may be made without the protection of @udev's device lock, but
+ * all other suspend calls will hold the lock. Usbcore will ensure that
+ * method calls do not arrive during bind, unbind, or reset operations.
+ * However, drivers must be prepared to handle suspend calls arriving at
+ * unpredictable times.
*
* This routine can run only in process context.
*/
@@ -1259,20 +1171,11 @@
int status = 0;
int i = 0;
struct usb_interface *intf;
- struct usb_device *parent = udev->parent;
if (udev->state == USB_STATE_NOTATTACHED ||
udev->state == USB_STATE_SUSPENDED)
goto done;
- udev->do_remote_wakeup = device_may_wakeup(&udev->dev);
-
- if (msg.event & PM_EVENT_AUTO) {
- status = autosuspend_check(udev, 0);
- if (status < 0)
- goto done;
- }
-
/* Suspend all the interfaces and then udev itself */
if (udev->actconfig) {
for (; i < udev->actconfig->desc.bNumInterfaces; i++) {
@@ -1287,35 +1190,21 @@
/* If the suspend failed, resume interfaces that did get suspended */
if (status != 0) {
- pm_message_t msg2;
-
- msg2.event = msg.event ^ (PM_EVENT_SUSPEND | PM_EVENT_RESUME);
+ msg.event ^= (PM_EVENT_SUSPEND | PM_EVENT_RESUME);
while (--i >= 0) {
intf = udev->actconfig->interface[i];
- usb_resume_interface(udev, intf, msg2, 0);
+ usb_resume_interface(udev, intf, msg, 0);
}
- /* Try another autosuspend when the interfaces aren't busy */
- if (msg.event & PM_EVENT_AUTO)
- autosuspend_check(udev, status == -EBUSY);
-
- /* If the suspend succeeded then prevent any more URB submissions,
- * flush any outstanding URBs, and propagate the suspend up the tree.
+ /* If the suspend succeeded then prevent any more URB submissions
+ * and flush any outstanding URBs.
*/
} else {
- cancel_delayed_work(&udev->autosuspend);
udev->can_submit = 0;
for (i = 0; i < 16; ++i) {
usb_hcd_flush_endpoint(udev, udev->ep_out[i]);
usb_hcd_flush_endpoint(udev, udev->ep_in[i]);
}
-
- /* If this is just a FREEZE or a PRETHAW, udev might
- * not really be suspended. Only true suspends get
- * propagated up the device tree.
- */
- if (parent && udev->state == USB_STATE_SUSPENDED)
- usb_autosuspend_device(parent);
}
done:
@@ -1332,23 +1221,12 @@
* the resume method for @udev and then calls the resume methods for all
* the interface drivers in @udev.
*
- * Before starting the resume, the routine calls itself recursively for
- * the parent device of @udev, thereby propagating the change up the device
- * tree and assuring that @udev will be able to resume. If the parent is
- * unable to resume successfully, the routine fails.
- *
- * The resume method calls are subject to mutual exclusion under control
- * of @udev's pm_mutex. Many of these calls are also under the protection
- * of @udev's device lock (including all requests originating outside the
- * USB subsystem), but autoresume requests generated by a child device or
- * interface driver may not be. Usbcore will insure that the method calls
- * do not arrive during bind, unbind, or reset operations. However, drivers
- * must be prepared to handle resume calls arriving at unpredictable times.
- * The only way to block such calls is to do an autoresume (preventing
- * other autoresumes) while holding @udev's device lock (preventing outside
- * resumes).
- *
- * The caller must hold @udev->pm_mutex.
+ * Autoresume requests originating from a child device or an interface
+ * driver may be made without the protection of @udev's device lock, but
+ * all other resume calls will hold the lock. Usbcore will ensure that
+ * method calls do not arrive during bind, unbind, or reset operations.
+ * However, drivers must be prepared to handle resume calls arriving at
+ * unpredictable times.
*
* This routine can run only in process context.
*/
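Both of these kernel-doc comments make the same point: with pm_mutex removed, suspend and resume method calls are serialized only by the device lock (and not at all for child- or interface-initiated requests), so drivers must cope with calls arriving at unpredictable times. A minimal sketch of how a hypothetical interface driver might guard its own state with a private mutex (illustrative only, not part of this patch; all skel_* names are made up):

#include <linux/mutex.h>
#include <linux/usb.h>

struct skel_priv {				/* hypothetical per-interface state */
	struct usb_interface	*intf;
	struct mutex		io_mutex;	/* serializes I/O against suspend */
	int			suspended;
};

static int skel_suspend(struct usb_interface *intf, pm_message_t msg)
{
	struct skel_priv *priv = usb_get_intfdata(intf);

	mutex_lock(&priv->io_mutex);
	priv->suspended = 1;			/* stop submitting new URBs */
	mutex_unlock(&priv->io_mutex);
	return 0;
}

static int skel_resume(struct usb_interface *intf)
{
	struct skel_priv *priv = usb_get_intfdata(intf);

	mutex_lock(&priv->io_mutex);
	priv->suspended = 0;			/* I/O paths may resubmit URBs */
	mutex_unlock(&priv->io_mutex);
	return 0;
}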
@@ -1357,48 +1235,18 @@
int status = 0;
int i;
struct usb_interface *intf;
- struct usb_device *parent = udev->parent;
- cancel_delayed_work(&udev->autosuspend);
if (udev->state == USB_STATE_NOTATTACHED) {
status = -ENODEV;
goto done;
}
udev->can_submit = 1;
- /* Propagate the resume up the tree, if necessary */
- if (udev->state == USB_STATE_SUSPENDED) {
- if (parent) {
- status = usb_autoresume_device(parent);
- if (status == 0) {
- status = usb_resume_device(udev, msg);
- if (status || udev->state ==
- USB_STATE_NOTATTACHED) {
- usb_autosuspend_device(parent);
-
- /* It's possible usb_resume_device()
- * failed after the port was
- * unsuspended, causing udev to be
- * logically disconnected. We don't
- * want usb_disconnect() to autosuspend
- * the parent again, so tell it that
- * udev disconnected while still
- * suspended. */
- if (udev->state ==
- USB_STATE_NOTATTACHED)
- udev->discon_suspended = 1;
- }
- }
- } else {
-
- /* We can't progagate beyond the USB subsystem,
- * so if a root hub's controller is suspended
- * then we're stuck. */
- status = usb_resume_device(udev, msg);
- }
- } else if (udev->reset_resume)
+ /* Resume the device */
+ if (udev->state == USB_STATE_SUSPENDED || udev->reset_resume)
status = usb_resume_device(udev, msg);
+ /* Resume the interfaces */
if (status == 0 && udev->actconfig) {
for (i = 0; i < udev->actconfig->desc.bNumInterfaces; i++) {
intf = udev->actconfig->interface[i];
@@ -1414,55 +1262,94 @@
return status;
}
-#ifdef CONFIG_USB_SUSPEND
-
-/* Internal routine to adjust a device's usage counter and change
- * its autosuspend state.
- */
-static int usb_autopm_do_device(struct usb_device *udev, int inc_usage_cnt)
+/* The device lock is held by the PM core */
+int usb_suspend(struct device *dev, pm_message_t msg)
{
- int status = 0;
+ struct usb_device *udev = to_usb_device(dev);
- usb_pm_lock(udev);
- udev->pm_usage_cnt += inc_usage_cnt;
- WARN_ON(udev->pm_usage_cnt < 0);
- if (inc_usage_cnt)
- udev->last_busy = jiffies;
- if (inc_usage_cnt >= 0 && udev->pm_usage_cnt > 0) {
- if (udev->state == USB_STATE_SUSPENDED)
- status = usb_resume_both(udev, PMSG_AUTO_RESUME);
- if (status != 0)
- udev->pm_usage_cnt -= inc_usage_cnt;
- else if (inc_usage_cnt)
+ do_unbind_rebind(udev, DO_UNBIND);
+ udev->do_remote_wakeup = device_may_wakeup(&udev->dev);
+ return usb_suspend_both(udev, msg);
+}
+
+/* The device lock is held by the PM core */
+int usb_resume(struct device *dev, pm_message_t msg)
+{
+ struct usb_device *udev = to_usb_device(dev);
+ int status;
+
+ /* For PM complete calls, all we do is rebind interfaces */
+ if (msg.event == PM_EVENT_ON) {
+ if (udev->state != USB_STATE_NOTATTACHED)
+ do_unbind_rebind(udev, DO_REBIND);
+ status = 0;
+
+ /* For all other calls, take the device back to full power and
+ * tell the PM core in case it was autosuspended previously.
+ */
+ } else {
+ status = usb_resume_both(udev, msg);
+ if (status == 0) {
+ pm_runtime_disable(dev);
+ pm_runtime_set_active(dev);
+ pm_runtime_enable(dev);
udev->last_busy = jiffies;
- } else if (inc_usage_cnt <= 0 && udev->pm_usage_cnt <= 0) {
- status = usb_suspend_both(udev, PMSG_AUTO_SUSPEND);
+ }
}
- usb_pm_unlock(udev);
+
+ /* Avoid PM error messages for devices disconnected while suspended
+ * as we'll display regular disconnect messages just a bit later.
+ */
+ if (status == -ENODEV)
+ status = 0;
return status;
}
-/* usb_autosuspend_work - callback routine to autosuspend a USB device */
-void usb_autosuspend_work(struct work_struct *work)
+#endif /* CONFIG_PM */
+
+#ifdef CONFIG_USB_SUSPEND
+
+/**
+ * usb_enable_autosuspend - allow a USB device to be autosuspended
+ * @udev: the USB device which may be autosuspended
+ *
+ * This routine allows @udev to be autosuspended. An autosuspend won't
+ * take place until the autosuspend_delay has elapsed and all the other
+ * necessary conditions are satisfied.
+ *
+ * The caller must hold @udev's device lock.
+ */
+int usb_enable_autosuspend(struct usb_device *udev)
{
- struct usb_device *udev =
- container_of(work, struct usb_device, autosuspend.work);
-
- usb_autopm_do_device(udev, 0);
+ if (udev->autosuspend_disabled) {
+ udev->autosuspend_disabled = 0;
+ usb_autosuspend_device(udev);
+ }
+ return 0;
}
+EXPORT_SYMBOL_GPL(usb_enable_autosuspend);
-/* usb_autoresume_work - callback routine to autoresume a USB device */
-void usb_autoresume_work(struct work_struct *work)
+/**
+ * usb_disable_autosuspend - prevent a USB device from being autosuspended
+ * @udev: the USB device which may not be autosuspended
+ *
+ * This routine prevents @udev from being autosuspended and wakes it up
+ * if it is already autosuspended.
+ *
+ * The caller must hold @udev's device lock.
+ */
+int usb_disable_autosuspend(struct usb_device *udev)
{
- struct usb_device *udev =
- container_of(work, struct usb_device, autoresume);
+ int rc = 0;
- /* Wake it up, let the drivers do their thing, and then put it
- * back to sleep.
- */
- if (usb_autopm_do_device(udev, 1) == 0)
- usb_autopm_do_device(udev, -1);
+ if (!udev->autosuspend_disabled) {
+ rc = usb_autoresume_device(udev);
+ if (rc == 0)
+ udev->autosuspend_disabled = 1;
+ }
+ return rc;
}
+EXPORT_SYMBOL_GPL(usb_disable_autosuspend);
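usb_enable_autosuspend() and usb_disable_autosuspend() act on the whole usb_device and require the device lock. A minimal sketch of a hypothetical caller enabling autosuspend from probe(), where the device lock is already held (illustrative only; the hub driver does the same thing in hub_probe() later in this patch):

#include <linux/usb.h>

static int skel_probe(struct usb_interface *intf,
		      const struct usb_device_id *id)
{
	struct usb_device *udev = interface_to_usbdev(intf);

	/* probe() runs with udev's device lock held, so this is safe here */
	usb_enable_autosuspend(udev);

	/* ... normal driver setup ... */
	return 0;
}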
/**
* usb_autosuspend_device - delayed autosuspend of a USB device and its interfaces
@@ -1472,15 +1359,11 @@
* @udev and wants to allow it to autosuspend. Examples would be when
* @udev's device file in usbfs is closed or after a configuration change.
*
- * @udev's usage counter is decremented. If it or any of the usage counters
- * for an active interface is greater than 0, no autosuspend request will be
- * queued. (If an interface driver does not support autosuspend then its
- * usage counter is permanently positive.) Furthermore, if an interface
- * driver requires remote-wakeup capability during autosuspend but remote
- * wakeup is disabled, the autosuspend will fail.
+ * @udev's usage counter is decremented; if it drops to 0 and all the
+ * interfaces are inactive then a delayed autosuspend will be attempted.
+ * The attempt may fail (see autosuspend_check()).
*
- * Often the caller will hold @udev's device lock, but this is not
- * necessary.
+ * The caller must hold @udev's device lock.
*
* This routine can run only in process context.
*/
@@ -1488,9 +1371,11 @@
{
int status;
- status = usb_autopm_do_device(udev, -1);
- dev_vdbg(&udev->dev, "%s: cnt %d\n",
- __func__, udev->pm_usage_cnt);
+ udev->last_busy = jiffies;
+ status = pm_runtime_put_sync(&udev->dev);
+ dev_vdbg(&udev->dev, "%s: cnt %d -> %d\n",
+ __func__, atomic_read(&udev->dev.power.usage_count),
+ status);
}
/**
@@ -1500,17 +1385,22 @@
* This routine should be called when a core subsystem thinks @udev may
* be ready to autosuspend.
*
- * @udev's usage counter left unchanged. If it or any of the usage counters
- * for an active interface is greater than 0, or autosuspend is not allowed
- * for any other reason, no autosuspend request will be queued.
+ * @udev's usage counter is left unchanged. If it is 0 and all the interfaces
+ * are inactive then an autosuspend will be attempted. The attempt may
+ * fail or be delayed.
+ *
+ * The caller must hold @udev's device lock.
*
* This routine can run only in process context.
*/
void usb_try_autosuspend_device(struct usb_device *udev)
{
- usb_autopm_do_device(udev, 0);
- dev_vdbg(&udev->dev, "%s: cnt %d\n",
- __func__, udev->pm_usage_cnt);
+ int status;
+
+ status = pm_runtime_idle(&udev->dev);
+ dev_vdbg(&udev->dev, "%s: cnt %d -> %d\n",
+ __func__, atomic_read(&udev->dev.power.usage_count),
+ status);
}
/**
@@ -1519,16 +1409,15 @@
*
* This routine should be called when a core subsystem wants to use @udev
* and needs to guarantee that it is not suspended. No autosuspend will
- * occur until usb_autosuspend_device is called. (Note that this will not
- * prevent suspend events originating in the PM core.) Examples would be
- * when @udev's device file in usbfs is opened or when a remote-wakeup
+ * occur until usb_autosuspend_device() is called. (Note that this will
+ * not prevent suspend events originating in the PM core.) Examples would
+ * be when @udev's device file in usbfs is opened or when a remote-wakeup
* request is received.
*
* @udev's usage counter is incremented to prevent subsequent autosuspends.
* However if the autoresume fails then the usage counter is re-decremented.
*
- * Often the caller will hold @udev's device lock, but this is not
- * necessary (and attempting it might cause deadlock).
+ * The caller must hold @udev's device lock.
*
* This routine can run only in process context.
*/
@@ -1536,42 +1425,14 @@
{
int status;
- status = usb_autopm_do_device(udev, 1);
- dev_vdbg(&udev->dev, "%s: status %d cnt %d\n",
- __func__, status, udev->pm_usage_cnt);
- return status;
-}
-
-/* Internal routine to adjust an interface's usage counter and change
- * its device's autosuspend state.
- */
-static int usb_autopm_do_interface(struct usb_interface *intf,
- int inc_usage_cnt)
-{
- struct usb_device *udev = interface_to_usbdev(intf);
- int status = 0;
-
- usb_pm_lock(udev);
- if (intf->condition == USB_INTERFACE_UNBOUND)
- status = -ENODEV;
- else {
- atomic_add(inc_usage_cnt, &intf->pm_usage_cnt);
- udev->last_busy = jiffies;
- if (inc_usage_cnt >= 0 &&
- atomic_read(&intf->pm_usage_cnt) > 0) {
- if (udev->state == USB_STATE_SUSPENDED)
- status = usb_resume_both(udev,
- PMSG_AUTO_RESUME);
- if (status != 0)
- atomic_sub(inc_usage_cnt, &intf->pm_usage_cnt);
- else
- udev->last_busy = jiffies;
- } else if (inc_usage_cnt <= 0 &&
- atomic_read(&intf->pm_usage_cnt) <= 0) {
- status = usb_suspend_both(udev, PMSG_AUTO_SUSPEND);
- }
- }
- usb_pm_unlock(udev);
+ status = pm_runtime_get_sync(&udev->dev);
+ if (status < 0)
+ pm_runtime_put_sync(&udev->dev);
+ dev_vdbg(&udev->dev, "%s: cnt %d -> %d\n",
+ __func__, atomic_read(&udev->dev.power.usage_count),
+ status);
+ if (status > 0)
+ status = 0;
return status;
}
@@ -1585,34 +1446,25 @@
* closed.
*
* The routine decrements @intf's usage counter. When the counter reaches
- * 0, a delayed autosuspend request for @intf's device is queued. When
- * the delay expires, if @intf->pm_usage_cnt is still <= 0 along with all
- * the other usage counters for the sibling interfaces and @intf's
- * usb_device, the device and all its interfaces will be autosuspended.
- *
- * Note that @intf->pm_usage_cnt is owned by the interface driver. The
- * core will not change its value other than the increment and decrement
- * in usb_autopm_get_interface and usb_autopm_put_interface. The driver
- * may use this simple counter-oriented discipline or may set the value
- * any way it likes.
+ * 0, a delayed autosuspend request for @intf's device is attempted. The
+ * attempt may fail (see autosuspend_check()).
*
* If the driver has set @intf->needs_remote_wakeup then autosuspend will
* take place only if the device's remote-wakeup facility is enabled.
*
- * Suspend method calls queued by this routine can arrive at any time
- * while @intf is resumed and its usage counter is equal to 0. They are
- * not protected by the usb_device's lock but only by its pm_mutex.
- * Drivers must provide their own synchronization.
- *
* This routine can run only in process context.
*/
void usb_autopm_put_interface(struct usb_interface *intf)
{
- int status;
+ struct usb_device *udev = interface_to_usbdev(intf);
+ int status;
- status = usb_autopm_do_interface(intf, -1);
- dev_vdbg(&intf->dev, "%s: status %d cnt %d\n",
- __func__, status, atomic_read(&intf->pm_usage_cnt));
+ udev->last_busy = jiffies;
+ atomic_dec(&intf->pm_usage_cnt);
+ status = pm_runtime_put_sync(&intf->dev);
+ dev_vdbg(&intf->dev, "%s: cnt %d -> %d\n",
+ __func__, atomic_read(&intf->dev.power.usage_count),
+ status);
}
EXPORT_SYMBOL_GPL(usb_autopm_put_interface);
@@ -1620,11 +1472,11 @@
* usb_autopm_put_interface_async - decrement a USB interface's PM-usage counter
* @intf: the usb_interface whose counter should be decremented
*
- * This routine does essentially the same thing as
- * usb_autopm_put_interface(): it decrements @intf's usage counter and
- * queues a delayed autosuspend request if the counter is <= 0. The
- * difference is that it does not acquire the device's pm_mutex;
- * callers must handle all synchronization issues themselves.
+ * This routine does much the same thing as usb_autopm_put_interface():
+ * It decrements @intf's usage counter and schedules a delayed
+ * autosuspend request if the counter is <= 0. The difference is that it
+ * does not perform any synchronization; callers should hold a private
+ * lock and handle all synchronization issues themselves.
*
* Typically a driver would call this routine during an URB's completion
* handler, if no more URBs were pending.
@@ -1634,28 +1486,58 @@
void usb_autopm_put_interface_async(struct usb_interface *intf)
{
struct usb_device *udev = interface_to_usbdev(intf);
+ unsigned long last_busy;
int status = 0;
- if (intf->condition == USB_INTERFACE_UNBOUND) {
- status = -ENODEV;
- } else {
- udev->last_busy = jiffies;
- atomic_dec(&intf->pm_usage_cnt);
- if (udev->autosuspend_disabled || udev->autosuspend_delay < 0)
- status = -EPERM;
- else if (atomic_read(&intf->pm_usage_cnt) <= 0 &&
- !timer_pending(&udev->autosuspend.timer)) {
- queue_delayed_work(ksuspend_usb_wq, &udev->autosuspend,
+ last_busy = udev->last_busy;
+ udev->last_busy = jiffies;
+ atomic_dec(&intf->pm_usage_cnt);
+ pm_runtime_put_noidle(&intf->dev);
+
+ if (!udev->autosuspend_disabled) {
+ /* Optimization: Don't schedule a delayed autosuspend if
+ * the timer is already running and the expiration time
+ * wouldn't change.
+ *
+ * We have to use the interface's timer. Attempts to
+ * schedule a suspend for the device would fail because
+ * the interface is still active.
+ */
+ if (intf->dev.power.timer_expires == 0 ||
+ round_jiffies_up(last_busy) !=
+ round_jiffies_up(jiffies)) {
+ status = pm_schedule_suspend(&intf->dev,
+ jiffies_to_msecs(
round_jiffies_up_relative(
- udev->autosuspend_delay));
+ udev->autosuspend_delay)));
}
}
- dev_vdbg(&intf->dev, "%s: status %d cnt %d\n",
- __func__, status, atomic_read(&intf->pm_usage_cnt));
+ dev_vdbg(&intf->dev, "%s: cnt %d -> %d\n",
+ __func__, atomic_read(&intf->dev.power.usage_count),
+ status);
}
EXPORT_SYMBOL_GPL(usb_autopm_put_interface_async);
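As the kernel-doc notes, the async put is intended for contexts such as URB completion handlers, which run in atomic context. A minimal sketch of a hypothetical driver dropping its PM reference once no more URBs are pending (illustrative only, not part of this patch):

#include <linux/usb.h>

static void skel_write_bulk_callback(struct urb *urb)
{
	struct usb_interface *intf = urb->context;	/* set when the URB was filled */

	/* ... check urb->status, free transfer buffers ... */

	/* Last outstanding URB is done: let the device autosuspend again */
	usb_autopm_put_interface_async(intf);
}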
/**
+ * usb_autopm_put_interface_no_suspend - decrement a USB interface's PM-usage counter
+ * @intf: the usb_interface whose counter should be decremented
+ *
+ * This routine decrements @intf's usage counter but does not carry out an
+ * autosuspend.
+ *
+ * This routine can run in atomic context.
+ */
+void usb_autopm_put_interface_no_suspend(struct usb_interface *intf)
+{
+ struct usb_device *udev = interface_to_usbdev(intf);
+
+ udev->last_busy = jiffies;
+ atomic_dec(&intf->pm_usage_cnt);
+ pm_runtime_put_noidle(&intf->dev);
+}
+EXPORT_SYMBOL_GPL(usb_autopm_put_interface_no_suspend);
+
+/**
* usb_autopm_get_interface - increment a USB interface's PM-usage counter
* @intf: the usb_interface whose counter should be incremented
*
@@ -1667,25 +1549,8 @@
* or @intf is unbound. A typical example would be a character-device
* driver when its device file is opened.
*
- *
- * The routine increments @intf's usage counter. (However if the
- * autoresume fails then the counter is re-decremented.) So long as the
- * counter is greater than 0, autosuspend will not be allowed for @intf
- * or its usb_device. When the driver is finished using @intf it should
- * call usb_autopm_put_interface() to decrement the usage counter and
- * queue a delayed autosuspend request (if the counter is <= 0).
- *
- *
- * Note that @intf->pm_usage_cnt is owned by the interface driver. The
- * core will not change its value other than the increment and decrement
- * in usb_autopm_get_interface and usb_autopm_put_interface. The driver
- * may use this simple counter-oriented discipline or may set the value
- * any way it likes.
- *
- * Resume method calls generated by this routine can arrive at any time
- * while @intf is suspended. They are not protected by the usb_device's
- * lock but only by its pm_mutex. Drivers must provide their own
- * synchronization.
+ * @intf's usage counter is incremented to prevent subsequent autosuspends.
+ * However, if the autoresume fails then the counter is re-decremented.
*
* This routine can run only in process context.
*/
@@ -1693,9 +1558,16 @@
{
int status;
- status = usb_autopm_do_interface(intf, 1);
- dev_vdbg(&intf->dev, "%s: status %d cnt %d\n",
- __func__, status, atomic_read(&intf->pm_usage_cnt));
+ status = pm_runtime_get_sync(&intf->dev);
+ if (status < 0)
+ pm_runtime_put_sync(&intf->dev);
+ else
+ atomic_inc(&intf->pm_usage_cnt);
+ dev_vdbg(&intf->dev, "%s: cnt %d -> %d\n",
+ __func__, atomic_read(&intf->dev.power.usage_count),
+ status);
+ if (status > 0)
+ status = 0;
return status;
}
EXPORT_SYMBOL_GPL(usb_autopm_get_interface);
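A typical pairing of usb_autopm_get_interface() and usb_autopm_put_interface() is around open/release of a character device, as the kernel-doc suggests. A minimal sketch of a hypothetical driver (illustrative only, not part of this patch):

#include <linux/usb.h>

struct skel_priv {			/* hypothetical driver state */
	struct usb_interface *intf;
};

static int skel_open(struct skel_priv *priv)
{
	int rc;

	rc = usb_autopm_get_interface(priv->intf);	/* resumes the device if needed */
	if (rc < 0)
		return rc;		/* usage counter was already re-decremented */

	/* ... device is at full power; start I/O ... */
	return 0;
}

static void skel_release(struct skel_priv *priv)
{
	/* ... stop I/O ... */
	usb_autopm_put_interface(priv->intf);		/* may lead to a delayed autosuspend */
}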
@@ -1705,149 +1577,207 @@
* @intf: the usb_interface whose counter should be incremented
*
* This routine does much the same thing as
- * usb_autopm_get_interface(): it increments @intf's usage counter and
- * queues an autoresume request if the result is > 0. The differences
- * are that it does not acquire the device's pm_mutex (callers must
- * handle all synchronization issues themselves), and it does not
- * autoresume the device directly (it only queues a request). After a
- * successful call, the device will generally not yet be resumed.
+ * usb_autopm_get_interface(): It increments @intf's usage counter and
+ * queues an autoresume request if the device is suspended. The
+ * differences are that it does not perform any synchronization (callers
+ * should hold a private lock and handle all synchronization issues
+ * themselves), and it does not autoresume the device directly (it only
+ * queues a request). After a successful call, the device may not yet be
+ * resumed.
*
* This routine can run in atomic context.
*/
int usb_autopm_get_interface_async(struct usb_interface *intf)
{
- struct usb_device *udev = interface_to_usbdev(intf);
- int status = 0;
+ int status = 0;
+ enum rpm_status s;
- if (intf->condition == USB_INTERFACE_UNBOUND)
- status = -ENODEV;
- else {
+ /* Don't request a resume unless the interface is already suspending
+ * or suspended. Doing so would force a running suspend timer to be
+ * cancelled.
+ */
+ pm_runtime_get_noresume(&intf->dev);
+ s = ACCESS_ONCE(intf->dev.power.runtime_status);
+ if (s == RPM_SUSPENDING || s == RPM_SUSPENDED)
+ status = pm_request_resume(&intf->dev);
+
+ if (status < 0 && status != -EINPROGRESS)
+ pm_runtime_put_noidle(&intf->dev);
+ else
atomic_inc(&intf->pm_usage_cnt);
- if (atomic_read(&intf->pm_usage_cnt) > 0 &&
- udev->state == USB_STATE_SUSPENDED)
- queue_work(ksuspend_usb_wq, &udev->autoresume);
- }
- dev_vdbg(&intf->dev, "%s: status %d cnt %d\n",
- __func__, status, atomic_read(&intf->pm_usage_cnt));
+ dev_vdbg(&intf->dev, "%s: cnt %d -> %d\n",
+ __func__, atomic_read(&intf->dev.power.usage_count),
+ status);
+ if (status > 0)
+ status = 0;
return status;
}
EXPORT_SYMBOL_GPL(usb_autopm_get_interface_async);
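The async get only queues a resume request, so it can be used from atomic context; the device is generally not yet awake when it returns. A minimal sketch of a hypothetical driver requesting a wakeup and deferring the actual I/O to its resume() callback (illustrative only, not part of this patch):

#include <linux/usb.h>

static void skel_need_device(struct usb_interface *intf)
{
	/* May be called from atomic context; this only queues a resume request */
	if (usb_autopm_get_interface_async(intf) < 0)
		return;			/* interface unbound or request failed */

	/* The device is not necessarily awake yet; URBs are submitted later,
	 * typically from the driver's resume() callback.
	 */
}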
-#else
-
-void usb_autosuspend_work(struct work_struct *work)
-{}
-
-void usb_autoresume_work(struct work_struct *work)
-{}
-
-#endif /* CONFIG_USB_SUSPEND */
-
/**
- * usb_external_suspend_device - external suspend of a USB device and its interfaces
- * @udev: the usb_device to suspend
- * @msg: Power Management message describing this state transition
+ * usb_autopm_get_interface_no_resume - increment a USB interface's PM-usage counter
+ * @intf: the usb_interface whose counter should be incremented
*
- * This routine handles external suspend requests: ones not generated
- * internally by a USB driver (autosuspend) but rather coming from the user
- * (via sysfs) or the PM core (system sleep). The suspend will be carried
- * out regardless of @udev's usage counter or those of its interfaces,
- * and regardless of whether or not remote wakeup is enabled. Of course,
- * interface drivers still have the option of failing the suspend (if
- * there are unsuspended children, for example).
+ * This routine increments @intf's usage counter but does not carry out an
+ * autoresume.
*
- * The caller must hold @udev's device lock.
+ * This routine can run in atomic context.
*/
-int usb_external_suspend_device(struct usb_device *udev, pm_message_t msg)
+void usb_autopm_get_interface_no_resume(struct usb_interface *intf)
{
- int status;
+ struct usb_device *udev = interface_to_usbdev(intf);
- do_unbind_rebind(udev, DO_UNBIND);
- usb_pm_lock(udev);
- status = usb_suspend_both(udev, msg);
- usb_pm_unlock(udev);
- return status;
-}
-
-/**
- * usb_external_resume_device - external resume of a USB device and its interfaces
- * @udev: the usb_device to resume
- * @msg: Power Management message describing this state transition
- *
- * This routine handles external resume requests: ones not generated
- * internally by a USB driver (autoresume) but rather coming from the user
- * (via sysfs), the PM core (system resume), or the device itself (remote
- * wakeup). @udev's usage counter is unaffected.
- *
- * The caller must hold @udev's device lock.
- */
-int usb_external_resume_device(struct usb_device *udev, pm_message_t msg)
-{
- int status;
-
- usb_pm_lock(udev);
- status = usb_resume_both(udev, msg);
udev->last_busy = jiffies;
- usb_pm_unlock(udev);
- if (status == 0)
- do_unbind_rebind(udev, DO_REBIND);
-
- /* Now that the device is awake, we can start trying to autosuspend
- * it again. */
- if (status == 0)
- usb_try_autosuspend_device(udev);
- return status;
+ atomic_inc(&intf->pm_usage_cnt);
+ pm_runtime_get_noresume(&intf->dev);
}
+EXPORT_SYMBOL_GPL(usb_autopm_get_interface_no_resume);
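The _no_resume and _no_suspend variants only adjust the usage counter without triggering a power transition. One place they are useful is inside a resume-side callback, where the device is already known to be awake; a minimal sketch (illustrative only, hypothetical driver, not part of this patch):

#include <linux/usb.h>

static int skel_reset_resume(struct usb_interface *intf)
{
	/* Device is already at full power: pin it while deferred work runs */
	usb_autopm_get_interface_no_resume(intf);

	/* ... requeue URBs, kick off deferred re-initialization ... */
	return 0;
}

static void skel_deferred_done(struct usb_interface *intf)
{
	/* Drop the reference without forcing an immediate suspend attempt */
	usb_autopm_put_interface_no_suspend(intf);
}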
-int usb_suspend(struct device *dev, pm_message_t msg)
+/* Internal routine to check whether we may autosuspend a device. */
+static int autosuspend_check(struct usb_device *udev)
{
- struct usb_device *udev;
+ int i;
+ struct usb_interface *intf;
+ unsigned long suspend_time, j;
- udev = to_usb_device(dev);
-
- /* If udev is already suspended, we can skip this suspend and
- * we should also skip the upcoming system resume. High-speed
- * root hubs are an exception; they need to resume whenever the
- * system wakes up in order for USB-PERSIST port handover to work
- * properly.
+ /* Fail if autosuspend is disabled, or any interfaces are in use, or
+ * any interface drivers require remote wakeup but it isn't available.
*/
- if (udev->state == USB_STATE_SUSPENDED) {
- if (udev->parent || udev->speed != USB_SPEED_HIGH)
- udev->skip_sys_resume = 1;
- return 0;
+ udev->do_remote_wakeup = device_may_wakeup(&udev->dev);
+ if (udev->actconfig) {
+ for (i = 0; i < udev->actconfig->desc.bNumInterfaces; i++) {
+ intf = udev->actconfig->interface[i];
+
+ /* We don't need to check interfaces that are
+ * disabled for runtime PM. Either they are unbound
+ * or else their drivers don't support autosuspend
+ * and so they are permanently active.
+ */
+ if (intf->dev.power.disable_depth)
+ continue;
+ if (atomic_read(&intf->dev.power.usage_count) > 0)
+ return -EBUSY;
+ if (intf->needs_remote_wakeup &&
+ !udev->do_remote_wakeup) {
+ dev_dbg(&udev->dev, "remote wakeup needed "
+ "for autosuspend\n");
+ return -EOPNOTSUPP;
+ }
+
+ /* Don't allow autosuspend if the device will need
+ * a reset-resume and any of its interface drivers
+ * lacks reset_resume support or needs remote wakeup.
+ */
+ if (udev->quirks & USB_QUIRK_RESET_RESUME) {
+ struct usb_driver *driver;
+
+ driver = to_usb_driver(intf->dev.driver);
+ if (!driver->reset_resume ||
+ intf->needs_remote_wakeup)
+ return -EOPNOTSUPP;
+ }
+ }
}
- udev->skip_sys_resume = 0;
- return usb_external_suspend_device(udev, msg);
+ /* If everything is okay but the device hasn't been idle for long
+ * enough, queue a delayed autosuspend request.
+ */
+ j = ACCESS_ONCE(jiffies);
+ suspend_time = udev->last_busy + udev->autosuspend_delay;
+ if (time_before(j, suspend_time)) {
+ pm_schedule_suspend(&udev->dev, jiffies_to_msecs(
+ round_jiffies_up_relative(suspend_time - j)));
+ return -EAGAIN;
+ }
+ return 0;
}
-int usb_resume(struct device *dev, pm_message_t msg)
+static int usb_runtime_suspend(struct device *dev)
{
- struct usb_device *udev;
- int status;
+ int status = 0;
- udev = to_usb_device(dev);
-
- /* If udev->skip_sys_resume is set then udev was already suspended
- * when the system sleep started, so we don't want to resume it
- * during this system wakeup.
+ /* A USB device can be suspended if it passes the various autosuspend
+ * checks. Runtime suspend for a USB device means suspending all the
+ * interfaces and then the device itself.
*/
- if (udev->skip_sys_resume)
- return 0;
- status = usb_external_resume_device(udev, msg);
+ if (is_usb_device(dev)) {
+ struct usb_device *udev = to_usb_device(dev);
- /* Avoid PM error messages for devices disconnected while suspended
- * as we'll display regular disconnect messages just a bit later.
- */
- if (status == -ENODEV)
- return 0;
+ if (autosuspend_check(udev) != 0)
+ return -EAGAIN;
+
+ status = usb_suspend_both(udev, PMSG_AUTO_SUSPEND);
+
+ /* If an interface fails the suspend, adjust the last_busy
+ * time so that we don't get another suspend attempt right
+ * away.
+ */
+ if (status) {
+ udev->last_busy = jiffies +
+ (udev->autosuspend_delay == 0 ?
+ HZ/2 : 0);
+ }
+
+ /* Prevent the parent from suspending immediately after */
+ else if (udev->parent) {
+ udev->parent->last_busy = jiffies;
+ }
+ }
+
+ /* Runtime suspend for a USB interface doesn't mean anything. */
return status;
}
-#endif /* CONFIG_PM */
+static int usb_runtime_resume(struct device *dev)
+{
+ /* Runtime resume for a USB device means resuming both the device
+ * and all its interfaces.
+ */
+ if (is_usb_device(dev)) {
+ struct usb_device *udev = to_usb_device(dev);
+ int status;
+
+ status = usb_resume_both(udev, PMSG_AUTO_RESUME);
+ udev->last_busy = jiffies;
+ return status;
+ }
+
+ /* Runtime resume for a USB interface doesn't mean anything. */
+ return 0;
+}
+
+static int usb_runtime_idle(struct device *dev)
+{
+ /* An idle USB device can be suspended if it passes the various
+ * autosuspend checks. An idle interface can be suspended at
+ * any time.
+ */
+ if (is_usb_device(dev)) {
+ struct usb_device *udev = to_usb_device(dev);
+
+ if (autosuspend_check(udev) != 0)
+ return 0;
+ }
+
+ pm_runtime_suspend(dev);
+ return 0;
+}
+
+static struct dev_pm_ops usb_bus_pm_ops = {
+ .runtime_suspend = usb_runtime_suspend,
+ .runtime_resume = usb_runtime_resume,
+ .runtime_idle = usb_runtime_idle,
+};
+
+#else
+
+#define usb_bus_pm_ops (*(struct dev_pm_ops *) NULL)
+
+#endif /* CONFIG_USB_SUSPEND */
struct bus_type usb_bus_type = {
.name = "usb",
.match = usb_device_match,
.uevent = usb_uevent,
+ .pm = &usb_bus_pm_ops,
};
diff --git a/drivers/usb/core/file.c b/drivers/usb/core/file.c
index bfc6c2e..c3536f1 100644
--- a/drivers/usb/core/file.c
+++ b/drivers/usb/core/file.c
@@ -34,7 +34,6 @@
int err = -ENODEV;
const struct file_operations *old_fops, *new_fops = NULL;
- lock_kernel();
down_read(&minor_rwsem);
c = usb_minors[minor];
@@ -53,7 +52,6 @@
fops_put(old_fops);
done:
up_read(&minor_rwsem);
- unlock_kernel();
return err;
}
diff --git a/drivers/usb/core/hcd.c b/drivers/usb/core/hcd.c
index 80995ef..2f8cedd 100644
--- a/drivers/usb/core/hcd.c
+++ b/drivers/usb/core/hcd.c
@@ -39,6 +39,7 @@
#include <linux/platform_device.h>
#include <linux/workqueue.h>
#include <linux/mutex.h>
+#include <linux/pm_runtime.h>
#include <linux/usb.h>
@@ -141,7 +142,7 @@
0x09, /* __u8 bMaxPacketSize0; 2^9 = 512 Bytes */
0x6b, 0x1d, /* __le16 idVendor; Linux Foundation */
- 0x02, 0x00, /* __le16 idProduct; device 0x0002 */
+ 0x03, 0x00, /* __le16 idProduct; device 0x0003 */
KERNEL_VER, KERNEL_REL, /* __le16 bcdDevice */
0x03, /* __u8 iManufacturer; */
@@ -1670,11 +1671,16 @@
}
}
for (i = 0; i < num_intfs; ++i) {
+ struct usb_host_interface *first_alt;
+ int iface_num;
+
+ first_alt = &new_config->intf_cache[i]->altsetting[0];
+ iface_num = first_alt->desc.bInterfaceNumber;
/* Set up endpoints for alternate interface setting 0 */
- alt = usb_find_alt_setting(new_config, i, 0);
+ alt = usb_find_alt_setting(new_config, iface_num, 0);
if (!alt)
/* No alt setting 0? Pick the first setting. */
- alt = &new_config->intf_cache[i]->altsetting[0];
+ alt = first_alt;
for (j = 0; j < alt->desc.bNumEndpoints; j++) {
ret = hcd->driver->add_endpoint(hcd, udev, &alt->endpoint[j]);
@@ -1853,6 +1859,10 @@
return status;
}
+#endif /* CONFIG_PM */
+
+#ifdef CONFIG_USB_SUSPEND
+
/* Workqueue routine for root-hub remote wakeup */
static void hcd_resume_work(struct work_struct *work)
{
@@ -1860,8 +1870,7 @@
struct usb_device *udev = hcd->self.root_hub;
usb_lock_device(udev);
- usb_mark_last_busy(udev);
- usb_external_resume_device(udev, PMSG_REMOTE_RESUME);
+ usb_remote_wakeup(udev);
usb_unlock_device(udev);
}
@@ -1880,12 +1889,12 @@
spin_lock_irqsave (&hcd_root_hub_lock, flags);
if (hcd->rh_registered)
- queue_work(ksuspend_usb_wq, &hcd->wakeup_work);
+ queue_work(pm_wq, &hcd->wakeup_work);
spin_unlock_irqrestore (&hcd_root_hub_lock, flags);
}
EXPORT_SYMBOL_GPL(usb_hcd_resume_root_hub);
-#endif
+#endif /* CONFIG_USB_SUSPEND */
/*-------------------------------------------------------------------------*/
@@ -2030,7 +2039,7 @@
init_timer(&hcd->rh_timer);
hcd->rh_timer.function = rh_timer_func;
hcd->rh_timer.data = (unsigned long) hcd;
-#ifdef CONFIG_PM
+#ifdef CONFIG_USB_SUSPEND
INIT_WORK(&hcd->wakeup_work, hcd_resume_work);
#endif
mutex_init(&hcd->bandwidth_mutex);
@@ -2230,7 +2239,7 @@
hcd->rh_registered = 0;
spin_unlock_irq (&hcd_root_hub_lock);
-#ifdef CONFIG_PM
+#ifdef CONFIG_USB_SUSPEND
cancel_work_sync(&hcd->wakeup_work);
#endif
diff --git a/drivers/usb/core/hcd.h b/drivers/usb/core/hcd.h
index bbe2b92..a3cdb09 100644
--- a/drivers/usb/core/hcd.h
+++ b/drivers/usb/core/hcd.h
@@ -80,7 +80,7 @@
struct timer_list rh_timer; /* drives root-hub polling */
struct urb *status_urb; /* the current status urb */
-#ifdef CONFIG_PM
+#ifdef CONFIG_USB_SUSPEND
struct work_struct wakeup_work; /* for remote wakeup */
#endif
@@ -248,7 +248,7 @@
/* xHCI specific functions */
/* Called by usb_alloc_dev to alloc HC device structures */
int (*alloc_dev)(struct usb_hcd *, struct usb_device *);
- /* Called by usb_release_dev to free HC device structures */
+ /* Called by usb_disconnect to free HC device structures */
void (*free_dev)(struct usb_hcd *, struct usb_device *);
/* Bandwidth computation functions */
@@ -286,6 +286,7 @@
*/
int (*update_hub_device)(struct usb_hcd *, struct usb_device *hdev,
struct usb_tt *tt, gfp_t mem_flags);
+ int (*reset_device)(struct usb_hcd *, struct usb_device *);
};
extern int usb_hcd_link_urb_to_ep(struct usb_hcd *hcd, struct urb *urb);
@@ -463,16 +464,20 @@
#define usb_endpoint_out(ep_dir) (!((ep_dir) & USB_DIR_IN))
#ifdef CONFIG_PM
-extern void usb_hcd_resume_root_hub(struct usb_hcd *hcd);
extern void usb_root_hub_lost_power(struct usb_device *rhdev);
extern int hcd_bus_suspend(struct usb_device *rhdev, pm_message_t msg);
extern int hcd_bus_resume(struct usb_device *rhdev, pm_message_t msg);
+#endif /* CONFIG_PM */
+
+#ifdef CONFIG_USB_SUSPEND
+extern void usb_hcd_resume_root_hub(struct usb_hcd *hcd);
#else
static inline void usb_hcd_resume_root_hub(struct usb_hcd *hcd)
{
return;
}
-#endif /* CONFIG_PM */
+#endif /* CONFIG_USB_SUSPEND */
+
/*
* USB device fs stuff
diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
index 20ecb4c..0940ccd 100644
--- a/drivers/usb/core/hub.c
+++ b/drivers/usb/core/hub.c
@@ -22,6 +22,7 @@
#include <linux/kthread.h>
#include <linux/mutex.h>
#include <linux/freezer.h>
+#include <linux/pm_runtime.h>
#include <asm/uaccess.h>
#include <asm/byteorder.h>
@@ -71,7 +72,6 @@
unsigned mA_per_port; /* current for each child */
- unsigned init_done:1;
unsigned limited_power:1;
unsigned quiescing:1;
unsigned disconnected:1;
@@ -820,7 +820,6 @@
}
init3:
hub->quiescing = 0;
- hub->init_done = 1;
status = usb_submit_urb(hub->urb, GFP_NOIO);
if (status < 0)
@@ -861,11 +860,6 @@
int i;
cancel_delayed_work_sync(&hub->init_work);
- if (!hub->init_done) {
- hub->init_done = 1;
- usb_autopm_put_interface_no_suspend(
- to_usb_interface(hub->intfdev));
- }
/* khubd and related activity won't re-trigger */
hub->quiescing = 1;
@@ -1224,6 +1218,9 @@
desc = intf->cur_altsetting;
hdev = interface_to_usbdev(intf);
+ /* Hubs have proper suspend/resume support */
+ usb_enable_autosuspend(hdev);
+
if (hdev->level == MAX_TOPO_LEVEL) {
dev_err(&intf->dev,
"Unsupported bus topology: hub nested too deep\n");
@@ -1402,10 +1399,8 @@
if (udev->children[i])
recursively_mark_NOTATTACHED(udev->children[i]);
}
- if (udev->state == USB_STATE_SUSPENDED) {
- udev->discon_suspended = 1;
+ if (udev->state == USB_STATE_SUSPENDED)
udev->active_duration -= jiffies;
- }
udev->state = USB_STATE_NOTATTACHED;
}
@@ -1448,11 +1443,11 @@
|| new_state == USB_STATE_SUSPENDED)
; /* No change to wakeup settings */
else if (new_state == USB_STATE_CONFIGURED)
- device_init_wakeup(&udev->dev,
+ device_set_wakeup_capable(&udev->dev,
(udev->actconfig->desc.bmAttributes
& USB_CONFIG_ATT_WAKEUP));
else
- device_init_wakeup(&udev->dev, 0);
+ device_set_wakeup_capable(&udev->dev, 0);
}
if (udev->state == USB_STATE_SUSPENDED &&
new_state != USB_STATE_SUSPENDED)
@@ -1529,31 +1524,15 @@
udev->devnum = devnum;
}
-#ifdef CONFIG_USB_SUSPEND
-
-static void usb_stop_pm(struct usb_device *udev)
+static void hub_free_dev(struct usb_device *udev)
{
- /* Synchronize with the ksuspend thread to prevent any more
- * autosuspend requests from being submitted, and decrement
- * the parent's count of unsuspended children.
- */
- usb_pm_lock(udev);
- if (udev->parent && !udev->discon_suspended)
- usb_autosuspend_device(udev->parent);
- usb_pm_unlock(udev);
+ struct usb_hcd *hcd = bus_to_hcd(udev->bus);
- /* Stop any autosuspend or autoresume requests already submitted */
- cancel_delayed_work_sync(&udev->autosuspend);
- cancel_work_sync(&udev->autoresume);
+ /* Root hubs aren't real devices, so don't free HCD resources */
+ if (hcd->driver->free_dev && udev->parent)
+ hcd->driver->free_dev(hcd, udev);
}
-#else
-
-static inline void usb_stop_pm(struct usb_device *udev)
-{ }
-
-#endif
-
/**
* usb_disconnect - disconnect a device (usbcore-internal)
* @pdev: pointer to device being disconnected
@@ -1622,7 +1601,7 @@
*pdev = NULL;
spin_unlock_irq(&device_state_lock);
- usb_stop_pm(udev);
+ hub_free_dev(udev);
put_device(&udev->dev);
}
@@ -1799,9 +1778,18 @@
{
int err;
- /* Increment the parent's count of unsuspended children */
- if (udev->parent)
- usb_autoresume_device(udev->parent);
+ if (udev->parent) {
+ /* Initialize non-root-hub device wakeup to disabled;
+ * device (un)configuration controls wakeup capable;
+ * sysfs power/wakeup controls wakeup enabled/disabled.
+ */
+ device_init_wakeup(&udev->dev, 0);
+ device_set_wakeup_enable(&udev->dev, 1);
+ }
+
+ /* Tell the runtime-PM framework the device is active */
+ pm_runtime_set_active(&udev->dev);
+ pm_runtime_enable(&udev->dev);
usb_detect_quirks(udev);
err = usb_enumerate_device(udev); /* Read descriptors */
@@ -1833,7 +1821,8 @@
fail:
usb_set_device_state(udev, USB_STATE_NOTATTACHED);
- usb_stop_pm(udev);
+ pm_runtime_disable(&udev->dev);
+ pm_runtime_set_suspended(&udev->dev);
return err;
}
@@ -1982,7 +1971,7 @@
if (!(portstatus & USB_PORT_STAT_RESET) &&
(portstatus & USB_PORT_STAT_ENABLE)) {
if (hub_is_wusb(hub))
- udev->speed = USB_SPEED_VARIABLE;
+ udev->speed = USB_SPEED_WIRELESS;
else if (portstatus & USB_PORT_STAT_HIGH_SPEED)
udev->speed = USB_SPEED_HIGH;
else if (portstatus & USB_PORT_STAT_LOW_SPEED)
@@ -2008,7 +1997,9 @@
struct usb_device *udev, unsigned int delay)
{
int i, status;
+ struct usb_hcd *hcd;
+ hcd = bus_to_hcd(udev->bus);
/* Block EHCI CF initialization during the port reset.
* Some companion controllers don't like it when they mix.
*/
@@ -2036,6 +2027,14 @@
/* TRSTRCY = 10 ms; plus some extra */
msleep(10 + 40);
update_address(udev, 0);
+ if (hcd->driver->reset_device) {
+ status = hcd->driver->reset_device(hcd, udev);
+ if (status < 0) {
+ dev_err(&udev->dev, "Cannot reset "
+ "HCD device state\n");
+ break;
+ }
+ }
/* FALL THROUGH */
case -ENOTCONN:
case -ENODEV:
@@ -2381,14 +2380,17 @@
}
/* caller has locked udev */
-static int remote_wakeup(struct usb_device *udev)
+int usb_remote_wakeup(struct usb_device *udev)
{
int status = 0;
if (udev->state == USB_STATE_SUSPENDED) {
dev_dbg(&udev->dev, "usb %sresume\n", "wakeup-");
- usb_mark_last_busy(udev);
- status = usb_external_resume_device(udev, PMSG_REMOTE_RESUME);
+ status = usb_autoresume_device(udev);
+ if (status == 0) {
+ /* Let the drivers do their thing, then... */
+ usb_autosuspend_device(udev);
+ }
}
return status;
}
@@ -2425,11 +2427,6 @@
return status;
}
-static inline int remote_wakeup(struct usb_device *udev)
-{
- return 0;
-}
-
#endif
static int hub_suspend(struct usb_interface *intf, pm_message_t msg)
@@ -2496,11 +2493,6 @@
#else /* CONFIG_PM */
-static inline int remote_wakeup(struct usb_device *udev)
-{
- return 0;
-}
-
#define hub_suspend NULL
#define hub_resume NULL
#define hub_reset_resume NULL
@@ -2645,14 +2637,7 @@
mutex_lock(&usb_address0_mutex);
- if ((hcd->driver->flags & HCD_USB3) && udev->config) {
- /* FIXME this will need special handling by the xHCI driver. */
- dev_dbg(&udev->dev,
- "xHCI reset of configured device "
- "not supported yet.\n");
- retval = -EINVAL;
- goto fail;
- } else if (!udev->config && oldspeed == USB_SPEED_SUPER) {
+ if (!udev->config && oldspeed == USB_SPEED_SUPER) {
/* Don't reset USB 3.0 devices during an initial setup */
usb_set_device_state(udev, USB_STATE_DEFAULT);
} else {
@@ -2678,7 +2663,7 @@
*/
switch (udev->speed) {
case USB_SPEED_SUPER:
- case USB_SPEED_VARIABLE: /* fixed at 512 */
+ case USB_SPEED_WIRELESS: /* fixed at 512 */
udev->ep0.desc.wMaxPacketSize = cpu_to_le16(512);
break;
case USB_SPEED_HIGH: /* fixed at 64 */
@@ -2706,7 +2691,7 @@
case USB_SPEED_SUPER:
speed = "super";
break;
- case USB_SPEED_VARIABLE:
+ case USB_SPEED_WIRELESS:
speed = "variable";
type = "Wireless ";
break;
@@ -3006,7 +2991,7 @@
/* For a suspended device, treat this as a
* remote wakeup event.
*/
- status = remote_wakeup(udev);
+ status = usb_remote_wakeup(udev);
#endif
} else {
@@ -3192,6 +3177,7 @@
loop:
usb_ep0_reinit(udev);
release_address(udev);
+ hub_free_dev(udev);
usb_put_dev(udev);
if ((status == -ENOTCONN) || (status == -ENOTSUPP))
break;
@@ -3259,7 +3245,7 @@
* disconnected while waiting for the lock to succeed. */
usb_lock_device(hdev);
if (unlikely(hub->disconnected))
- goto loop2;
+ goto loop_disconnected;
/* If the hub has died, clean up after it */
if (hdev->state == USB_STATE_NOTATTACHED) {
@@ -3352,7 +3338,7 @@
msleep(10);
usb_lock_device(udev);
- ret = remote_wakeup(hdev->
+ ret = usb_remote_wakeup(hdev->
children[i-1]);
usb_unlock_device(udev);
if (ret < 0)
@@ -3419,7 +3405,7 @@
* kick_khubd() and allow autosuspend.
*/
usb_autopm_put_interface(intf);
- loop2:
+ loop_disconnected:
usb_unlock_device(hdev);
kref_put(&hub->kref, hub_release);
@@ -3446,7 +3432,7 @@
return 0;
}
-static struct usb_device_id hub_id_table [] = {
+static const struct usb_device_id hub_id_table[] = {
{ .match_flags = USB_DEVICE_ID_MATCH_DEV_CLASS,
.bDeviceClass = USB_CLASS_HUB},
{ .match_flags = USB_DEVICE_ID_MATCH_INT_CLASS,
diff --git a/drivers/usb/core/message.c b/drivers/usb/core/message.c
index df73574..cd22027 100644
--- a/drivers/usb/core/message.c
+++ b/drivers/usb/core/message.c
@@ -1316,7 +1316,7 @@
alt = usb_altnum_to_altsetting(iface, alternate);
if (!alt) {
- dev_warn(&dev->dev, "selecting invalid altsetting %d",
+ dev_warn(&dev->dev, "selecting invalid altsetting %d\n",
alternate);
return -EINVAL;
}
@@ -1471,7 +1471,7 @@
/* If not, reinstate the old alternate settings */
if (retval < 0) {
reset_old_alts:
- for (; i >= 0; i--) {
+ for (i--; i >= 0; i--) {
struct usb_interface *intf = config->interface[i];
struct usb_host_interface *alt;
@@ -1843,7 +1843,6 @@
intf->dev.dma_mask = dev->dev.dma_mask;
INIT_WORK(&intf->reset_ws, __usb_queue_reset_device);
device_initialize(&intf->dev);
- mark_quiesced(intf);
dev_set_name(&intf->dev, "%d-%s:%d.%d",
dev->bus->busnum, dev->devpath,
configuration, alt->desc.bInterfaceNumber);
diff --git a/drivers/usb/core/quirks.c b/drivers/usb/core/quirks.c
index ab93918..f073c5c 100644
--- a/drivers/usb/core/quirks.c
+++ b/drivers/usb/core/quirks.c
@@ -103,10 +103,19 @@
dev_dbg(&udev->dev, "USB quirks for this device: %x\n",
udev->quirks);
- /* By default, disable autosuspend for all non-hubs */
#ifdef CONFIG_USB_SUSPEND
- if (udev->descriptor.bDeviceClass != USB_CLASS_HUB)
- udev->autosuspend_disabled = 1;
+
+ /* By default, disable autosuspend for all devices. The hub driver
+ * will enable it for hubs.
+ */
+ usb_disable_autosuspend(udev);
+
+ /* Autosuspend can also be disabled if the initial autosuspend_delay
+ * is negative.
+ */
+ if (udev->autosuspend_delay < 0)
+ usb_autoresume_device(udev);
+
#endif
/* For the present, all devices default to USB-PERSIST enabled */
@@ -120,6 +129,7 @@
* for all devices. It will affect things like hub resets
* and EMF-related port disables.
*/
- udev->persist_enabled = 1;
+ if (!(udev->quirks & USB_QUIRK_RESET_MORPHS))
+ udev->persist_enabled = 1;
#endif /* CONFIG_PM */
}
diff --git a/drivers/usb/core/sysfs.c b/drivers/usb/core/sysfs.c
index 5f3908f..43c002e 100644
--- a/drivers/usb/core/sysfs.c
+++ b/drivers/usb/core/sysfs.c
@@ -115,7 +115,7 @@
case USB_SPEED_HIGH:
speed = "480";
break;
- case USB_SPEED_VARIABLE:
+ case USB_SPEED_WIRELESS:
speed = "480";
break;
case USB_SPEED_SUPER:
@@ -191,6 +191,36 @@
static DEVICE_ATTR(quirks, S_IRUGO, show_quirks, NULL);
static ssize_t
+show_avoid_reset_quirk(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ struct usb_device *udev;
+
+ udev = to_usb_device(dev);
+ return sprintf(buf, "%d\n", !!(udev->quirks & USB_QUIRK_RESET_MORPHS));
+}
+
+static ssize_t
+set_avoid_reset_quirk(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct usb_device *udev = to_usb_device(dev);
+ int config;
+
+ if (sscanf(buf, "%d", &config) != 1 || config < 0 || config > 1)
+ return -EINVAL;
+ usb_lock_device(udev);
+ if (config)
+ udev->quirks |= USB_QUIRK_RESET_MORPHS;
+ else
+ udev->quirks &= ~USB_QUIRK_RESET_MORPHS;
+ usb_unlock_device(udev);
+ return count;
+}
+
+static DEVICE_ATTR(avoid_reset_quirk, S_IRUGO | S_IWUSR,
+ show_avoid_reset_quirk, set_avoid_reset_quirk);
+
+static ssize_t
show_urbnum(struct device *dev, struct device_attribute *attr, char *buf)
{
struct usb_device *udev;
@@ -226,9 +256,10 @@
if (sscanf(buf, "%d", &value) != 1)
return -EINVAL;
- usb_pm_lock(udev);
+
+ usb_lock_device(udev);
udev->persist_enabled = !!value;
- usb_pm_unlock(udev);
+ usb_unlock_device(udev);
return count;
}
@@ -315,20 +346,34 @@
const char *buf, size_t count)
{
struct usb_device *udev = to_usb_device(dev);
- int value;
+ int value, old_delay;
+ int rc;
if (sscanf(buf, "%d", &value) != 1 || value >= INT_MAX/HZ ||
value <= - INT_MAX/HZ)
return -EINVAL;
value *= HZ;
+ usb_lock_device(udev);
+ old_delay = udev->autosuspend_delay;
udev->autosuspend_delay = value;
- if (value >= 0)
- usb_try_autosuspend_device(udev);
- else {
- if (usb_autoresume_device(udev) == 0)
+
+ if (old_delay < 0) { /* Autosuspend wasn't allowed */
+ if (value >= 0)
usb_autosuspend_device(udev);
+ } else { /* Autosuspend was allowed */
+ if (value < 0) {
+ rc = usb_autoresume_device(udev);
+ if (rc < 0) {
+ count = rc;
+ udev->autosuspend_delay = old_delay;
+ }
+ } else {
+ usb_try_autosuspend_device(udev);
+ }
}
+
+ usb_unlock_device(udev);
return count;
}
@@ -356,34 +401,25 @@
struct usb_device *udev = to_usb_device(dev);
int len = count;
char *cp;
- int rc = 0;
- int old_autosuspend_disabled;
+ int rc;
cp = memchr(buf, '\n', count);
if (cp)
len = cp - buf;
usb_lock_device(udev);
- old_autosuspend_disabled = udev->autosuspend_disabled;
- /* Setting the flags without calling usb_pm_lock is a subject to
- * races, but who cares...
- */
if (len == sizeof on_string - 1 &&
- strncmp(buf, on_string, len) == 0) {
- udev->autosuspend_disabled = 1;
- rc = usb_external_resume_device(udev, PMSG_USER_RESUME);
+ strncmp(buf, on_string, len) == 0)
+ rc = usb_disable_autosuspend(udev);
- } else if (len == sizeof auto_string - 1 &&
- strncmp(buf, auto_string, len) == 0) {
- udev->autosuspend_disabled = 0;
- rc = usb_external_resume_device(udev, PMSG_USER_RESUME);
+ else if (len == sizeof auto_string - 1 &&
+ strncmp(buf, auto_string, len) == 0)
+ rc = usb_enable_autosuspend(udev);
- } else
+ else
rc = -EINVAL;
- if (rc)
- udev->autosuspend_disabled = old_autosuspend_disabled;
usb_unlock_device(udev);
return (rc < 0 ? rc : count);
}
@@ -558,6 +594,7 @@
&dev_attr_version.attr,
&dev_attr_maxchild.attr,
&dev_attr_quirks.attr,
+ &dev_attr_avoid_reset_quirk.attr,
&dev_attr_authorized.attr,
&dev_attr_remove.attr,
NULL,
diff --git a/drivers/usb/core/urb.c b/drivers/usb/core/urb.c
index e7cae13..2708056 100644
--- a/drivers/usb/core/urb.c
+++ b/drivers/usb/core/urb.c
@@ -387,6 +387,13 @@
{
unsigned int orig_flags = urb->transfer_flags;
unsigned int allowed;
+ static int pipetypes[4] = {
+ PIPE_CONTROL, PIPE_ISOCHRONOUS, PIPE_BULK, PIPE_INTERRUPT
+ };
+
+ /* Check that the pipe's type matches the endpoint's type */
+ if (usb_pipetype(urb->pipe) != pipetypes[xfertype])
+ return -EPIPE; /* The most suitable error code :-) */
/* enforce simple/standard policy */
allowed = (URB_NO_TRANSFER_DMA_MAP | URB_NO_SETUP_DMA_MAP |
@@ -430,7 +437,7 @@
case USB_ENDPOINT_XFER_INT:
/* too small? */
switch (dev->speed) {
- case USB_SPEED_VARIABLE:
+ case USB_SPEED_WIRELESS:
if (urb->interval < 6)
return -EINVAL;
break;
@@ -446,7 +453,7 @@
if (urb->interval > (1 << 15))
return -EINVAL;
max = 1 << 15;
- case USB_SPEED_VARIABLE:
+ case USB_SPEED_WIRELESS:
if (urb->interval > 16)
return -EINVAL;
break;
@@ -473,7 +480,7 @@
default:
return -EINVAL;
}
- if (dev->speed != USB_SPEED_VARIABLE) {
+ if (dev->speed != USB_SPEED_WIRELESS) {
/* Round down to a power of 2, no more than max */
urb->interval = min(max, 1 << ilog2(urb->interval));
}
diff --git a/drivers/usb/core/usb.c b/drivers/usb/core/usb.c
index 0daff0d..1297e9b 100644
--- a/drivers/usb/core/usb.c
+++ b/drivers/usb/core/usb.c
@@ -49,9 +49,6 @@
static int nousb; /* Disable USB when built into kernel image */
-/* Workqueue for autosuspend and for remote wakeup of root hubs */
-struct workqueue_struct *ksuspend_usb_wq;
-
#ifdef CONFIG_USB_SUSPEND
static int usb_autosuspend_delay = 2; /* Default delay value,
* in seconds */
@@ -228,9 +225,6 @@
hcd = bus_to_hcd(udev->bus);
usb_destroy_configuration(udev);
- /* Root hubs aren't real devices, so don't free HCD resources */
- if (hcd->driver->free_dev && udev->parent)
- hcd->driver->free_dev(hcd, udev);
usb_put_hcd(hcd);
kfree(udev->product);
kfree(udev->manufacturer);
@@ -264,23 +258,6 @@
#ifdef CONFIG_PM
-static int ksuspend_usb_init(void)
-{
- /* This workqueue is supposed to be both freezable and
- * singlethreaded. Its job doesn't justify running on more
- * than one CPU.
- */
- ksuspend_usb_wq = create_freezeable_workqueue("ksuspend_usbd");
- if (!ksuspend_usb_wq)
- return -ENOMEM;
- return 0;
-}
-
-static void ksuspend_usb_cleanup(void)
-{
- destroy_workqueue(ksuspend_usb_wq);
-}
-
/* USB device Power-Management thunks.
* There's no need to distinguish here between quiescing a USB device
* and powering it down; the generic_suspend() routine takes care of
@@ -296,7 +273,7 @@
static void usb_dev_complete(struct device *dev)
{
/* Currently used only for rebinding interfaces */
- usb_resume(dev, PMSG_RESUME); /* Message event is meaningless */
+ usb_resume(dev, PMSG_ON); /* FIXME: change to PMSG_COMPLETE */
}
static int usb_dev_suspend(struct device *dev)
@@ -342,9 +319,7 @@
#else
-#define ksuspend_usb_init() 0
-#define ksuspend_usb_cleanup() do {} while (0)
-#define usb_device_pm_ops (*(struct dev_pm_ops *)0)
+#define usb_device_pm_ops (*(struct dev_pm_ops *) NULL)
#endif /* CONFIG_PM */
@@ -472,9 +447,6 @@
INIT_LIST_HEAD(&dev->filelist);
#ifdef CONFIG_PM
- mutex_init(&dev->pm_mutex);
- INIT_DELAYED_WORK(&dev->autosuspend, usb_autosuspend_work);
- INIT_WORK(&dev->autoresume, usb_autoresume_work);
dev->autosuspend_delay = usb_autosuspend_delay * HZ;
dev->connect_time = jiffies;
dev->active_duration = -jiffies;
@@ -1117,9 +1089,6 @@
if (retval)
goto out;
- retval = ksuspend_usb_init();
- if (retval)
- goto out;
retval = bus_register(&usb_bus_type);
if (retval)
goto bus_register_failed;
@@ -1159,7 +1128,7 @@
bus_notifier_failed:
bus_unregister(&usb_bus_type);
bus_register_failed:
- ksuspend_usb_cleanup();
+ usb_debugfs_cleanup();
out:
return retval;
}
@@ -1181,7 +1150,6 @@
usb_hub_cleanup();
bus_unregister_notifier(&usb_bus_type, &usb_bus_nb);
bus_unregister(&usb_bus_type);
- ksuspend_usb_cleanup();
usb_debugfs_cleanup();
}
diff --git a/drivers/usb/core/usb.h b/drivers/usb/core/usb.h
index 4c36c7f..cd88220 100644
--- a/drivers/usb/core/usb.h
+++ b/drivers/usb/core/usb.h
@@ -55,24 +55,8 @@
extern int usb_suspend(struct device *dev, pm_message_t msg);
extern int usb_resume(struct device *dev, pm_message_t msg);
-extern void usb_autosuspend_work(struct work_struct *work);
-extern void usb_autoresume_work(struct work_struct *work);
extern int usb_port_suspend(struct usb_device *dev, pm_message_t msg);
extern int usb_port_resume(struct usb_device *dev, pm_message_t msg);
-extern int usb_external_suspend_device(struct usb_device *udev,
- pm_message_t msg);
-extern int usb_external_resume_device(struct usb_device *udev,
- pm_message_t msg);
-
-static inline void usb_pm_lock(struct usb_device *udev)
-{
- mutex_lock_nested(&udev->pm_mutex, udev->level);
-}
-
-static inline void usb_pm_unlock(struct usb_device *udev)
-{
- mutex_unlock(&udev->pm_mutex);
-}
#else
@@ -86,9 +70,6 @@
return 0;
}
-static inline void usb_pm_lock(struct usb_device *udev) {}
-static inline void usb_pm_unlock(struct usb_device *udev) {}
-
#endif
#ifdef CONFIG_USB_SUSPEND
@@ -96,6 +77,7 @@
extern void usb_autosuspend_device(struct usb_device *udev);
extern void usb_try_autosuspend_device(struct usb_device *udev);
extern int usb_autoresume_device(struct usb_device *udev);
+extern int usb_remote_wakeup(struct usb_device *dev);
#else
@@ -106,9 +88,13 @@
return 0;
}
+static inline int usb_remote_wakeup(struct usb_device *udev)
+{
+ return 0;
+}
+
#endif
-extern struct workqueue_struct *ksuspend_usb_wq;
extern struct bus_type usb_bus_type;
extern struct device_type usb_device_type;
extern struct device_type usb_if_device_type;
@@ -138,23 +124,6 @@
for_devices;
}
-/* Interfaces and their "power state" are owned by usbcore */
-
-static inline void mark_active(struct usb_interface *f)
-{
- f->is_active = 1;
-}
-
-static inline void mark_quiesced(struct usb_interface *f)
-{
- f->is_active = 0;
-}
-
-static inline int is_active(const struct usb_interface *f)
-{
- return f->is_active;
-}
-
/* for labeling diagnostics */
extern const char *usbcore_name;
diff --git a/drivers/usb/early/ehci-dbgp.c b/drivers/usb/early/ehci-dbgp.c
index 2958a12..6e98a36 100644
--- a/drivers/usb/early/ehci-dbgp.c
+++ b/drivers/usb/early/ehci-dbgp.c
@@ -66,8 +66,6 @@
#define USB_DEBUG_DEVNUM 127
-#define DBGP_DATA_TOGGLE 0x8800
-
#ifdef DBGP_DEBUG
#define dbgp_printk printk
static void dbgp_ehci_status(char *str)
@@ -88,11 +86,6 @@
static inline void dbgp_printk(const char *fmt, ...) { }
#endif
-static inline u32 dbgp_pid_update(u32 x, u32 tok)
-{
- return ((x ^ DBGP_DATA_TOGGLE) & 0xffff00) | (tok & 0xff);
-}
-
static inline u32 dbgp_len_update(u32 x, u32 len)
{
return (x & ~0x0f) | (len & 0x0f);
@@ -136,6 +129,19 @@
#define DBGP_MAX_PACKET 8
#define DBGP_TIMEOUT (250 * 1000)
+#define DBGP_LOOPS 1000
+
+static inline u32 dbgp_pid_write_update(u32 x, u32 tok)
+{
+ static int data0 = USB_PID_DATA1;
+ data0 ^= USB_PID_DATA_TOGGLE;
+ return (x & 0xffff0000) | (data0 << 8) | (tok & 0xff);
+}
+
+static inline u32 dbgp_pid_read_update(u32 x, u32 tok)
+{
+ return (x & 0xffff0000) | (USB_PID_DATA0 << 8) | (tok & 0xff);
+}
static int dbgp_wait_until_complete(void)
{
@@ -180,7 +186,7 @@
{
u32 pids, lpid;
int ret;
- int loop = 3;
+ int loop = DBGP_LOOPS;
retry:
writel(ctrl | DBGP_GO, &ehci_debug->control);
@@ -197,6 +203,8 @@
*/
if (ret == -DBGP_TIMEOUT && !dbgp_not_safe)
dbgp_not_safe = 1;
+ if (ret == -DBGP_ERR_BAD && --loop > 0)
+ goto retry;
return ret;
}
@@ -245,12 +253,20 @@
bytes[i] = (hi >> (8*(i - 4))) & 0xff;
}
-static int dbgp_out(u32 addr, const char *bytes, int size)
+static int dbgp_bulk_write(unsigned devnum, unsigned endpoint,
+ const char *bytes, int size)
{
+ int ret;
+ u32 addr;
u32 pids, ctrl;
+ if (size > DBGP_MAX_PACKET)
+ return -1;
+
+ addr = DBGP_EPADDR(devnum, endpoint);
+
pids = readl(&ehci_debug->pids);
- pids = dbgp_pid_update(pids, USB_PID_OUT);
+ pids = dbgp_pid_write_update(pids, USB_PID_OUT);
ctrl = readl(&ehci_debug->control);
ctrl = dbgp_len_update(ctrl, size);
@@ -260,34 +276,7 @@
dbgp_set_data(bytes, size);
writel(addr, &ehci_debug->address);
writel(pids, &ehci_debug->pids);
- return dbgp_wait_until_done(ctrl);
-}
-
-static int dbgp_bulk_write(unsigned devnum, unsigned endpoint,
- const char *bytes, int size)
-{
- int ret;
- int loops = 5;
- u32 addr;
- if (size > DBGP_MAX_PACKET)
- return -1;
-
- addr = DBGP_EPADDR(devnum, endpoint);
-try_again:
- if (loops--) {
- ret = dbgp_out(addr, bytes, size);
- if (ret == -DBGP_ERR_BAD) {
- int try_loops = 3;
- do {
- /* Emit a dummy packet to re-sync communication
- * with the debug device */
- if (dbgp_out(addr, "12345678", 8) >= 0) {
- udelay(2);
- goto try_again;
- }
- } while (try_loops--);
- }
- }
+ ret = dbgp_wait_until_done(ctrl);
return ret;
}
@@ -304,7 +293,7 @@
addr = DBGP_EPADDR(devnum, endpoint);
pids = readl(&ehci_debug->pids);
- pids = dbgp_pid_update(pids, USB_PID_IN);
+ pids = dbgp_pid_read_update(pids, USB_PID_IN);
ctrl = readl(&ehci_debug->control);
ctrl = dbgp_len_update(ctrl, size);
@@ -362,7 +351,6 @@
return dbgp_bulk_read(devnum, 0, data, size);
}
-
/* Find a PCI capability */
static u32 __init find_cap(u32 num, u32 slot, u32 func, int cap)
{
diff --git a/drivers/usb/gadget/Kconfig b/drivers/usb/gadget/Kconfig
index ee41120..7460cd7 100644
--- a/drivers/usb/gadget/Kconfig
+++ b/drivers/usb/gadget/Kconfig
@@ -812,6 +812,16 @@
Say "y" to link the driver statically, or "m" to build a
dynamically linked module.
+config USB_G_NOKIA
+ tristate "Nokia composite gadget"
+ depends on PHONET
+ help
+ The Nokia composite gadget provides support for acm, obex
+ and phonet in a single composite gadget driver.
+
+ It's only really useful for N900 hardware. If you're building
+ a kernel for N900, say Y or M here. If unsure, say N.
+
config USB_G_MULTI
tristate "Multifunction Composite Gadget (EXPERIMENTAL)"
depends on BLOCK && NET
diff --git a/drivers/usb/gadget/Makefile b/drivers/usb/gadget/Makefile
index 2e2c047..43b51da 100644
--- a/drivers/usb/gadget/Makefile
+++ b/drivers/usb/gadget/Makefile
@@ -43,6 +43,7 @@
g_printer-objs := printer.o
g_cdc-objs := cdc2.o
g_multi-objs := multi.o
+g_nokia-objs := nokia.o
obj-$(CONFIG_USB_ZERO) += g_zero.o
obj-$(CONFIG_USB_AUDIO) += g_audio.o
@@ -55,4 +56,5 @@
obj-$(CONFIG_USB_MIDI_GADGET) += g_midi.o
obj-$(CONFIG_USB_CDC_COMPOSITE) += g_cdc.o
obj-$(CONFIG_USB_G_MULTI) += g_multi.o
+obj-$(CONFIG_USB_G_NOKIA) += g_nokia.o
diff --git a/drivers/usb/gadget/at91_udc.c b/drivers/usb/gadget/at91_udc.c
index 043e04d..12ac9cd 100644
--- a/drivers/usb/gadget/at91_udc.c
+++ b/drivers/usb/gadget/at91_udc.c
@@ -1656,9 +1656,7 @@
if (!res)
return -ENXIO;
- if (!request_mem_region(res->start,
- res->end - res->start + 1,
- driver_name)) {
+ if (!request_mem_region(res->start, resource_size(res), driver_name)) {
DBG("someone's using UDC memory\n");
return -EBUSY;
}
@@ -1699,7 +1697,7 @@
udc->ep[3].maxpacket = 64;
}
- udc->udp_baseaddr = ioremap(res->start, res->end - res->start + 1);
+ udc->udp_baseaddr = ioremap(res->start, resource_size(res));
if (!udc->udp_baseaddr) {
retval = -ENOMEM;
goto fail0a;
@@ -1781,7 +1779,7 @@
if (cpu_is_at91rm9200())
gpio_free(udc->board.pullup_pin);
fail0:
- release_mem_region(res->start, res->end - res->start + 1);
+ release_mem_region(res->start, resource_size(res));
DBG("%s probe failed, %d\n", driver_name, retval);
return retval;
}
@@ -1813,7 +1811,7 @@
gpio_free(udc->board.pullup_pin);
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- release_mem_region(res->start, res->end - res->start + 1);
+ release_mem_region(res->start, resource_size(res));
clk_put(udc->iclk);
clk_put(udc->fclk);
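
Note: the conversions above replace the open-coded length calculation with the resource_size() helper from include/linux/ioport.h, which computes exactly the same value:

static inline resource_size_t resource_size(const struct resource *res)
{
	return res->end - res->start + 1;
}
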
diff --git a/drivers/usb/gadget/atmel_usba_udc.c b/drivers/usb/gadget/atmel_usba_udc.c
index 4e970cf..f79bdfe 100644
--- a/drivers/usb/gadget/atmel_usba_udc.c
+++ b/drivers/usb/gadget/atmel_usba_udc.c
@@ -320,7 +320,7 @@
static int vbus_is_present(struct usba_udc *udc)
{
if (gpio_is_valid(udc->vbus_pin))
- return gpio_get_value(udc->vbus_pin);
+ return gpio_get_value(udc->vbus_pin) ^ udc->vbus_pin_inverted;
/* No Vbus detection: Assume always present */
return 1;
@@ -1763,7 +1763,7 @@
if (!udc->driver)
goto out;
- vbus = gpio_get_value(udc->vbus_pin);
+ vbus = vbus_is_present(udc);
if (vbus != udc->vbus_prev) {
if (vbus) {
toggle_bias(1);
@@ -1914,14 +1914,14 @@
udc->vbus_pin = -ENODEV;
ret = -ENOMEM;
- udc->regs = ioremap(regs->start, regs->end - regs->start + 1);
+ udc->regs = ioremap(regs->start, resource_size(regs));
if (!udc->regs) {
dev_err(&pdev->dev, "Unable to map I/O memory, aborting.\n");
goto err_map_regs;
}
dev_info(&pdev->dev, "MMIO registers at 0x%08lx mapped at %p\n",
(unsigned long)regs->start, udc->regs);
- udc->fifo = ioremap(fifo->start, fifo->end - fifo->start + 1);
+ udc->fifo = ioremap(fifo->start, resource_size(fifo));
if (!udc->fifo) {
dev_err(&pdev->dev, "Unable to map FIFO, aborting.\n");
goto err_map_fifo;
@@ -2000,6 +2000,7 @@
if (gpio_is_valid(pdata->vbus_pin)) {
if (!gpio_request(pdata->vbus_pin, "atmel_usba_udc")) {
udc->vbus_pin = pdata->vbus_pin;
+ udc->vbus_pin_inverted = pdata->vbus_pin_inverted;
ret = request_irq(gpio_to_irq(udc->vbus_pin),
usba_vbus_irq, 0,
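
For illustration only: with the new flag, a board whose VBUS sense line is wired active-low can set vbus_pin_inverted in its platform data. The struct name and GPIO macro below are assumptions for the sketch, not part of this patch.

static struct usba_platform_data board_usba_pdata = {
	.vbus_pin          = BOARD_VBUS_GPIO,	/* hypothetical GPIO number */
	.vbus_pin_inverted = 1,			/* GPIO reads 0 while VBUS is present */
};

vbus_is_present() then reports gpio_get_value() ^ vbus_pin_inverted, so the inverted level is handled correctly.
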
diff --git a/drivers/usb/gadget/atmel_usba_udc.h b/drivers/usb/gadget/atmel_usba_udc.h
index f7baea3..88a2e07 100644
--- a/drivers/usb/gadget/atmel_usba_udc.h
+++ b/drivers/usb/gadget/atmel_usba_udc.h
@@ -323,6 +323,7 @@
struct platform_device *pdev;
int irq;
int vbus_pin;
+ int vbus_pin_inverted;
struct clk *pclk;
struct clk *hclk;
diff --git a/drivers/usb/gadget/epautoconf.c b/drivers/usb/gadget/epautoconf.c
index cd0914e..65a5f94 100644
--- a/drivers/usb/gadget/epautoconf.c
+++ b/drivers/usb/gadget/epautoconf.c
@@ -265,16 +265,24 @@
return ep;
}
- } else if (gadget_is_sh (gadget) && USB_ENDPOINT_XFER_INT == type) {
- /* single buffering is enough; maybe 8 byte fifo is too */
- ep = find_ep (gadget, "ep3in-bulk");
+#ifdef CONFIG_BLACKFIN
+ } else if (gadget_is_musbhsfc(gadget) || gadget_is_musbhdrc(gadget)) {
+ if ((USB_ENDPOINT_XFER_BULK == type) ||
+ (USB_ENDPOINT_XFER_ISOC == type)) {
+ if (USB_DIR_IN & desc->bEndpointAddress)
+ ep = find_ep (gadget, "ep5in");
+ else
+ ep = find_ep (gadget, "ep6out");
+ } else if (USB_ENDPOINT_XFER_INT == type) {
+ if (USB_DIR_IN & desc->bEndpointAddress)
+ ep = find_ep(gadget, "ep1in");
+ else
+ ep = find_ep(gadget, "ep2out");
+ } else
+ ep = NULL;
if (ep && ep_matches (gadget, ep, desc))
return ep;
-
- } else if (gadget_is_mq11xx (gadget) && USB_ENDPOINT_XFER_INT == type) {
- ep = find_ep (gadget, "ep1-bulk");
- if (ep && ep_matches (gadget, ep, desc))
- return ep;
+#endif
}
/* Second, look at endpoints until an unclaimed one looks usable */
diff --git a/drivers/usb/gadget/ether.c b/drivers/usb/gadget/ether.c
index 141372b..400f803 100644
--- a/drivers/usb/gadget/ether.c
+++ b/drivers/usb/gadget/ether.c
@@ -259,7 +259,7 @@
/*-------------------------------------------------------------------------*/
-#ifdef USB_ETH_EEM
+#ifdef CONFIG_USB_ETH_EEM
static int use_eem = 1;
#else
static int use_eem;
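
The old guard tested the unprefixed symbol, which is never defined: Kconfig options reach C code with a CONFIG_ prefix via the generated autoconf.h, roughly as sketched below, so use_eem silently defaulted to 0 before this fix.

/* drivers/usb/gadget/Kconfig */
config USB_ETH_EEM
	bool "Ethernet Emulation Model (EEM) support"

/* generated autoconf.h when the option is enabled */
#define CONFIG_USB_ETH_EEM 1
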
diff --git a/drivers/usb/gadget/f_acm.c b/drivers/usb/gadget/f_acm.c
index d10353d..e49c732 100644
--- a/drivers/usb/gadget/f_acm.c
+++ b/drivers/usb/gadget/f_acm.c
@@ -702,14 +702,6 @@
/* Some controllers can't support CDC ACM ... */
static inline bool can_support_cdc(struct usb_configuration *c)
{
- /* SH3 doesn't support multiple interfaces */
- if (gadget_is_sh(c->cdev->gadget))
- return false;
-
- /* sa1100 doesn't have a third interrupt endpoint */
- if (gadget_is_sa1100(c->cdev->gadget))
- return false;
-
/* everything else is *probably* fine ... */
return true;
}
diff --git a/drivers/usb/gadget/f_ecm.c b/drivers/usb/gadget/f_ecm.c
index ecf5bdd..2fff530 100644
--- a/drivers/usb/gadget/f_ecm.c
+++ b/drivers/usb/gadget/f_ecm.c
@@ -497,12 +497,9 @@
struct net_device *net;
/* Enable zlps by default for ECM conformance;
- * override for musb_hdrc (avoids txdma ovhead)
- * and sa1100 (can't).
+ * override for musb_hdrc (avoids txdma ovhead).
*/
- ecm->port.is_zlp_ok = !(
- gadget_is_sa1100(cdev->gadget)
- || gadget_is_musbhdrc(cdev->gadget)
+ ecm->port.is_zlp_ok = !(gadget_is_musbhdrc(cdev->gadget)
);
ecm->port.cdc_filter = DEFAULT_FILTER;
DBG(cdev, "activate ecm\n");
diff --git a/drivers/usb/gadget/f_mass_storage.c b/drivers/usb/gadget/f_mass_storage.c
index a37640e..b1935fe 100644
--- a/drivers/usb/gadget/f_mass_storage.c
+++ b/drivers/usb/gadget/f_mass_storage.c
@@ -368,7 +368,7 @@
struct task_struct *thread_task;
/* Callback function to call when thread exits. */
- void (*thread_exits)(struct fsg_common *common);
+ int (*thread_exits)(struct fsg_common *common);
/* Gadget's private data. */
void *private_data;
@@ -392,8 +392,12 @@
const char *lun_name_format;
const char *thread_name;
- /* Callback function to call when thread exits. */
- void (*thread_exits)(struct fsg_common *common);
+ /* Callback function to call when thread exits. If no
+ * callback is set or it returns a value lower than zero, MSF
+ * will force eject all LUNs it operates on (including those
+ * marked as non-removable or with prevent_medium_removal flag
+ * set). */
+ int (*thread_exits)(struct fsg_common *common);
/* Gadget's private data. */
void *private_data;
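
A hypothetical gadget-side callback, shown only to illustrate the new int return convention (the name and body are examples, not part of the patch):

static int my_gadget_thread_exits(struct fsg_common *common)
{
	do_gadget_specific_cleanup();	/* assumed helper */

	/* >= 0: MSF leaves the LUNs as they are;
	 * < 0 (or no callback at all): MSF force-ejects every LUN,
	 * including non-removable ones and those with
	 * prevent_medium_removal set. */
	return 0;
}
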
@@ -614,7 +618,12 @@
return -EDOM;
VDBG(fsg, "get max LUN\n");
*(u8 *) req->buf = fsg->common->nluns - 1;
- return 1;
+
+ /* Respond with data/status */
+ req->length = min((u16)1, w_length);
+ fsg->common->ep0req_name =
+ ctrl->bRequestType & USB_DIR_IN ? "ep0-in" : "ep0-out";
+ return ep0_queue(fsg->common);
}
VDBG(fsg,
@@ -2524,14 +2533,6 @@
case FSG_STATE_CONFIG_CHANGE:
rc = do_set_config(common, new_config);
- if (common->ep0_req_tag != exception_req_tag)
- break;
- if (rc != 0) { /* STALL on errors */
- DBG(common, "ep0 set halt\n");
- usb_ep_set_halt(common->ep0);
- } else { /* Complete the status stage */
- ep0_queue(common);
- }
break;
case FSG_STATE_EXIT:
@@ -2615,8 +2616,20 @@
common->thread_task = NULL;
spin_unlock_irq(&common->lock);
- if (common->thread_exits)
- common->thread_exits(common);
+ if (!common->thread_exits || common->thread_exits(common) < 0) {
+ struct fsg_lun *curlun = common->luns;
+ unsigned i = common->nluns;
+
+ down_write(&common->filesem);
+ for (; i--; ++curlun) {
+ if (!fsg_lun_is_open(curlun))
+ continue;
+
+ fsg_lun_close(curlun);
+ curlun->unit_attention_data = SS_MEDIUM_NOT_PRESENT;
+ }
+ up_write(&common->filesem);
+ }
/* Let the unbind and cleanup routines know the thread has exited */
complete_and_exit(&common->thread_notifier, 0);
@@ -2763,10 +2776,7 @@
if (cfg->release != 0xffff) {
i = cfg->release;
} else {
- /* The sa1100 controller is not supported */
- i = gadget_is_sa1100(gadget)
- ? -1
- : usb_gadget_controller_number(gadget);
+ i = usb_gadget_controller_number(gadget);
if (i >= 0) {
i = 0x0300 + i;
} else {
@@ -2791,8 +2801,7 @@
* disable stalls.
*/
common->can_stall = cfg->can_stall &&
- !(gadget_is_sh(common->gadget) ||
- gadget_is_at91(common->gadget));
+ !(gadget_is_at91(common->gadget));
spin_lock_init(&common->lock);
@@ -2852,7 +2861,6 @@
/* Call fsg_common_release() directly, ref might be not
* initialised */
fsg_common_release(&common->ref);
- complete(&common->thread_notifier);
return ERR_PTR(rc);
}
diff --git a/drivers/usb/gadget/f_rndis.c b/drivers/usb/gadget/f_rndis.c
index 95dae4c..a30e60c 100644
--- a/drivers/usb/gadget/f_rndis.c
+++ b/drivers/usb/gadget/f_rndis.c
@@ -769,10 +769,6 @@
/* Some controllers can't support RNDIS ... */
static inline bool can_support_rndis(struct usb_configuration *c)
{
- /* only two endpoints on sa1100 */
- if (gadget_is_sa1100(c->cdev->gadget))
- return false;
-
/* everything else is *presumably* fine */
return true;
}
diff --git a/drivers/usb/gadget/file_storage.c b/drivers/usb/gadget/file_storage.c
index 29dfb02..a90dd2d 100644
--- a/drivers/usb/gadget/file_storage.c
+++ b/drivers/usb/gadget/file_storage.c
@@ -3208,15 +3208,11 @@
* halt bulk endpoints correctly. If one of them is present,
* disable stalls.
*/
- if (gadget_is_sh(fsg->gadget) || gadget_is_at91(fsg->gadget))
+ if (gadget_is_at91(fsg->gadget))
mod_data.can_stall = 0;
if (mod_data.release == 0xffff) { // Parameter wasn't set
- /* The sa1100 controller is not supported */
- if (gadget_is_sa1100(fsg->gadget))
- gcnum = -1;
- else
- gcnum = usb_gadget_controller_number(fsg->gadget);
+ gcnum = usb_gadget_controller_number(fsg->gadget);
if (gcnum >= 0)
mod_data.release = 0x0300 + gcnum;
else {
diff --git a/drivers/usb/gadget/fsl_qe_udc.c b/drivers/usb/gadget/fsl_qe_udc.c
index 7881f12..3537d51 100644
--- a/drivers/usb/gadget/fsl_qe_udc.c
+++ b/drivers/usb/gadget/fsl_qe_udc.c
@@ -2749,7 +2749,7 @@
}
/*-------------------------------------------------------------------------*/
-static struct of_device_id __devinitdata qe_udc_match[] = {
+static const struct of_device_id qe_udc_match[] __devinitconst = {
{
.compatible = "fsl,mpc8323-qe-usb",
.data = (void *)PORT_QE,
diff --git a/drivers/usb/gadget/gadget_chips.h b/drivers/usb/gadget/gadget_chips.h
index f2d270b..1edbc12 100644
--- a/drivers/usb/gadget/gadget_chips.h
+++ b/drivers/usb/gadget/gadget_chips.h
@@ -45,46 +45,18 @@
#define gadget_is_goku(g) 0
#endif
-/* SH3 UDC -- not yet ported 2.4 --> 2.6 */
-#ifdef CONFIG_USB_GADGET_SUPERH
-#define gadget_is_sh(g) !strcmp("sh_udc", (g)->name)
-#else
-#define gadget_is_sh(g) 0
-#endif
-
-/* not yet stable on 2.6 (would help "original Zaurus") */
-#ifdef CONFIG_USB_GADGET_SA1100
-#define gadget_is_sa1100(g) !strcmp("sa1100_udc", (g)->name)
-#else
-#define gadget_is_sa1100(g) 0
-#endif
-
#ifdef CONFIG_USB_GADGET_LH7A40X
#define gadget_is_lh7a40x(g) !strcmp("lh7a40x_udc", (g)->name)
#else
#define gadget_is_lh7a40x(g) 0
#endif
-/* handhelds.org tree (?) */
-#ifdef CONFIG_USB_GADGET_MQ11XX
-#define gadget_is_mq11xx(g) !strcmp("mq11xx_udc", (g)->name)
-#else
-#define gadget_is_mq11xx(g) 0
-#endif
-
#ifdef CONFIG_USB_GADGET_OMAP
#define gadget_is_omap(g) !strcmp("omap_udc", (g)->name)
#else
#define gadget_is_omap(g) 0
#endif
-/* not yet ported 2.4 --> 2.6 */
-#ifdef CONFIG_USB_GADGET_N9604
-#define gadget_is_n9604(g) !strcmp("n9604_udc", (g)->name)
-#else
-#define gadget_is_n9604(g) 0
-#endif
-
/* various unstable versions available */
#ifdef CONFIG_USB_GADGET_PXA27X
#define gadget_is_pxa27x(g) !strcmp("pxa27x_udc", (g)->name)
@@ -122,14 +94,6 @@
#define gadget_is_fsl_usb2(g) 0
#endif
-/* Mentor high speed function controller */
-/* from Montavista kernel (?) */
-#ifdef CONFIG_USB_GADGET_MUSBHSFC
-#define gadget_is_musbhsfc(g) !strcmp("musbhsfc_udc", (g)->name)
-#else
-#define gadget_is_musbhsfc(g) 0
-#endif
-
/* Mentor high speed "dual role" controller, in peripheral role */
#ifdef CONFIG_USB_GADGET_MUSB_HDRC
#define gadget_is_musbhdrc(g) !strcmp("musb_hdrc", (g)->name)
@@ -143,13 +107,6 @@
#define gadget_is_langwell(g) 0
#endif
-/* from Montavista kernel (?) */
-#ifdef CONFIG_USB_GADGET_MPC8272
-#define gadget_is_mpc8272(g) !strcmp("mpc8272_udc", (g)->name)
-#else
-#define gadget_is_mpc8272(g) 0
-#endif
-
#ifdef CONFIG_USB_GADGET_M66592
#define gadget_is_m66592(g) !strcmp("m66592_udc", (g)->name)
#else
@@ -203,20 +160,12 @@
return 0x02;
else if (gadget_is_pxa(gadget))
return 0x03;
- else if (gadget_is_sh(gadget))
- return 0x04;
- else if (gadget_is_sa1100(gadget))
- return 0x05;
else if (gadget_is_goku(gadget))
return 0x06;
- else if (gadget_is_mq11xx(gadget))
- return 0x07;
else if (gadget_is_omap(gadget))
return 0x08;
else if (gadget_is_lh7a40x(gadget))
return 0x09;
- else if (gadget_is_n9604(gadget))
- return 0x10;
else if (gadget_is_pxa27x(gadget))
return 0x11;
else if (gadget_is_s3c2410(gadget))
@@ -225,12 +174,8 @@
return 0x13;
else if (gadget_is_imx(gadget))
return 0x14;
- else if (gadget_is_musbhsfc(gadget))
- return 0x15;
else if (gadget_is_musbhdrc(gadget))
return 0x16;
- else if (gadget_is_mpc8272(gadget))
- return 0x17;
else if (gadget_is_atmel_usba(gadget))
return 0x18;
else if (gadget_is_fsl_usb2(gadget))
@@ -265,10 +210,6 @@
if (gadget_is_pxa27x(gadget))
return false;
- /* SH3 hardware just doesn't do altsettings */
- if (gadget_is_sh(gadget))
- return false;
-
/* Everything else is *presumably* fine ... */
return true;
}
diff --git a/drivers/usb/gadget/gmidi.c b/drivers/usb/gadget/gmidi.c
index 5f6a2e0..04f6224 100644
--- a/drivers/usb/gadget/gmidi.c
+++ b/drivers/usb/gadget/gmidi.c
@@ -618,11 +618,6 @@
}
#endif
- if (gadget_is_sa1100(gadget) && dev->config) {
- /* tx fifo is full, but we can't clear it...*/
- ERROR(dev, "can't change configurations\n");
- return -ESPIPE;
- }
gmidi_reset_config(dev);
switch (number) {
diff --git a/drivers/usb/gadget/goku_udc.c b/drivers/usb/gadget/goku_udc.c
index 112bb40..e8edc64 100644
--- a/drivers/usb/gadget/goku_udc.c
+++ b/drivers/usb/gadget/goku_udc.c
@@ -1859,7 +1859,7 @@
/*-------------------------------------------------------------------------*/
-static struct pci_device_id pci_ids [] = { {
+static const struct pci_device_id pci_ids[] = { {
.class = ((PCI_CLASS_SERIAL_USB << 8) | 0xfe),
.class_mask = ~0,
.vendor = 0x102f, /* Toshiba */
diff --git a/drivers/usb/gadget/inode.c b/drivers/usb/gadget/inode.c
index bf0f652..de8a838 100644
--- a/drivers/usb/gadget/inode.c
+++ b/drivers/usb/gadget/inode.c
@@ -194,7 +194,7 @@
};
struct ep_data {
- struct semaphore lock;
+ struct mutex lock;
enum ep_state state;
atomic_t count;
struct dev_data *dev;
@@ -298,10 +298,10 @@
int val;
if (f_flags & O_NONBLOCK) {
- if (down_trylock (&epdata->lock) != 0)
+ if (!mutex_trylock(&epdata->lock))
goto nonblock;
if (epdata->state != STATE_EP_ENABLED) {
- up (&epdata->lock);
+ mutex_unlock(&epdata->lock);
nonblock:
val = -EAGAIN;
} else
@@ -309,7 +309,8 @@
return val;
}
- if ((val = down_interruptible (&epdata->lock)) < 0)
+ val = mutex_lock_interruptible(&epdata->lock);
+ if (val < 0)
return val;
switch (epdata->state) {
@@ -323,7 +324,7 @@
// FALLTHROUGH
case STATE_EP_UNBOUND: /* clean disconnect */
val = -ENODEV;
- up (&epdata->lock);
+ mutex_unlock(&epdata->lock);
}
return val;
}
@@ -393,7 +394,7 @@
if (likely (data->ep != NULL))
usb_ep_set_halt (data->ep);
spin_unlock_irq (&data->dev->lock);
- up (&data->lock);
+ mutex_unlock(&data->lock);
return -EBADMSG;
}
@@ -411,7 +412,7 @@
value = -EFAULT;
free1:
- up (&data->lock);
+ mutex_unlock(&data->lock);
kfree (kbuf);
return value;
}
@@ -436,7 +437,7 @@
if (likely (data->ep != NULL))
usb_ep_set_halt (data->ep);
spin_unlock_irq (&data->dev->lock);
- up (&data->lock);
+ mutex_unlock(&data->lock);
return -EBADMSG;
}
@@ -455,7 +456,7 @@
VDEBUG (data->dev, "%s write %zu IN, status %d\n",
data->name, len, (int) value);
free1:
- up (&data->lock);
+ mutex_unlock(&data->lock);
kfree (kbuf);
return value;
}
@@ -466,7 +467,8 @@
struct ep_data *data = fd->private_data;
int value;
- if ((value = down_interruptible(&data->lock)) < 0)
+ value = mutex_lock_interruptible(&data->lock);
+ if (value < 0)
return value;
/* clean up if this can be reopened */
@@ -476,7 +478,7 @@
data->hs_desc.bDescriptorType = 0;
usb_ep_disable(data->ep);
}
- up (&data->lock);
+ mutex_unlock(&data->lock);
put_ep (data);
return 0;
}
@@ -507,7 +509,7 @@
} else
status = -ENODEV;
spin_unlock_irq (&data->dev->lock);
- up (&data->lock);
+ mutex_unlock(&data->lock);
return status;
}
@@ -673,7 +675,7 @@
value = -ENODEV;
spin_unlock_irq(&epdata->dev->lock);
- up(&epdata->lock);
+ mutex_unlock(&epdata->lock);
if (unlikely(value)) {
kfree(priv);
@@ -765,7 +767,8 @@
u32 tag;
int value, length = len;
- if ((value = down_interruptible (&data->lock)) < 0)
+ value = mutex_lock_interruptible(&data->lock);
+ if (value < 0)
return value;
if (data->state != STATE_EP_READY) {
@@ -854,7 +857,7 @@
data->desc.bDescriptorType = 0;
data->hs_desc.bDescriptorType = 0;
}
- up (&data->lock);
+ mutex_unlock(&data->lock);
return value;
fail0:
value = -EINVAL;
@@ -870,7 +873,7 @@
struct ep_data *data = inode->i_private;
int value = -EBUSY;
- if (down_interruptible (&data->lock) != 0)
+ if (mutex_lock_interruptible(&data->lock) != 0)
return -EINTR;
spin_lock_irq (&data->dev->lock);
if (data->dev->state == STATE_DEV_UNBOUND)
@@ -885,7 +888,7 @@
DBG (data->dev, "%s state %d\n",
data->name, data->state);
spin_unlock_irq (&data->dev->lock);
- up (&data->lock);
+ mutex_unlock(&data->lock);
return value;
}
@@ -1631,7 +1634,7 @@
if (!data)
goto enomem0;
data->state = STATE_EP_DISABLED;
- init_MUTEX (&data->lock);
+ mutex_init(&data->lock);
init_waitqueue_head (&data->wait);
strncpy (data->name, ep->name, sizeof (data->name) - 1);
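
A condensed sketch of the locking pattern after the semaphore-to-mutex conversion in the paths above: mutex_trylock() succeeds with 1 where down_trylock() succeeded with 0, hence the inverted test, while mutex_lock_interruptible() keeps the 0 / -EINTR convention of down_interruptible(), so the existing "(val < 0)" error paths are unchanged.

if (f_flags & O_NONBLOCK) {
	if (!mutex_trylock(&epdata->lock))	/* 0 means contended */
		return -EAGAIN;
} else {
	val = mutex_lock_interruptible(&epdata->lock);
	if (val < 0)				/* -EINTR on signal */
		return val;
}
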
diff --git a/drivers/usb/gadget/mass_storage.c b/drivers/usb/gadget/mass_storage.c
index 19619fb..705cc1f 100644
--- a/drivers/usb/gadget/mass_storage.c
+++ b/drivers/usb/gadget/mass_storage.c
@@ -135,6 +135,12 @@
static unsigned long msg_registered = 0;
static void msg_cleanup(void);
+static int msg_thread_exits(struct fsg_common *common)
+{
+ msg_cleanup();
+ return 0;
+}
+
static int __init msg_do_config(struct usb_configuration *c)
{
struct fsg_common *common;
@@ -147,7 +153,7 @@
}
fsg_config_from_params(&config, &mod_data);
- config.thread_exits = (void(*)(struct fsg_common*))&msg_cleanup;
+ config.thread_exits = msg_thread_exits;
common = fsg_common_init(0, c->cdev, &config);
if (IS_ERR(common))
return PTR_ERR(common);
diff --git a/drivers/usb/gadget/nokia.c b/drivers/usb/gadget/nokia.c
new file mode 100644
index 0000000..7d6b66a
--- /dev/null
+++ b/drivers/usb/gadget/nokia.c
@@ -0,0 +1,259 @@
+/*
+ * nokia.c -- Nokia Composite Gadget Driver
+ *
+ * Copyright (C) 2008-2010 Nokia Corporation
+ * Contact: Felipe Balbi <felipe.balbi@nokia.com>
+ *
+ * This gadget driver borrows from serial.c which is:
+ *
+ * Copyright (C) 2003 Al Borchers (alborchers@steinerpoint.com)
+ * Copyright (C) 2008 by David Brownell
+ * Copyright (C) 2008 by Nokia Corporation
+ *
+ * This software is distributed under the terms of the GNU General
+ * Public License ("GPL") as published by the Free Software Foundation,
+ * version 2 of that License.
+ */
+
+#include <linux/kernel.h>
+#include <linux/utsname.h>
+#include <linux/device.h>
+
+#include "u_serial.h"
+#include "u_ether.h"
+#include "u_phonet.h"
+#include "gadget_chips.h"
+
+/* Defines */
+
+#define NOKIA_VERSION_NUM 0x0211
+#define NOKIA_LONG_NAME "N900 (PC-Suite Mode)"
+
+/*-------------------------------------------------------------------------*/
+
+/*
+ * Kbuild is not very cooperative with respect to linking separately
+ * compiled library objects into one module. So for now we won't use
+ * separate compilation ... ensuring init/exit sections work to shrink
+ * the runtime footprint, and giving us at least some parts of what
+ * a "gcc --combine ... part1.c part2.c part3.c ... " build would.
+ */
+#include "composite.c"
+#include "usbstring.c"
+#include "config.c"
+#include "epautoconf.c"
+
+#include "u_serial.c"
+#include "f_acm.c"
+#include "f_ecm.c"
+#include "f_obex.c"
+#include "f_serial.c"
+#include "f_phonet.c"
+#include "u_ether.c"
+
+/*-------------------------------------------------------------------------*/
+
+#define NOKIA_VENDOR_ID 0x0421 /* Nokia */
+#define NOKIA_PRODUCT_ID 0x01c8 /* Nokia Gadget */
+
+/* string IDs are assigned dynamically */
+
+#define STRING_MANUFACTURER_IDX 0
+#define STRING_PRODUCT_IDX 1
+#define STRING_DESCRIPTION_IDX 2
+
+static char manufacturer_nokia[] = "Nokia";
+static const char product_nokia[] = NOKIA_LONG_NAME;
+static const char description_nokia[] = "PC-Suite Configuration";
+
+static struct usb_string strings_dev[] = {
+ [STRING_MANUFACTURER_IDX].s = manufacturer_nokia,
+ [STRING_PRODUCT_IDX].s = NOKIA_LONG_NAME,
+ [STRING_DESCRIPTION_IDX].s = description_nokia,
+ { } /* end of list */
+};
+
+static struct usb_gadget_strings stringtab_dev = {
+ .language = 0x0409, /* en-us */
+ .strings = strings_dev,
+};
+
+static struct usb_gadget_strings *dev_strings[] = {
+ &stringtab_dev,
+ NULL,
+};
+
+static struct usb_device_descriptor device_desc = {
+ .bLength = USB_DT_DEVICE_SIZE,
+ .bDescriptorType = USB_DT_DEVICE,
+ .bcdUSB = __constant_cpu_to_le16(0x0200),
+ .bDeviceClass = USB_CLASS_COMM,
+ .idVendor = __constant_cpu_to_le16(NOKIA_VENDOR_ID),
+ .idProduct = __constant_cpu_to_le16(NOKIA_PRODUCT_ID),
+ /* .iManufacturer = DYNAMIC */
+ /* .iProduct = DYNAMIC */
+ .bNumConfigurations = 1,
+};
+
+/*-------------------------------------------------------------------------*/
+
+/* Module */
+MODULE_DESCRIPTION("Nokia composite gadget driver for N900");
+MODULE_AUTHOR("Felipe Balbi");
+MODULE_LICENSE("GPL");
+
+/*-------------------------------------------------------------------------*/
+
+static u8 hostaddr[ETH_ALEN];
+
+static int __init nokia_bind_config(struct usb_configuration *c)
+{
+ int status = 0;
+
+ status = phonet_bind_config(c);
+ if (status)
+ printk(KERN_DEBUG "could not bind phonet config\n");
+
+ status = obex_bind_config(c, 0);
+ if (status)
+ printk(KERN_DEBUG "could not bind obex config %d\n", 0);
+
+ status = obex_bind_config(c, 1);
+ if (status)
+ printk(KERN_DEBUG "could not bind obex config %d\n", 0);
+
+ status = acm_bind_config(c, 2);
+ if (status)
+ printk(KERN_DEBUG "could not bind acm config\n");
+
+ status = ecm_bind_config(c, hostaddr);
+ if (status)
+ printk(KERN_DEBUG "could not bind ecm config\n");
+
+ return status;
+}
+
+static struct usb_configuration nokia_config_500ma_driver = {
+ .label = "Bus Powered",
+ .bind = nokia_bind_config,
+ .bConfigurationValue = 1,
+ /* .iConfiguration = DYNAMIC */
+ .bmAttributes = USB_CONFIG_ATT_ONE,
+ .bMaxPower = 250, /* 500mA */
+};
+
+static struct usb_configuration nokia_config_100ma_driver = {
+ .label = "Self Powered",
+ .bind = nokia_bind_config,
+ .bConfigurationValue = 2,
+ /* .iConfiguration = DYNAMIC */
+ .bmAttributes = USB_CONFIG_ATT_ONE | USB_CONFIG_ATT_SELFPOWER,
+ .bMaxPower = 50, /* 100 mA */
+};
+
+static int __init nokia_bind(struct usb_composite_dev *cdev)
+{
+ int gcnum;
+ struct usb_gadget *gadget = cdev->gadget;
+ int status;
+
+ status = gphonet_setup(cdev->gadget);
+ if (status < 0)
+ goto err_phonet;
+
+ status = gserial_setup(cdev->gadget, 3);
+ if (status < 0)
+ goto err_serial;
+
+ status = gether_setup(cdev->gadget, hostaddr);
+ if (status < 0)
+ goto err_ether;
+
+ status = usb_string_id(cdev);
+ if (status < 0)
+ goto err_usb;
+ strings_dev[STRING_MANUFACTURER_IDX].id = status;
+
+ device_desc.iManufacturer = status;
+
+ status = usb_string_id(cdev);
+ if (status < 0)
+ goto err_usb;
+ strings_dev[STRING_PRODUCT_IDX].id = status;
+
+ device_desc.iProduct = status;
+
+ /* config description */
+ status = usb_string_id(cdev);
+ if (status < 0)
+ goto err_usb;
+ strings_dev[STRING_DESCRIPTION_IDX].id = status;
+
+ nokia_config_500ma_driver.iConfiguration = status;
+ nokia_config_100ma_driver.iConfiguration = status;
+
+ /* set up other descriptors */
+ gcnum = usb_gadget_controller_number(gadget);
+ if (gcnum >= 0)
+ device_desc.bcdDevice = cpu_to_le16(NOKIA_VERSION_NUM);
+ else {
+ /* this should only work with hw that supports altsettings
+ * and several endpoints, anything else, panic.
+ */
+ pr_err("nokia_bind: controller '%s' not recognized\n",
+ gadget->name);
+ goto err_usb;
+ }
+
+ /* finally register the configuration */
+ status = usb_add_config(cdev, &nokia_config_500ma_driver);
+ if (status < 0)
+ goto err_usb;
+
+ status = usb_add_config(cdev, &nokia_config_100ma_driver);
+ if (status < 0)
+ goto err_usb;
+
+ dev_info(&gadget->dev, "%s\n", NOKIA_LONG_NAME);
+
+ return 0;
+
+err_usb:
+ gether_cleanup();
+err_ether:
+ gserial_cleanup();
+err_serial:
+ gphonet_cleanup();
+err_phonet:
+ return status;
+}
+
+static int __exit nokia_unbind(struct usb_composite_dev *cdev)
+{
+ gphonet_cleanup();
+ gserial_cleanup();
+ gether_cleanup();
+
+ return 0;
+}
+
+static struct usb_composite_driver nokia_driver = {
+ .name = "g_nokia",
+ .dev = &device_desc,
+ .strings = dev_strings,
+ .bind = nokia_bind,
+ .unbind = __exit_p(nokia_unbind),
+};
+
+static int __init nokia_init(void)
+{
+ return usb_composite_register(&nokia_driver);
+}
+module_init(nokia_init);
+
+static void __exit nokia_cleanup(void)
+{
+ usb_composite_unregister(&nokia_driver);
+}
+module_exit(nokia_cleanup);
+
diff --git a/drivers/usb/gadget/printer.c b/drivers/usb/gadget/printer.c
index 2d867fd..6b8bf8c 100644
--- a/drivers/usb/gadget/printer.c
+++ b/drivers/usb/gadget/printer.c
@@ -949,12 +949,6 @@
int result = 0;
struct usb_gadget *gadget = dev->gadget;
- if (gadget_is_sa1100(gadget) && dev->config) {
- /* tx fifo is full, but we can't clear it...*/
- INFO(dev, "can't change configurations\n");
- return -ESPIPE;
- }
-
switch (number) {
case DEV_CONFIG_VALUE:
result = 0;
@@ -1033,12 +1027,6 @@
{
int result = 0;
- if (gadget_is_sa1100(dev->gadget) && dev->interface < 0) {
- /* tx fifo is full, but we can't clear it...*/
- INFO(dev, "can't change interfaces\n");
- return -ESPIPE;
- }
-
/* Free the current interface */
switch (dev->interface) {
case PRINTER_INTERFACE:
@@ -1392,12 +1380,6 @@
goto fail;
}
- if (gadget_is_sa1100(gadget)) {
- /* hardware can't write zero length packets. */
- ERROR(dev, "SA1100 controller is unsupport by this driver\n");
- goto fail;
- }
-
gcnum = usb_gadget_controller_number(gadget);
if (gcnum >= 0) {
device_desc.bcdDevice = cpu_to_le16(0x0200 + gcnum);
diff --git a/drivers/usb/gadget/pxa27x_udc.c b/drivers/usb/gadget/pxa27x_udc.c
index adda120..05b892c 100644
--- a/drivers/usb/gadget/pxa27x_udc.c
+++ b/drivers/usb/gadget/pxa27x_udc.c
@@ -742,13 +742,17 @@
* @ep: pxa physical endpoint
* @req: pxa request
* @status: usb request status sent to gadget API
+ * @pflags: flags of previous spin_lock_irqsave() or NULL if no lock held
*
- * Context: ep->lock held
+ * Context: ep->lock held if flags not NULL, else ep->lock released
*
* Retire a pxa27x usb request. Endpoint must be locked.
*/
-static void req_done(struct pxa_ep *ep, struct pxa27x_request *req, int status)
+static void req_done(struct pxa_ep *ep, struct pxa27x_request *req, int status,
+ unsigned long *pflags)
{
+ unsigned long flags;
+
ep_del_request(ep, req);
if (likely(req->req.status == -EINPROGRESS))
req->req.status = status;
@@ -760,38 +764,48 @@
&req->req, status,
req->req.actual, req->req.length);
+ if (pflags)
+ spin_unlock_irqrestore(&ep->lock, *pflags);
+ local_irq_save(flags);
req->req.complete(&req->udc_usb_ep->usb_ep, &req->req);
+ local_irq_restore(flags);
+ if (pflags)
+ spin_lock_irqsave(&ep->lock, *pflags);
}
/**
* ep_end_out_req - Ends endpoint OUT request
* @ep: physical endpoint
* @req: pxa request
+ * @pflags: flags of previous spin_lock_irqsave() or NULL if no lock held
*
- * Context: ep->lock held
+ * Context: ep->lock held or released (see req_done())
*
* Ends endpoint OUT request (completes usb request).
*/
-static void ep_end_out_req(struct pxa_ep *ep, struct pxa27x_request *req)
+static void ep_end_out_req(struct pxa_ep *ep, struct pxa27x_request *req,
+ unsigned long *pflags)
{
inc_ep_stats_reqs(ep, !USB_DIR_IN);
- req_done(ep, req, 0);
+ req_done(ep, req, 0, pflags);
}
/**
* ep0_end_out_req - Ends control endpoint OUT request (ends data stage)
* @ep: physical endpoint
* @req: pxa request
+ * @pflags: flags of previous spin_lock_irqsave() or NULL if no lock held
*
- * Context: ep->lock held
+ * Context: ep->lock held or released (see req_done())
*
* Ends control endpoint OUT request (completes usb request), and puts
* control endpoint into idle state
*/
-static void ep0_end_out_req(struct pxa_ep *ep, struct pxa27x_request *req)
+static void ep0_end_out_req(struct pxa_ep *ep, struct pxa27x_request *req,
+ unsigned long *pflags)
{
set_ep0state(ep->dev, OUT_STATUS_STAGE);
- ep_end_out_req(ep, req);
+ ep_end_out_req(ep, req, pflags);
ep0_idle(ep->dev);
}
@@ -799,31 +813,35 @@
* ep_end_in_req - Ends endpoint IN request
* @ep: physical endpoint
* @req: pxa request
+ * @pflags: flags of previous spin_lock_irqsave() or NULL if no lock held
*
- * Context: ep->lock held
+ * Context: ep->lock held or released (see req_done())
*
* Ends endpoint IN request (completes usb request).
*/
-static void ep_end_in_req(struct pxa_ep *ep, struct pxa27x_request *req)
+static void ep_end_in_req(struct pxa_ep *ep, struct pxa27x_request *req,
+ unsigned long *pflags)
{
inc_ep_stats_reqs(ep, USB_DIR_IN);
- req_done(ep, req, 0);
+ req_done(ep, req, 0, pflags);
}
/**
* ep0_end_in_req - Ends control endpoint IN request (ends data stage)
* @ep: physical endpoint
* @req: pxa request
+ * @pflags: flags of previous spin_lock_irqsave() or NULL if no lock held
*
- * Context: ep->lock held
+ * Context: ep->lock held or released (see req_done())
*
* Ends control endpoint IN request (completes usb request), and puts
* control endpoint into status state
*/
-static void ep0_end_in_req(struct pxa_ep *ep, struct pxa27x_request *req)
+static void ep0_end_in_req(struct pxa_ep *ep, struct pxa27x_request *req,
+ unsigned long *pflags)
{
set_ep0state(ep->dev, IN_STATUS_STAGE);
- ep_end_in_req(ep, req);
+ ep_end_in_req(ep, req, pflags);
}
/**
@@ -831,19 +849,22 @@
* @ep: pxa endpoint
* @status: usb request status
*
- * Context: ep->lock held
+ * Context: ep->lock released
*
* Dequeues all requests on an endpoint. As a side effect, interrupts will be
* disabled on that endpoint (because no more requests).
*/
static void nuke(struct pxa_ep *ep, int status)
{
- struct pxa27x_request *req;
+ struct pxa27x_request *req;
+ unsigned long flags;
+ spin_lock_irqsave(&ep->lock, flags);
while (!list_empty(&ep->queue)) {
req = list_entry(ep->queue.next, struct pxa27x_request, queue);
- req_done(ep, req, status);
+ req_done(ep, req, status, &flags);
}
+ spin_unlock_irqrestore(&ep->lock, flags);
}
/**
@@ -1123,6 +1144,7 @@
int rc = 0;
int is_first_req;
unsigned length;
+ int recursion_detected;
req = container_of(_req, struct pxa27x_request, req);
udc_usb_ep = container_of(_ep, struct udc_usb_ep, usb_ep);
@@ -1152,6 +1174,7 @@
return -EMSGSIZE;
spin_lock_irqsave(&ep->lock, flags);
+ recursion_detected = ep->in_handle_ep;
is_first_req = list_empty(&ep->queue);
ep_dbg(ep, "queue req %p(first=%s), len %d buf %p\n",
@@ -1161,12 +1184,12 @@
if (!ep->enabled) {
_req->status = -ESHUTDOWN;
rc = -ESHUTDOWN;
- goto out;
+ goto out_locked;
}
if (req->in_use) {
ep_err(ep, "refusing to queue req %p (already queued)\n", req);
- goto out;
+ goto out_locked;
}
length = _req->length;
@@ -1174,12 +1197,13 @@
_req->actual = 0;
ep_add_request(ep, req);
+ spin_unlock_irqrestore(&ep->lock, flags);
if (is_ep0(ep)) {
switch (dev->ep0state) {
case WAIT_ACK_SET_CONF_INTERF:
if (length == 0) {
- ep_end_in_req(ep, req);
+ ep_end_in_req(ep, req, NULL);
} else {
ep_err(ep, "got a request of %d bytes while"
"in state WAIT_ACK_SET_CONF_INTERF\n",
@@ -1192,12 +1216,12 @@
case IN_DATA_STAGE:
if (!ep_is_full(ep))
if (write_ep0_fifo(ep, req))
- ep0_end_in_req(ep, req);
+ ep0_end_in_req(ep, req, NULL);
break;
case OUT_DATA_STAGE:
if ((length == 0) || !epout_has_pkt(ep))
if (read_ep0_fifo(ep, req))
- ep0_end_out_req(ep, req);
+ ep0_end_out_req(ep, req, NULL);
break;
default:
ep_err(ep, "odd state %s to send me a request\n",
@@ -1207,12 +1231,15 @@
break;
}
} else {
- handle_ep(ep);
+ if (!recursion_detected)
+ handle_ep(ep);
}
out:
- spin_unlock_irqrestore(&ep->lock, flags);
return rc;
+out_locked:
+ spin_unlock_irqrestore(&ep->lock, flags);
+ goto out;
}
/**
@@ -1242,13 +1269,14 @@
/* make sure it's actually queued on this endpoint */
list_for_each_entry(req, &ep->queue, queue) {
if (&req->req == _req) {
- req_done(ep, req, -ECONNRESET);
rc = 0;
break;
}
}
spin_unlock_irqrestore(&ep->lock, flags);
+ if (!rc)
+ req_done(ep, req, -ECONNRESET, NULL);
return rc;
}
@@ -1445,7 +1473,6 @@
{
struct pxa_ep *ep;
struct udc_usb_ep *udc_usb_ep;
- unsigned long flags;
if (!_ep)
return -EINVAL;
@@ -1455,10 +1482,8 @@
if (!ep || is_ep0(ep) || !list_empty(&ep->queue))
return -EINVAL;
- spin_lock_irqsave(&ep->lock, flags);
ep->enabled = 0;
nuke(ep, -ESHUTDOWN);
- spin_unlock_irqrestore(&ep->lock, flags);
pxa_ep_fifo_flush(_ep);
udc_usb_ep->pxa_ep = NULL;
@@ -1907,8 +1932,10 @@
} u;
int i;
int have_extrabytes = 0;
+ unsigned long flags;
nuke(ep, -EPROTO);
+ spin_lock_irqsave(&ep->lock, flags);
/*
* In the PXA320 manual, in the section about Back-to-Back setup
@@ -1947,10 +1974,13 @@
/* Tell UDC to enter Data Stage */
ep_write_UDCCSR(ep, UDCCSR0_SA | UDCCSR0_OPC);
+ spin_unlock_irqrestore(&ep->lock, flags);
i = udc->driver->setup(&udc->gadget, &u.r);
+ spin_lock_irqsave(&ep->lock, flags);
if (i < 0)
goto stall;
out:
+ spin_unlock_irqrestore(&ep->lock, flags);
return;
stall:
ep_dbg(ep, "protocol STALL, udccsr0=%03x err %d\n",
@@ -2055,13 +2085,13 @@
if (req && !ep_is_full(ep))
completed = write_ep0_fifo(ep, req);
if (completed)
- ep0_end_in_req(ep, req);
+ ep0_end_in_req(ep, req, NULL);
break;
case OUT_DATA_STAGE: /* SET_DESCRIPTOR */
if (epout_has_pkt(ep) && req)
completed = read_ep0_fifo(ep, req);
if (completed)
- ep0_end_out_req(ep, req);
+ ep0_end_out_req(ep, req, NULL);
break;
case STALL:
ep_write_UDCCSR(ep, UDCCSR0_FST);
@@ -2091,7 +2121,7 @@
* Tries to transfer all pending request data into the endpoint and/or
* transfer all pending data in the endpoint into usb requests.
*
- * Is always called when in_interrupt() or with ep->lock held.
+ * Is always called when in_interrupt() and with ep->lock released.
*/
static void handle_ep(struct pxa_ep *ep)
{
@@ -2100,10 +2130,17 @@
u32 udccsr;
int is_in = ep->dir_in;
int loop = 0;
+ unsigned long flags;
+
+ spin_lock_irqsave(&ep->lock, flags);
+ if (ep->in_handle_ep)
+ goto recursion_detected;
+ ep->in_handle_ep = 1;
do {
completed = 0;
udccsr = udc_ep_readl(ep, UDCCSR);
+
if (likely(!list_empty(&ep->queue)))
req = list_entry(ep->queue.next,
struct pxa27x_request, queue);
@@ -2122,15 +2159,22 @@
if (unlikely(is_in)) {
if (likely(!ep_is_full(ep)))
completed = write_fifo(ep, req);
- if (completed)
- ep_end_in_req(ep, req);
} else {
if (likely(epout_has_pkt(ep)))
completed = read_fifo(ep, req);
- if (completed)
- ep_end_out_req(ep, req);
+ }
+
+ if (completed) {
+ if (is_in)
+ ep_end_in_req(ep, req, &flags);
+ else
+ ep_end_out_req(ep, req, &flags);
}
} while (completed);
+
+ ep->in_handle_ep = 0;
+recursion_detected:
+ spin_unlock_irqrestore(&ep->lock, flags);
}
/**
@@ -2218,9 +2262,13 @@
continue;
udc_writel(udc, UDCISR0, UDCISR_INT(i, UDCISR_INT_MASK));
- ep = &udc->pxa_ep[i];
- ep->stats.irqs++;
- handle_ep(ep);
+
+ WARN_ON(i >= ARRAY_SIZE(udc->pxa_ep));
+ if (i < ARRAY_SIZE(udc->pxa_ep)) {
+ ep = &udc->pxa_ep[i];
+ ep->stats.irqs++;
+ handle_ep(ep);
+ }
}
for (i = 16; udcisr1 != 0 && i < 24; udcisr1 >>= 2, i++) {
@@ -2228,9 +2276,12 @@
if (!(udcisr1 & UDCISR_INT_MASK))
continue;
- ep = &udc->pxa_ep[i];
- ep->stats.irqs++;
- handle_ep(ep);
+ WARN_ON(i >= ARRAY_SIZE(udc->pxa_ep));
+ if (i < ARRAY_SIZE(udc->pxa_ep)) {
+ ep = &udc->pxa_ep[i];
+ ep->stats.irqs++;
+ handle_ep(ep);
+ }
}
}
@@ -2439,7 +2490,7 @@
}
retval = -ENOMEM;
- udc->regs = ioremap(regs->start, regs->end - regs->start + 1);
+ udc->regs = ioremap(regs->start, resource_size(regs));
if (!udc->regs) {
dev_err(&pdev->dev, "Unable to map UDC I/O memory\n");
goto err_map;
diff --git a/drivers/usb/gadget/pxa27x_udc.h b/drivers/usb/gadget/pxa27x_udc.h
index e25225e..ff61e48 100644
--- a/drivers/usb/gadget/pxa27x_udc.h
+++ b/drivers/usb/gadget/pxa27x_udc.h
@@ -318,6 +318,11 @@
* @queue: requests queue
* @lock: lock to pxa_ep data (queues and stats)
* @enabled: true when endpoint enabled (not stopped by gadget layer)
+ * @in_handle_ep: number of recursions of handle_ep() function
+ * Prevents deadlocks or infinite recursions of types :
+ * irq->handle_ep()->req_done()->req.complete()->pxa_ep_queue()->handle_ep()
+ * or
+ * pxa_ep_queue()->handle_ep()->req_done()->req.complete()->pxa_ep_queue()
* @idx: endpoint index (1 => epA, 2 => epB, ..., 24 => epX)
* @name: endpoint name (for trace/debug purpose)
* @dir_in: 1 if IN endpoint, 0 if OUT endpoint
@@ -346,6 +351,7 @@
spinlock_t lock; /* Protects this structure */
/* (queues, stats) */
unsigned enabled:1;
+ unsigned in_handle_ep:1;
unsigned idx:5;
char *name;
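
Condensed sketch of the guard the new @in_handle_ep kernel-doc describes (the full version is in handle_ep() above):

spin_lock_irqsave(&ep->lock, flags);
if (ep->in_handle_ep)
	goto recursion_detected;	/* re-entered via req.complete() -> pxa_ep_queue() */
ep->in_handle_ep = 1;

/* ... drain the queue; req_done() releases and retakes ep->lock
 *     around the gadget's req.complete() callback ... */

ep->in_handle_ep = 0;
recursion_detected:
	spin_unlock_irqrestore(&ep->lock, flags);
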
diff --git a/drivers/usb/gadget/s3c-hsotg.c b/drivers/usb/gadget/s3c-hsotg.c
index 5fc80a1..7e5bf59 100644
--- a/drivers/usb/gadget/s3c-hsotg.c
+++ b/drivers/usb/gadget/s3c-hsotg.c
@@ -317,7 +317,8 @@
*
* Allocate a new USB request structure appropriate for the specified endpoint
*/
-struct usb_request *s3c_hsotg_ep_alloc_request(struct usb_ep *ep, gfp_t flags)
+static struct usb_request *s3c_hsotg_ep_alloc_request(struct usb_ep *ep,
+ gfp_t flags)
{
struct s3c_hsotg_req *req;
@@ -373,7 +374,7 @@
req->dma = DMA_ADDR_INVALID;
hs_req->mapped = 0;
} else {
- dma_sync_single(hsotg->dev, req->dma, req->length, dir);
+ dma_sync_single_for_cpu(hsotg->dev, req->dma, req->length, dir);
}
}
@@ -755,7 +756,7 @@
hs_req->mapped = 1;
req->dma = dma;
} else {
- dma_sync_single(hsotg->dev, req->dma, req->length, dir);
+ dma_sync_single_for_cpu(hsotg->dev, req->dma, req->length, dir);
hs_req->mapped = 0;
}
@@ -1460,7 +1461,7 @@
* as the actual data should be sent to the memory directly and we turn
* on the completion interrupts to get notifications of transfer completion.
*/
-void s3c_hsotg_handle_rx(struct s3c_hsotg *hsotg)
+static void s3c_hsotg_handle_rx(struct s3c_hsotg *hsotg)
{
u32 grxstsr = readl(hsotg->regs + S3C_GRXSTSP);
u32 epnum, status, size;
@@ -3094,7 +3095,7 @@
local_irq_restore(flags);
}
-struct s3c_hsotg_plat s3c_hsotg_default_pdata;
+static struct s3c_hsotg_plat s3c_hsotg_default_pdata;
static int __devinit s3c_hsotg_probe(struct platform_device *pdev)
{
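
dma_sync_single() was a deprecated alias for dma_sync_single_for_cpu(); the streaming-DMA API expresses buffer ownership explicitly, roughly:

dma_sync_single_for_cpu(dev, dma_handle, len, dir);	/* CPU may now access the buffer */
/* ... driver reads/writes the buffer ... */
dma_sync_single_for_device(dev, dma_handle, len, dir);	/* hand ownership back to the device */

The hunks above only need the _for_cpu direction, matching what the old call did.
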
diff --git a/drivers/usb/gadget/u_ether.c b/drivers/usb/gadget/u_ether.c
index 2fc02bd..84ca195 100644
--- a/drivers/usb/gadget/u_ether.c
+++ b/drivers/usb/gadget/u_ether.c
@@ -746,6 +746,10 @@
.ndo_validate_addr = eth_validate_addr,
};
+static struct device_type gadget_type = {
+ .name = "gadget",
+};
+
/**
* gether_setup - initialize one ethernet-over-usb link
* @g: gadget to associated with these links
@@ -808,6 +812,7 @@
dev->gadget = g;
SET_NETDEV_DEV(net, &g->dev);
+ SET_NETDEV_DEVTYPE(net, &gadget_type);
status = register_netdev(net);
if (status < 0) {
diff --git a/drivers/usb/gadget/u_ether.h b/drivers/usb/gadget/u_ether.h
index fd55f45..3c8c0c9 100644
--- a/drivers/usb/gadget/u_ether.h
+++ b/drivers/usb/gadget/u_ether.h
@@ -93,13 +93,6 @@
if (!gadget_supports_altsettings(gadget))
return false;
- /* SA1100 can do ECM, *without* status endpoint ... but we'll
- * only use it in non-ECM mode for backwards compatibility
- * (and since we currently require a status endpoint)
- */
- if (gadget_is_sa1100(gadget))
- return false;
-
/* Everything else is *presumably* fine ... but this is a bit
* chancy, so be **CERTAIN** there are no hardware issues with
* your controller. Add it above if it can't handle CDC.
diff --git a/drivers/usb/gadget/zero.c b/drivers/usb/gadget/zero.c
index 2d77240..fac81ee 100644
--- a/drivers/usb/gadget/zero.c
+++ b/drivers/usb/gadget/zero.c
@@ -297,12 +297,10 @@
*/
if (loopdefault) {
loopback_add(cdev, autoresume != 0);
- if (!gadget_is_sh(gadget))
- sourcesink_add(cdev, autoresume != 0);
+ sourcesink_add(cdev, autoresume != 0);
} else {
sourcesink_add(cdev, autoresume != 0);
- if (!gadget_is_sh(gadget))
- loopback_add(cdev, autoresume != 0);
+ loopback_add(cdev, autoresume != 0);
}
gcnum = usb_gadget_controller_number(gadget);
diff --git a/drivers/usb/host/Kconfig b/drivers/usb/host/Kconfig
index 2678a16..8d3df03 100644
--- a/drivers/usb/host/Kconfig
+++ b/drivers/usb/host/Kconfig
@@ -399,3 +399,14 @@
To compile this driver as a module, choose M here: the module
will be called "hwa-hc".
+
+config USB_IMX21_HCD
+ tristate "iMX21 HCD support"
+ depends on USB && ARM && MACH_MX21
+ help
+ This driver enables support for the on-chip USB host in the
+ iMX21 processor.
+
+ To compile this driver as a module, choose M here: the
+ module will be called "imx21-hcd".
+
diff --git a/drivers/usb/host/Makefile b/drivers/usb/host/Makefile
index f58b249..4e0c67f 100644
--- a/drivers/usb/host/Makefile
+++ b/drivers/usb/host/Makefile
@@ -32,3 +32,5 @@
obj-$(CONFIG_USB_R8A66597_HCD) += r8a66597-hcd.o
obj-$(CONFIG_USB_ISP1760_HCD) += isp1760.o
obj-$(CONFIG_USB_HWA_HCD) += hwa-hc.o
+obj-$(CONFIG_USB_IMX21_HCD) += imx21-hcd.o
+
diff --git a/drivers/usb/host/ehci-atmel.c b/drivers/usb/host/ehci-atmel.c
index 87c1b7c..51bd0ed 100644
--- a/drivers/usb/host/ehci-atmel.c
+++ b/drivers/usb/host/ehci-atmel.c
@@ -149,7 +149,7 @@
goto fail_request_resource;
}
hcd->rsrc_start = res->start;
- hcd->rsrc_len = res->end - res->start + 1;
+ hcd->rsrc_len = resource_size(res);
if (!request_mem_region(hcd->rsrc_start, hcd->rsrc_len,
driver->description)) {
diff --git a/drivers/usb/host/ehci-au1xxx.c b/drivers/usb/host/ehci-au1xxx.c
index dbfb482..e3a74e7 100644
--- a/drivers/usb/host/ehci-au1xxx.c
+++ b/drivers/usb/host/ehci-au1xxx.c
@@ -121,6 +121,7 @@
{
struct usb_hcd *hcd;
struct ehci_hcd *ehci;
+ struct resource *res;
int ret;
if (usb_disabled())
@@ -144,8 +145,9 @@
if (!hcd)
return -ENOMEM;
- hcd->rsrc_start = pdev->resource[0].start;
- hcd->rsrc_len = pdev->resource[0].end - pdev->resource[0].start + 1;
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ hcd->rsrc_start = res->start;
+ hcd->rsrc_len = resource_size(res);
if (!request_mem_region(hcd->rsrc_start, hcd->rsrc_len, hcd_name)) {
pr_debug("request_mem_region failed");
diff --git a/drivers/usb/host/ehci-fsl.c b/drivers/usb/host/ehci-fsl.c
index 9911749..0e26aa1 100644
--- a/drivers/usb/host/ehci-fsl.c
+++ b/drivers/usb/host/ehci-fsl.c
@@ -1,5 +1,6 @@
/*
- * Copyright (c) 2005 MontaVista Software
+ * Copyright 2005-2009 MontaVista Software, Inc.
+ * Copyright 2008 Freescale Semiconductor, Inc.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
@@ -17,17 +18,20 @@
*
* Ported to 834x by Randy Vinson <rvinson@mvista.com> using code provided
* by Hunter Wu.
+ * Power Management support by Dave Liu <daveliu@freescale.com>,
+ * Jerry Huang <Chang-Ming.Huang@freescale.com> and
+ * Anton Vorontsov <avorontsov@ru.mvista.com>.
*/
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/delay.h>
+#include <linux/pm.h>
#include <linux/platform_device.h>
#include <linux/fsl_devices.h>
#include "ehci-fsl.h"
-/* FIXME: Power Management is un-ported so temporarily disable it */
-#undef CONFIG_PM
-
-
/* configure so an HC device and id are always provided */
/* always called with process context; sleeping is OK */
@@ -40,8 +44,8 @@
* Allocates basic resources for this USB host controller.
*
*/
-int usb_hcd_fsl_probe(const struct hc_driver *driver,
- struct platform_device *pdev)
+static int usb_hcd_fsl_probe(const struct hc_driver *driver,
+ struct platform_device *pdev)
{
struct fsl_usb2_platform_data *pdata;
struct usb_hcd *hcd;
@@ -147,7 +151,8 @@
* Reverses the effect of usb_hcd_fsl_probe().
*
*/
-void usb_hcd_fsl_remove(struct usb_hcd *hcd, struct platform_device *pdev)
+static void usb_hcd_fsl_remove(struct usb_hcd *hcd,
+ struct platform_device *pdev)
{
usb_remove_hcd(hcd);
iounmap(hcd->regs);
@@ -284,10 +289,81 @@
return retval;
}
+struct ehci_fsl {
+ struct ehci_hcd ehci;
+
+#ifdef CONFIG_PM
+ /* Saved USB PHY settings, need to restore after deep sleep. */
+ u32 usb_ctrl;
+#endif
+};
+
+#ifdef CONFIG_PM
+
+static struct ehci_fsl *hcd_to_ehci_fsl(struct usb_hcd *hcd)
+{
+ struct ehci_hcd *ehci = hcd_to_ehci(hcd);
+
+ return container_of(ehci, struct ehci_fsl, ehci);
+}
+
+static int ehci_fsl_drv_suspend(struct device *dev)
+{
+ struct usb_hcd *hcd = dev_get_drvdata(dev);
+ struct ehci_fsl *ehci_fsl = hcd_to_ehci_fsl(hcd);
+ void __iomem *non_ehci = hcd->regs;
+
+ if (!fsl_deep_sleep())
+ return 0;
+
+ ehci_fsl->usb_ctrl = in_be32(non_ehci + FSL_SOC_USB_CTRL);
+ return 0;
+}
+
+static int ehci_fsl_drv_resume(struct device *dev)
+{
+ struct usb_hcd *hcd = dev_get_drvdata(dev);
+ struct ehci_fsl *ehci_fsl = hcd_to_ehci_fsl(hcd);
+ struct ehci_hcd *ehci = hcd_to_ehci(hcd);
+ void __iomem *non_ehci = hcd->regs;
+
+ if (!fsl_deep_sleep())
+ return 0;
+
+ usb_root_hub_lost_power(hcd->self.root_hub);
+
+ /* Restore USB PHY settings and enable the controller. */
+ out_be32(non_ehci + FSL_SOC_USB_CTRL, ehci_fsl->usb_ctrl);
+
+ ehci_reset(ehci);
+ ehci_fsl_reinit(ehci);
+
+ return 0;
+}
+
+static int ehci_fsl_drv_restore(struct device *dev)
+{
+ struct usb_hcd *hcd = dev_get_drvdata(dev);
+
+ usb_root_hub_lost_power(hcd->self.root_hub);
+ return 0;
+}
+
+static struct dev_pm_ops ehci_fsl_pm_ops = {
+ .suspend = ehci_fsl_drv_suspend,
+ .resume = ehci_fsl_drv_resume,
+ .restore = ehci_fsl_drv_restore,
+};
+
+#define EHCI_FSL_PM_OPS (&ehci_fsl_pm_ops)
+#else
+#define EHCI_FSL_PM_OPS NULL
+#endif /* CONFIG_PM */
+
static const struct hc_driver ehci_fsl_hc_driver = {
.description = hcd_name,
.product_desc = "Freescale On-Chip EHCI Host Controller",
- .hcd_priv_size = sizeof(struct ehci_hcd),
+ .hcd_priv_size = sizeof(struct ehci_fsl),
/*
* generic hardware linkage
@@ -354,6 +430,7 @@
.remove = ehci_fsl_drv_remove,
.shutdown = usb_hcd_platform_shutdown,
.driver = {
- .name = "fsl-ehci",
+ .name = "fsl-ehci",
+ .pm = EHCI_FSL_PM_OPS,
},
};
diff --git a/drivers/usb/host/ehci-mxc.c b/drivers/usb/host/ehci-mxc.c
index 35c56f4..23cd917 100644
--- a/drivers/usb/host/ehci-mxc.c
+++ b/drivers/usb/host/ehci-mxc.c
@@ -162,6 +162,17 @@
goto err_ioremap;
}
+ /* call platform specific init function */
+ if (pdata->init) {
+ ret = pdata->init(pdev);
+ if (ret) {
+ dev_err(dev, "platform init failed\n");
+ goto err_init;
+ }
+ /* platforms need some time to settle changed IO settings */
+ mdelay(10);
+ }
+
/* enable clocks */
priv->usbclk = clk_get(dev, "usb");
if (IS_ERR(priv->usbclk)) {
@@ -192,18 +203,6 @@
if (ret < 0)
goto err_init;
- /* call platform specific init function */
- if (pdata->init) {
- ret = pdata->init(pdev);
- if (ret) {
- dev_err(dev, "platform init failed\n");
- goto err_init;
- }
- }
-
- /* most platforms need some time to settle changed IO settings */
- mdelay(10);
-
/* Initialize the transceiver */
if (pdata->otg) {
pdata->otg->io_priv = hcd->regs + ULPI_VIEWPORT_OFFSET;
diff --git a/drivers/usb/host/ehci-omap.c b/drivers/usb/host/ehci-omap.c
index 74d07f4..f0282d6 100644
--- a/drivers/usb/host/ehci-omap.c
+++ b/drivers/usb/host/ehci-omap.c
@@ -26,10 +26,9 @@
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*
- * TODO (last updated Feb 23rd, 2009):
+ * TODO (last updated Feb 12, 2010):
* - add kernel-doc
* - enable AUTOIDLE
- * - move DPLL5 programming to clock fw
* - add suspend/resume
* - move workarounds to board-files
*/
@@ -37,6 +36,7 @@
#include <linux/platform_device.h>
#include <linux/clk.h>
#include <linux/gpio.h>
+#include <linux/regulator/consumer.h>
#include <plat/usb.h>
/*
@@ -178,6 +178,11 @@
void __iomem *uhh_base;
void __iomem *tll_base;
void __iomem *ehci_base;
+
+ /* Regulators for USB PHYs.
+ * Each PHY can have a separate regulator.
+ */
+ struct regulator *regulator[OMAP3_HS_USB_PORTS];
};
/*-------------------------------------------------------------------------*/
@@ -546,6 +551,8 @@
int irq = platform_get_irq(pdev, 0);
int ret = -ENODEV;
+ int i;
+ char supply[7];
if (!pdata) {
dev_dbg(&pdev->dev, "missing platform_data\n");
@@ -613,6 +620,21 @@
goto err_tll_ioremap;
}
+ /* get ehci regulator and enable */
+ for (i = 0 ; i < OMAP3_HS_USB_PORTS ; i++) {
+ if (omap->port_mode[i] != EHCI_HCD_OMAP_MODE_PHY) {
+ omap->regulator[i] = NULL;
+ continue;
+ }
+ snprintf(supply, sizeof(supply), "hsusb%d", i);
+ omap->regulator[i] = regulator_get(omap->dev, supply);
+ if (IS_ERR(omap->regulator[i]))
+ dev_dbg(&pdev->dev,
+ "failed to get ehci port%d regulator\n", i);
+ else
+ regulator_enable(omap->regulator[i]);
+ }
+
ret = omap_start_ehc(omap, hcd);
if (ret) {
dev_dbg(&pdev->dev, "failed to start ehci\n");
@@ -622,13 +644,12 @@
omap->ehci->regs = hcd->regs
+ HC_LENGTH(readl(&omap->ehci->caps->hc_capbase));
+ dbg_hcs_params(omap->ehci, "reset");
+ dbg_hcc_params(omap->ehci, "reset");
+
/* cache this readonly data; minimize chip reads */
omap->ehci->hcs_params = readl(&omap->ehci->caps->hcs_params);
- /* SET 1 micro-frame Interrupt interval */
- writel(readl(&omap->ehci->regs->command) | (1 << 16),
- &omap->ehci->regs->command);
-
ret = usb_add_hcd(hcd, irq, IRQF_DISABLED | IRQF_SHARED);
if (ret) {
dev_dbg(&pdev->dev, "failed to add hcd with err %d\n", ret);
@@ -641,6 +662,12 @@
omap_stop_ehc(omap, hcd);
err_start:
+ for (i = 0 ; i < OMAP3_HS_USB_PORTS ; i++) {
+ if (omap->regulator[i]) {
+ regulator_disable(omap->regulator[i]);
+ regulator_put(omap->regulator[i]);
+ }
+ }
iounmap(omap->tll_base);
err_tll_ioremap:
@@ -674,13 +701,21 @@
{
struct ehci_hcd_omap *omap = platform_get_drvdata(pdev);
struct usb_hcd *hcd = ehci_to_hcd(omap->ehci);
+ int i;
usb_remove_hcd(hcd);
omap_stop_ehc(omap, hcd);
iounmap(hcd->regs);
+ for (i = 0 ; i < OMAP3_HS_USB_PORTS ; i++) {
+ if (omap->regulator[i]) {
+ regulator_disable(omap->regulator[i]);
+ regulator_put(omap->regulator[i]);
+ }
+ }
iounmap(omap->tll_base);
iounmap(omap->uhh_base);
usb_put_hcd(hcd);
+ kfree(omap);
return 0;
}
diff --git a/drivers/usb/host/ehci-orion.c b/drivers/usb/host/ehci-orion.c
index 1d283e1..0f87dc7 100644
--- a/drivers/usb/host/ehci-orion.c
+++ b/drivers/usb/host/ehci-orion.c
@@ -222,14 +222,14 @@
goto err1;
}
- if (!request_mem_region(res->start, res->end - res->start + 1,
+ if (!request_mem_region(res->start, resource_size(res),
ehci_orion_hc_driver.description)) {
dev_dbg(&pdev->dev, "controller already in use\n");
err = -EBUSY;
goto err1;
}
- regs = ioremap(res->start, res->end - res->start + 1);
+ regs = ioremap(res->start, resource_size(res));
if (regs == NULL) {
dev_dbg(&pdev->dev, "error mapping memory\n");
err = -EFAULT;
@@ -244,7 +244,7 @@
}
hcd->rsrc_start = res->start;
- hcd->rsrc_len = res->end - res->start + 1;
+ hcd->rsrc_len = resource_size(res);
hcd->regs = regs;
ehci = hcd_to_ehci(hcd);
@@ -287,7 +287,7 @@
err3:
iounmap(regs);
err2:
- release_mem_region(res->start, res->end - res->start + 1);
+ release_mem_region(res->start, resource_size(res));
err1:
dev_err(&pdev->dev, "init %s fail, %d\n",
dev_name(&pdev->dev), err);
diff --git a/drivers/usb/host/ehci-ppc-of.c b/drivers/usb/host/ehci-ppc-of.c
index 36f96da..8df33b8 100644
--- a/drivers/usb/host/ehci-ppc-of.c
+++ b/drivers/usb/host/ehci-ppc-of.c
@@ -134,21 +134,21 @@
hcd->rsrc_len = res.end - res.start + 1;
if (!request_mem_region(hcd->rsrc_start, hcd->rsrc_len, hcd_name)) {
- printk(KERN_ERR __FILE__ ": request_mem_region failed\n");
+ printk(KERN_ERR "%s: request_mem_region failed\n", __FILE__);
rv = -EBUSY;
goto err_rmr;
}
irq = irq_of_parse_and_map(dn, 0);
if (irq == NO_IRQ) {
- printk(KERN_ERR __FILE__ ": irq_of_parse_and_map failed\n");
+ printk(KERN_ERR "%s: irq_of_parse_and_map failed\n", __FILE__);
rv = -EBUSY;
goto err_irq;
}
hcd->regs = ioremap(hcd->rsrc_start, hcd->rsrc_len);
if (!hcd->regs) {
- printk(KERN_ERR __FILE__ ": ioremap failed\n");
+ printk(KERN_ERR "%s: ioremap failed\n", __FILE__);
rv = -ENOMEM;
goto err_ioremap;
}
@@ -161,9 +161,9 @@
ehci->ohci_hcctrl_reg = ioremap(res.start +
OHCI_HCCTRL_OFFSET, OHCI_HCCTRL_LEN);
else
- pr_debug(__FILE__ ": no ohci offset in fdt\n");
+ pr_debug("%s: no ohci offset in fdt\n", __FILE__);
if (!ehci->ohci_hcctrl_reg) {
- pr_debug(__FILE__ ": ioremap for ohci hcctrl failed\n");
+ pr_debug("%s: ioremap for ohci hcctrl failed\n", __FILE__);
} else {
ehci->has_amcc_usb23 = 1;
}
@@ -241,7 +241,7 @@
else
release_mem_region(res.start, 0x4);
else
- pr_debug(__FILE__ ": no ohci offset in fdt\n");
+ pr_debug("%s: no ohci offset in fdt\n", __FILE__);
of_node_put(np);
}
@@ -264,7 +264,7 @@
}
-static struct of_device_id ehci_hcd_ppc_of_match[] = {
+static const struct of_device_id ehci_hcd_ppc_of_match[] = {
{
.compatible = "usb-ehci",
},
diff --git a/drivers/usb/host/ehci-sched.c b/drivers/usb/host/ehci-sched.c
index 1e391e6..39340ae 100644
--- a/drivers/usb/host/ehci-sched.c
+++ b/drivers/usb/host/ehci-sched.c
@@ -510,6 +510,8 @@
ehci_writel(ehci, cmd, &ehci->regs->command);
/* posted write ... */
+ free_cached_itd_list(ehci);
+
ehci->next_uframe = -1;
return 0;
}
@@ -2322,9 +2324,13 @@
* No need to check for activity unless the
* frame is current.
*/
- if (frame == clock_frame && live &&
- (q.sitd->hw_results &
- SITD_ACTIVE(ehci))) {
+ if (((frame == clock_frame) ||
+ (((frame + 1) % ehci->periodic_size)
+ == clock_frame))
+ && live
+ && (q.sitd->hw_results &
+ SITD_ACTIVE(ehci))) {
+
incomplete = true;
q_p = &q.sitd->sitd_next;
hw_p = &q.sitd->hw_next;
diff --git a/drivers/usb/host/ehci-xilinx-of.c b/drivers/usb/host/ehci-xilinx-of.c
index a586153..f603bb2 100644
--- a/drivers/usb/host/ehci-xilinx-of.c
+++ b/drivers/usb/host/ehci-xilinx-of.c
@@ -177,21 +177,21 @@
hcd->rsrc_len = res.end - res.start + 1;
if (!request_mem_region(hcd->rsrc_start, hcd->rsrc_len, hcd_name)) {
- printk(KERN_ERR __FILE__ ": request_mem_region failed\n");
+ printk(KERN_ERR "%s: request_mem_region failed\n", __FILE__);
rv = -EBUSY;
goto err_rmr;
}
irq = irq_of_parse_and_map(dn, 0);
if (irq == NO_IRQ) {
- printk(KERN_ERR __FILE__ ": irq_of_parse_and_map failed\n");
+ printk(KERN_ERR "%s: irq_of_parse_and_map failed\n", __FILE__);
rv = -EBUSY;
goto err_irq;
}
hcd->regs = ioremap(hcd->rsrc_start, hcd->rsrc_len);
if (!hcd->regs) {
- printk(KERN_ERR __FILE__ ": ioremap failed\n");
+ printk(KERN_ERR "%s: ioremap failed\n", __FILE__);
rv = -ENOMEM;
goto err_ioremap;
}
@@ -281,7 +281,7 @@
}
-static struct of_device_id ehci_hcd_xilinx_of_match[] = {
+static const struct of_device_id ehci_hcd_xilinx_of_match[] = {
{.compatible = "xlnx,xps-usb-host-1.00.a",},
{},
};
diff --git a/drivers/usb/host/fhci-hcd.c b/drivers/usb/host/fhci-hcd.c
index 78e7c3c..5dcfb3d 100644
--- a/drivers/usb/host/fhci-hcd.c
+++ b/drivers/usb/host/fhci-hcd.c
@@ -433,7 +433,7 @@
return -ENOMEM;
/* allocate the private part of the URB */
- urb_priv->tds = kzalloc(size * sizeof(struct td), mem_flags);
+ urb_priv->tds = kcalloc(size, sizeof(*urb_priv->tds), mem_flags);
if (!urb_priv->tds) {
kfree(urb_priv);
return -ENOMEM;
@@ -805,7 +805,7 @@
return fhci_remove(&ofdev->dev);
}
-static struct of_device_id of_fhci_match[] = {
+static const struct of_device_id of_fhci_match[] = {
{ .compatible = "fsl,mpc8323-qe-usb", },
{},
};
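
kcalloc() zeroes the allocation just like the kzalloc() it replaces, but additionally returns NULL if the element count multiplied by the element size would overflow, instead of allocating a too-small buffer:

urb_priv->tds = kcalloc(size, sizeof(*urb_priv->tds), mem_flags);
if (!urb_priv->tds) {
	kfree(urb_priv);
	return -ENOMEM;
}
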
diff --git a/drivers/usb/host/imx21-dbg.c b/drivers/usb/host/imx21-dbg.c
new file mode 100644
index 0000000..512f647
--- /dev/null
+++ b/drivers/usb/host/imx21-dbg.c
@@ -0,0 +1,527 @@
+/*
+ * Copyright (c) 2009 by Martin Fuzzey
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+ * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software Foundation,
+ * Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+/* this file is part of imx21-hcd.c */
+
+#ifndef DEBUG
+
+static inline void create_debug_files(struct imx21 *imx21) { }
+static inline void remove_debug_files(struct imx21 *imx21) { }
+static inline void debug_urb_submitted(struct imx21 *imx21, struct urb *urb) {}
+static inline void debug_urb_completed(struct imx21 *imx21, struct urb *urb,
+ int status) {}
+static inline void debug_urb_unlinked(struct imx21 *imx21, struct urb *urb) {}
+static inline void debug_urb_queued_for_etd(struct imx21 *imx21,
+ struct urb *urb) {}
+static inline void debug_urb_queued_for_dmem(struct imx21 *imx21,
+ struct urb *urb) {}
+static inline void debug_etd_allocated(struct imx21 *imx21) {}
+static inline void debug_etd_freed(struct imx21 *imx21) {}
+static inline void debug_dmem_allocated(struct imx21 *imx21, int size) {}
+static inline void debug_dmem_freed(struct imx21 *imx21, int size) {}
+static inline void debug_isoc_submitted(struct imx21 *imx21,
+ int frame, struct td *td) {}
+static inline void debug_isoc_completed(struct imx21 *imx21,
+ int frame, struct td *td, int cc, int len) {}
+
+#else
+
+#include <linux/debugfs.h>
+#include <linux/seq_file.h>
+
+static const char *dir_labels[] = {
+ "TD 0",
+ "OUT",
+ "IN",
+ "TD 1"
+};
+
+static const char *speed_labels[] = {
+ "Full",
+ "Low"
+};
+
+static const char *format_labels[] = {
+ "Control",
+ "ISO",
+ "Bulk",
+ "Interrupt"
+};
+
+static inline struct debug_stats *stats_for_urb(struct imx21 *imx21,
+ struct urb *urb)
+{
+ return usb_pipeisoc(urb->pipe) ?
+ &imx21->isoc_stats : &imx21->nonisoc_stats;
+}
+
+static void debug_urb_submitted(struct imx21 *imx21, struct urb *urb)
+{
+ stats_for_urb(imx21, urb)->submitted++;
+}
+
+static void debug_urb_completed(struct imx21 *imx21, struct urb *urb, int st)
+{
+ if (st)
+ stats_for_urb(imx21, urb)->completed_failed++;
+ else
+ stats_for_urb(imx21, urb)->completed_ok++;
+}
+
+static void debug_urb_unlinked(struct imx21 *imx21, struct urb *urb)
+{
+ stats_for_urb(imx21, urb)->unlinked++;
+}
+
+static void debug_urb_queued_for_etd(struct imx21 *imx21, struct urb *urb)
+{
+ stats_for_urb(imx21, urb)->queue_etd++;
+}
+
+static void debug_urb_queued_for_dmem(struct imx21 *imx21, struct urb *urb)
+{
+ stats_for_urb(imx21, urb)->queue_dmem++;
+}
+
+static inline void debug_etd_allocated(struct imx21 *imx21)
+{
+ imx21->etd_usage.maximum = max(
+ ++(imx21->etd_usage.value),
+ imx21->etd_usage.maximum);
+}
+
+static inline void debug_etd_freed(struct imx21 *imx21)
+{
+ imx21->etd_usage.value--;
+}
+
+static inline void debug_dmem_allocated(struct imx21 *imx21, int size)
+{
+ imx21->dmem_usage.value += size;
+ imx21->dmem_usage.maximum = max(
+ imx21->dmem_usage.value,
+ imx21->dmem_usage.maximum);
+}
+
+static inline void debug_dmem_freed(struct imx21 *imx21, int size)
+{
+ imx21->dmem_usage.value -= size;
+}
+
+
+static void debug_isoc_submitted(struct imx21 *imx21,
+ int frame, struct td *td)
+{
+ struct debug_isoc_trace *trace = &imx21->isoc_trace[
+ imx21->isoc_trace_index++];
+
+ imx21->isoc_trace_index %= ARRAY_SIZE(imx21->isoc_trace);
+ trace->schedule_frame = td->frame;
+ trace->submit_frame = frame;
+ trace->request_len = td->len;
+ trace->td = td;
+}
+
+static inline void debug_isoc_completed(struct imx21 *imx21,
+ int frame, struct td *td, int cc, int len)
+{
+ struct debug_isoc_trace *trace, *trace_failed;
+ int i;
+ int found = 0;
+
+ trace = imx21->isoc_trace;
+ for (i = 0; i < ARRAY_SIZE(imx21->isoc_trace); i++, trace++) {
+ if (trace->td == td) {
+ trace->done_frame = frame;
+ trace->done_len = len;
+ trace->cc = cc;
+ trace->td = NULL;
+ found = 1;
+ break;
+ }
+ }
+
+ if (found && cc) {
+ trace_failed = &imx21->isoc_trace_failed[
+ imx21->isoc_trace_index_failed++];
+
+ imx21->isoc_trace_index_failed %= ARRAY_SIZE(
+ imx21->isoc_trace_failed);
+ *trace_failed = *trace;
+ }
+}
+
+
+static char *format_ep(struct usb_host_endpoint *ep, char *buf, int bufsize)
+{
+ if (ep)
+ snprintf(buf, bufsize, "ep_%02x (type:%02X kaddr:%p)",
+ ep->desc.bEndpointAddress,
+ usb_endpoint_type(&ep->desc),
+ ep);
+ else
+ snprintf(buf, bufsize, "none");
+ return buf;
+}
+
+static char *format_etd_dword0(u32 value, char *buf, int bufsize)
+{
+ snprintf(buf, bufsize,
+ "addr=%d ep=%d dir=%s speed=%s format=%s halted=%d",
+ value & 0x7F,
+ (value >> DW0_ENDPNT) & 0x0F,
+ dir_labels[(value >> DW0_DIRECT) & 0x03],
+ speed_labels[(value >> DW0_SPEED) & 0x01],
+ format_labels[(value >> DW0_FORMAT) & 0x03],
+ (value >> DW0_HALTED) & 0x01);
+ return buf;
+}
+
+static int debug_status_show(struct seq_file *s, void *v)
+{
+ struct imx21 *imx21 = s->private;
+ int etds_allocated = 0;
+ int etds_sw_busy = 0;
+ int etds_hw_busy = 0;
+ int dmem_blocks = 0;
+ int queued_for_etd = 0;
+ int queued_for_dmem = 0;
+ unsigned int dmem_bytes = 0;
+ int i;
+ struct etd_priv *etd;
+ u32 etd_enable_mask;
+ unsigned long flags;
+ struct imx21_dmem_area *dmem;
+ struct ep_priv *ep_priv;
+
+ spin_lock_irqsave(&imx21->lock, flags);
+
+ etd_enable_mask = readl(imx21->regs + USBH_ETDENSET);
+ for (i = 0, etd = imx21->etd; i < USB_NUM_ETD; i++, etd++) {
+ if (etd->alloc)
+ etds_allocated++;
+ if (etd->urb)
+ etds_sw_busy++;
+ if (etd_enable_mask & (1<<i))
+ etds_hw_busy++;
+ }
+
+ list_for_each_entry(dmem, &imx21->dmem_list, list) {
+ dmem_bytes += dmem->size;
+ dmem_blocks++;
+ }
+
+ list_for_each_entry(ep_priv, &imx21->queue_for_etd, queue)
+ queued_for_etd++;
+
+ list_for_each_entry(etd, &imx21->queue_for_dmem, queue)
+ queued_for_dmem++;
+
+ spin_unlock_irqrestore(&imx21->lock, flags);
+
+ seq_printf(s,
+ "Frame: %d\n"
+ "ETDs allocated: %d/%d (max=%d)\n"
+ "ETDs in use sw: %d\n"
+ "ETDs in use hw: %d\n"
+ "DMEM alocated: %d/%d (max=%d)\n"
+ "DMEM blocks: %d\n"
+ "Queued waiting for ETD: %d\n"
+ "Queued waiting for DMEM: %d\n",
+ readl(imx21->regs + USBH_FRMNUB) & 0xFFFF,
+ etds_allocated, USB_NUM_ETD, imx21->etd_usage.maximum,
+ etds_sw_busy,
+ etds_hw_busy,
+ dmem_bytes, DMEM_SIZE, imx21->dmem_usage.maximum,
+ dmem_blocks,
+ queued_for_etd,
+ queued_for_dmem);
+
+ return 0;
+}
+
+static int debug_dmem_show(struct seq_file *s, void *v)
+{
+ struct imx21 *imx21 = s->private;
+ struct imx21_dmem_area *dmem;
+ unsigned long flags;
+ char ep_text[40];
+
+ spin_lock_irqsave(&imx21->lock, flags);
+
+ list_for_each_entry(dmem, &imx21->dmem_list, list)
+ seq_printf(s,
+ "%04X: size=0x%X "
+ "ep=%s\n",
+ dmem->offset, dmem->size,
+ format_ep(dmem->ep, ep_text, sizeof(ep_text)));
+
+ spin_unlock_irqrestore(&imx21->lock, flags);
+
+ return 0;
+}
+
+static int debug_etd_show(struct seq_file *s, void *v)
+{
+ struct imx21 *imx21 = s->private;
+ struct etd_priv *etd;
+ char buf[60];
+ u32 dword;
+ int i, j;
+ unsigned long flags;
+
+ spin_lock_irqsave(&imx21->lock, flags);
+
+ for (i = 0, etd = imx21->etd; i < USB_NUM_ETD; i++, etd++) {
+ int state = -1;
+ struct urb_priv *urb_priv;
+ if (etd->urb) {
+ urb_priv = etd->urb->hcpriv;
+ if (urb_priv)
+ state = urb_priv->state;
+ }
+
+ seq_printf(s,
+ "etd_num: %d\n"
+ "ep: %s\n"
+ "alloc: %d\n"
+ "len: %d\n"
+ "busy sw: %d\n"
+ "busy hw: %d\n"
+ "urb state: %d\n"
+ "current urb: %p\n",
+
+ i,
+ format_ep(etd->ep, buf, sizeof(buf)),
+ etd->alloc,
+ etd->len,
+ etd->urb != NULL,
+ (readl(imx21->regs + USBH_ETDENSET) & (1 << i)) > 0,
+ state,
+ etd->urb);
+
+ for (j = 0; j < 4; j++) {
+ dword = etd_readl(imx21, i, j);
+ switch (j) {
+ case 0:
+ format_etd_dword0(dword, buf, sizeof(buf));
+ break;
+ case 2:
+ snprintf(buf, sizeof(buf),
+ "cc=0X%02X", dword >> DW2_COMPCODE);
+ break;
+ default:
+ *buf = 0;
+ break;
+ }
+ seq_printf(s,
+ "dword %d: submitted=%08X cur=%08X [%s]\n",
+ j,
+ etd->submitted_dwords[j],
+ dword,
+ buf);
+ }
+ seq_printf(s, "\n");
+ }
+
+ spin_unlock_irqrestore(&imx21->lock, flags);
+
+ return 0;
+}
+
+static void debug_statistics_show_one(struct seq_file *s,
+ const char *name, struct debug_stats *stats)
+{
+ seq_printf(s, "%s:\n"
+ "submitted URBs: %lu\n"
+ "completed OK: %lu\n"
+ "completed failed: %lu\n"
+ "unlinked: %lu\n"
+ "queued for ETD: %lu\n"
+ "queued for DMEM: %lu\n\n",
+ name,
+ stats->submitted,
+ stats->completed_ok,
+ stats->completed_failed,
+ stats->unlinked,
+ stats->queue_etd,
+ stats->queue_dmem);
+}
+
+static int debug_statistics_show(struct seq_file *s, void *v)
+{
+ struct imx21 *imx21 = s->private;
+ unsigned long flags;
+
+ spin_lock_irqsave(&imx21->lock, flags);
+
+ debug_statistics_show_one(s, "nonisoc", &imx21->nonisoc_stats);
+ debug_statistics_show_one(s, "isoc", &imx21->isoc_stats);
+ seq_printf(s, "unblock kludge triggers: %lu\n", imx21->debug_unblocks);
+ spin_unlock_irqrestore(&imx21->lock, flags);
+
+ return 0;
+}
+
+static void debug_isoc_show_one(struct seq_file *s,
+ const char *name, int index, struct debug_isoc_trace *trace)
+{
+ seq_printf(s, "%s %d:\n"
+ "cc=0X%02X\n"
+ "scheduled frame %d (%d)\n"
+ "submittted frame %d (%d)\n"
+ "completed frame %d (%d)\n"
+ "requested length=%d\n"
+ "completed length=%d\n\n",
+ name, index,
+ trace->cc,
+ trace->schedule_frame, trace->schedule_frame & 0xFFFF,
+ trace->submit_frame, trace->submit_frame & 0xFFFF,
+ trace->done_frame, trace->done_frame & 0xFFFF,
+ trace->request_len,
+ trace->done_len);
+}
+
+static int debug_isoc_show(struct seq_file *s, void *v)
+{
+ struct imx21 *imx21 = s->private;
+ struct debug_isoc_trace *trace;
+ unsigned long flags;
+ int i;
+
+ spin_lock_irqsave(&imx21->lock, flags);
+
+ trace = imx21->isoc_trace_failed;
+ for (i = 0; i < ARRAY_SIZE(imx21->isoc_trace_failed); i++, trace++)
+ debug_isoc_show_one(s, "isoc failed", i, trace);
+
+ trace = imx21->isoc_trace;
+ for (i = 0; i < ARRAY_SIZE(imx21->isoc_trace); i++, trace++)
+ debug_isoc_show_one(s, "isoc", i, trace);
+
+ spin_unlock_irqrestore(&imx21->lock, flags);
+
+ return 0;
+}
+
+static int debug_status_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, debug_status_show, inode->i_private);
+}
+
+static int debug_dmem_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, debug_dmem_show, inode->i_private);
+}
+
+static int debug_etd_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, debug_etd_show, inode->i_private);
+}
+
+static int debug_statistics_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, debug_statistics_show, inode->i_private);
+}
+
+static int debug_isoc_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, debug_isoc_show, inode->i_private);
+}
+
+static const struct file_operations debug_status_fops = {
+ .open = debug_status_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+static const struct file_operations debug_dmem_fops = {
+ .open = debug_dmem_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+static const struct file_operations debug_etd_fops = {
+ .open = debug_etd_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+static const struct file_operations debug_statistics_fops = {
+ .open = debug_statistics_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+static const struct file_operations debug_isoc_fops = {
+ .open = debug_isoc_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+static void create_debug_files(struct imx21 *imx21)
+{
+ imx21->debug_root = debugfs_create_dir(dev_name(imx21->dev), NULL);
+ if (!imx21->debug_root)
+ goto failed_create_rootdir;
+
+ if (!debugfs_create_file("status", S_IRUGO,
+ imx21->debug_root, imx21, &debug_status_fops))
+ goto failed_create;
+
+ if (!debugfs_create_file("dmem", S_IRUGO,
+ imx21->debug_root, imx21, &debug_dmem_fops))
+ goto failed_create;
+
+ if (!debugfs_create_file("etd", S_IRUGO,
+ imx21->debug_root, imx21, &debug_etd_fops))
+ goto failed_create;
+
+ if (!debugfs_create_file("statistics", S_IRUGO,
+ imx21->debug_root, imx21, &debug_statistics_fops))
+ goto failed_create;
+
+ if (!debugfs_create_file("isoc", S_IRUGO,
+ imx21->debug_root, imx21, &debug_isoc_fops))
+ goto failed_create;
+
+ return;
+
+failed_create:
+ debugfs_remove_recursive(imx21->debug_root);
+
+failed_create_rootdir:
+ imx21->debug_root = NULL;
+}
+
+
+static void remove_debug_files(struct imx21 *imx21)
+{
+ if (imx21->debug_root) {
+ debugfs_remove_recursive(imx21->debug_root);
+ imx21->debug_root = NULL;
+ }
+}
+
+#endif
+
diff --git a/drivers/usb/host/imx21-hcd.c b/drivers/usb/host/imx21-hcd.c
new file mode 100644
index 0000000..213e270
--- /dev/null
+++ b/drivers/usb/host/imx21-hcd.c
@@ -0,0 +1,1789 @@
+/*
+ * USB Host Controller Driver for IMX21
+ *
+ * Copyright (C) 2006 Loping Dog Embedded Systems
+ * Copyright (C) 2009 Martin Fuzzey
+ * Originally written by Jay Monkman <jtm@lopingdog.com>
+ * Ported to 2.6.30, debugged and enhanced by Martin Fuzzey
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+ * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software Foundation,
+ * Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+
+ /*
+ * The i.MX21 USB hardware contains
+ * * 32 transfer descriptors (called ETDs)
+ * * 4Kb of data memory
+ *
+ * The data memory is shared between the host and function controllers
+ * (but this driver only supports the host controller)
+ *
+ * So setting up a transfer involves:
+ * * Allocating an ETD
+ * * Filling in the ETD with the appropriate information
+ * * Allocating data memory (and putting the offset in the ETD)
+ * * Activating the ETD
+ * * Getting an interrupt when done.
+ *
+ * An ETD is assigned to each active endpoint.
+ *
+ * Low resource (ETD and data memory) situations are handled differently for
+ * isochronous and non-isochronous transactions:
+ *
+ * Non ISOC transfers are queued if either ETDs or data memory are unavailable.
+ *
+ * ISOC transfers use 2 ETDs per endpoint to achieve double buffering.
+ * They allocate both ETDs and data memory during URB submission
+ * (and fail if either is unavailable).
+ */
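+
+ /*
+ * Illustrative outline (a sketch, not executed code) of how the helpers
+ * defined later in this file fit together for one non-ISOC transfer:
+ *
+ *   etd_num = alloc_etd(imx21);                     reserve an ETD slot
+ *   setup_etd_dword0(imx21, etd_num, urb, dir, maxpacket);
+ *   offset = alloc_dmem(imx21, size, ep);           reserve data memory
+ *   activate_etd(imx21, etd_num, dma, dir);         start the transfer
+ *   ... USBH_ETDDONESTAT interrupt -> nonisoc_etd_done()
+ */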
+
+#include <linux/clk.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/list.h>
+#include <linux/platform_device.h>
+#include <linux/usb.h>
+
+#include "../core/hcd.h"
+#include "imx21-hcd.h"
+
+#ifdef DEBUG
+#define DEBUG_LOG_FRAME(imx21, etd, event) \
+ (etd)->event##_frame = readl((imx21)->regs + USBH_FRMNUB)
+#else
+#define DEBUG_LOG_FRAME(imx21, etd, event) do { } while (0)
+#endif
+
+static const char hcd_name[] = "imx21-hcd";
+
+static inline struct imx21 *hcd_to_imx21(struct usb_hcd *hcd)
+{
+ return (struct imx21 *)hcd->hcd_priv;
+}
+
+
+/* =========================================== */
+/* Hardware access helpers */
+/* =========================================== */
+
+static inline void set_register_bits(struct imx21 *imx21, u32 offset, u32 mask)
+{
+ void __iomem *reg = imx21->regs + offset;
+ writel(readl(reg) | mask, reg);
+}
+
+static inline void clear_register_bits(struct imx21 *imx21,
+ u32 offset, u32 mask)
+{
+ void __iomem *reg = imx21->regs + offset;
+ writel(readl(reg) & ~mask, reg);
+}
+
+static inline void clear_toggle_bit(struct imx21 *imx21, u32 offset, u32 mask)
+{
+ void __iomem *reg = imx21->regs + offset;
+
+ if (readl(reg) & mask)
+ writel(mask, reg);
+}
+
+static inline void set_toggle_bit(struct imx21 *imx21, u32 offset, u32 mask)
+{
+ void __iomem *reg = imx21->regs + offset;
+
+ if (!(readl(reg) & mask))
+ writel(mask, reg);
+}
+
+static void etd_writel(struct imx21 *imx21, int etd_num, int dword, u32 value)
+{
+ writel(value, imx21->regs + USB_ETD_DWORD(etd_num, dword));
+}
+
+static u32 etd_readl(struct imx21 *imx21, int etd_num, int dword)
+{
+ return readl(imx21->regs + USB_ETD_DWORD(etd_num, dword));
+}
+
+static inline int wrap_frame(int counter)
+{
+ return counter & 0xFFFF;
+}
+
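+/*
+ * Frame numbers are 16 bits wide and wrap, so ordering is decided with
+ * signed 16 bit arithmetic (the same idea as time_after() for jiffies).
+ * Expected results, as a worked example rather than executed code:
+ *   frame_after(0x0005, 0x0003) -> true   (5 is after 3)
+ *   frame_after(0x0001, 0xFFFE) -> true   (1 is after 0xFFFE across the wrap)
+ *   frame_after(0xFFFE, 0x0001) -> false
+ */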
+static inline int frame_after(int frame, int after)
+{
+ /* handle wrapping like jiffies time_after */
+ return (s16)((s16)after - (s16)frame) < 0;
+}
+
+static int imx21_hc_get_frame(struct usb_hcd *hcd)
+{
+ struct imx21 *imx21 = hcd_to_imx21(hcd);
+
+ return wrap_frame(readl(imx21->regs + USBH_FRMNUB));
+}
+
+
+#include "imx21-dbg.c"
+
+/* =========================================== */
+/* ETD management */
+/* =========================================== */
+
+static int alloc_etd(struct imx21 *imx21)
+{
+ int i;
+ struct etd_priv *etd = imx21->etd;
+
+ for (i = 0; i < USB_NUM_ETD; i++, etd++) {
+ if (etd->alloc == 0) {
+ memset(etd, 0, sizeof(imx21->etd[0]));
+ etd->alloc = 1;
+ debug_etd_allocated(imx21);
+ return i;
+ }
+ }
+ return -1;
+}
+
+static void disactivate_etd(struct imx21 *imx21, int num)
+{
+ int etd_mask = (1 << num);
+ struct etd_priv *etd = &imx21->etd[num];
+
+ writel(etd_mask, imx21->regs + USBH_ETDENCLR);
+ clear_register_bits(imx21, USBH_ETDDONEEN, etd_mask);
+ writel(etd_mask, imx21->regs + USB_ETDDMACHANLCLR);
+ clear_toggle_bit(imx21, USBH_ETDDONESTAT, etd_mask);
+
+ etd->active_count = 0;
+
+ DEBUG_LOG_FRAME(imx21, etd, disactivated);
+}
+
+static void reset_etd(struct imx21 *imx21, int num)
+{
+ struct etd_priv *etd = imx21->etd + num;
+ int i;
+
+ disactivate_etd(imx21, num);
+
+ for (i = 0; i < 4; i++)
+ etd_writel(imx21, num, i, 0);
+ etd->urb = NULL;
+ etd->ep = NULL;
+ etd->td = NULL;
+}
+
+static void free_etd(struct imx21 *imx21, int num)
+{
+ if (num < 0)
+ return;
+
+ if (num >= USB_NUM_ETD) {
+ dev_err(imx21->dev, "BAD etd=%d!\n", num);
+ return;
+ }
+ if (imx21->etd[num].alloc == 0) {
+ dev_err(imx21->dev, "ETD %d already free!\n", num);
+ return;
+ }
+
+ debug_etd_freed(imx21);
+ reset_etd(imx21, num);
+ memset(&imx21->etd[num], 0, sizeof(imx21->etd[0]));
+}
+
+
+static void setup_etd_dword0(struct imx21 *imx21,
+ int etd_num, struct urb *urb, u8 dir, u16 maxpacket)
+{
+ etd_writel(imx21, etd_num, 0,
+ ((u32) usb_pipedevice(urb->pipe)) << DW0_ADDRESS |
+ ((u32) usb_pipeendpoint(urb->pipe) << DW0_ENDPNT) |
+ ((u32) dir << DW0_DIRECT) |
+ ((u32) ((urb->dev->speed == USB_SPEED_LOW) ?
+ 1 : 0) << DW0_SPEED) |
+ ((u32) fmt_urb_to_etd[usb_pipetype(urb->pipe)] << DW0_FORMAT) |
+ ((u32) maxpacket << DW0_MAXPKTSIZ));
+}
+
+static void activate_etd(struct imx21 *imx21,
+ int etd_num, dma_addr_t dma, u8 dir)
+{
+ u32 etd_mask = 1 << etd_num;
+ struct etd_priv *etd = &imx21->etd[etd_num];
+
+ clear_toggle_bit(imx21, USBH_ETDDONESTAT, etd_mask);
+ set_register_bits(imx21, USBH_ETDDONEEN, etd_mask);
+ clear_toggle_bit(imx21, USBH_XFILLSTAT, etd_mask);
+ clear_toggle_bit(imx21, USBH_YFILLSTAT, etd_mask);
+
+ if (dma) {
+ set_register_bits(imx21, USB_ETDDMACHANLCLR, etd_mask);
+ clear_toggle_bit(imx21, USBH_XBUFSTAT, etd_mask);
+ clear_toggle_bit(imx21, USBH_YBUFSTAT, etd_mask);
+ writel(dma, imx21->regs + USB_ETDSMSA(etd_num));
+ set_register_bits(imx21, USB_ETDDMAEN, etd_mask);
+ } else {
+ if (dir != TD_DIR_IN) {
+ /* need to set for ZLP */
+ set_toggle_bit(imx21, USBH_XFILLSTAT, etd_mask);
+ set_toggle_bit(imx21, USBH_YFILLSTAT, etd_mask);
+ }
+ }
+
+ DEBUG_LOG_FRAME(imx21, etd, activated);
+
+#ifdef DEBUG
+ if (!etd->active_count) {
+ int i;
+ etd->activated_frame = readl(imx21->regs + USBH_FRMNUB);
+ etd->disactivated_frame = -1;
+ etd->last_int_frame = -1;
+ etd->last_req_frame = -1;
+
+ for (i = 0; i < 4; i++)
+ etd->submitted_dwords[i] = etd_readl(imx21, etd_num, i);
+ }
+#endif
+
+ etd->active_count = 1;
+ writel(etd_mask, imx21->regs + USBH_ETDENSET);
+}
+
+/* =========================================== */
+/* Data memory management */
+/* =========================================== */
+
+static int alloc_dmem(struct imx21 *imx21, unsigned int size,
+ struct usb_host_endpoint *ep)
+{
+ unsigned int offset = 0;
+ struct imx21_dmem_area *area;
+ struct imx21_dmem_area *tmp;
+
+ size += (~size + 1) & 0x3; /* Round to 4 byte multiple */
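+ /* (~size + 1) equals -size, so the line above adds (-size) & 3:
+ * e.g. size 13 becomes 16 and 16 stays 16 (a worked example, not code). */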
+
+ if (size > DMEM_SIZE) {
+ dev_err(imx21->dev, "size=%d > DMEM_SIZE(%d)\n",
+ size, DMEM_SIZE);
+ return -EINVAL;
+ }
+
+ list_for_each_entry(tmp, &imx21->dmem_list, list) {
+ if ((size + offset) < offset)
+ goto fail;
+ if ((size + offset) <= tmp->offset)
+ break;
+ offset = tmp->size + tmp->offset;
+ if ((offset + size) > DMEM_SIZE)
+ goto fail;
+ }
+
+ area = kmalloc(sizeof(struct imx21_dmem_area), GFP_ATOMIC);
+ if (area == NULL)
+ return -ENOMEM;
+
+ area->ep = ep;
+ area->offset = offset;
+ area->size = size;
+ list_add_tail(&area->list, &tmp->list);
+ debug_dmem_allocated(imx21, size);
+ return offset;
+
+fail:
+ return -ENOMEM;
+}
+
+/* Memory now available for a queued ETD - activate it */
+static void activate_queued_etd(struct imx21 *imx21,
+ struct etd_priv *etd, u32 dmem_offset)
+{
+ struct urb_priv *urb_priv = etd->urb->hcpriv;
+ int etd_num = etd - &imx21->etd[0];
+ u32 maxpacket = etd_readl(imx21, etd_num, 1) >> DW1_YBUFSRTAD;
+ u8 dir = (etd_readl(imx21, etd_num, 2) >> DW2_DIRPID) & 0x03;
+
+ dev_dbg(imx21->dev, "activating queued ETD %d now DMEM available\n",
+ etd_num);
+ etd_writel(imx21, etd_num, 1,
+ ((dmem_offset + maxpacket) << DW1_YBUFSRTAD) | dmem_offset);
+
+ urb_priv->active = 1;
+ activate_etd(imx21, etd_num, etd->dma_handle, dir);
+}
+
+static void free_dmem(struct imx21 *imx21, int offset)
+{
+ struct imx21_dmem_area *area;
+ struct etd_priv *etd, *tmp;
+ int found = 0;
+
+ list_for_each_entry(area, &imx21->dmem_list, list) {
+ if (area->offset == offset) {
+ debug_dmem_freed(imx21, area->size);
+ list_del(&area->list);
+ kfree(area);
+ found = 1;
+ break;
+ }
+ }
+
+ if (!found) {
+ dev_err(imx21->dev,
+ "Trying to free unallocated DMEM %d\n", offset);
+ return;
+ }
+
+ /* Try again to allocate memory for anything we've queued */
+ list_for_each_entry_safe(etd, tmp, &imx21->queue_for_dmem, queue) {
+ offset = alloc_dmem(imx21, etd->dmem_size, etd->ep);
+ if (offset >= 0) {
+ list_del(&etd->queue);
+ activate_queued_etd(imx21, etd, (u32)offset);
+ }
+ }
+}
+
+static void free_epdmem(struct imx21 *imx21, struct usb_host_endpoint *ep)
+{
+ struct imx21_dmem_area *area, *tmp;
+
+ list_for_each_entry_safe(area, tmp, &imx21->dmem_list, list) {
+ if (area->ep == ep) {
+ dev_err(imx21->dev,
+ "Active DMEM %d for disabled ep=%p\n",
+ area->offset, ep);
+ list_del(&area->list);
+ kfree(area);
+ }
+ }
+}
+
+
+/* =========================================== */
+/* Endpoint handling */
+/* =========================================== */
+static void schedule_nonisoc_etd(struct imx21 *imx21, struct urb *urb);
+
+/* Endpoint now idle - release its ETD(s) or assign to a queued request */
+static void ep_idle(struct imx21 *imx21, struct ep_priv *ep_priv)
+{
+ int etd_num;
+ int i;
+
+ for (i = 0; i < NUM_ISO_ETDS; i++) {
+ etd_num = ep_priv->etd[i];
+ if (etd_num < 0)
+ continue;
+
+ ep_priv->etd[i] = -1;
+ if (list_empty(&imx21->queue_for_etd)) {
+ free_etd(imx21, etd_num);
+ continue;
+ }
+
+ dev_dbg(imx21->dev,
+ "assigning idle etd %d for queued request\n", etd_num);
+ ep_priv = list_first_entry(&imx21->queue_for_etd,
+ struct ep_priv, queue);
+ list_del(&ep_priv->queue);
+ reset_etd(imx21, etd_num);
+ ep_priv->waiting_etd = 0;
+ ep_priv->etd[i] = etd_num;
+
+ if (list_empty(&ep_priv->ep->urb_list)) {
+ dev_err(imx21->dev, "No urb for queued ep!\n");
+ continue;
+ }
+ schedule_nonisoc_etd(imx21, list_first_entry(
+ &ep_priv->ep->urb_list, struct urb, urb_list));
+ }
+}
+
+static void urb_done(struct usb_hcd *hcd, struct urb *urb, int status)
+__releases(imx21->lock)
+__acquires(imx21->lock)
+{
+ struct imx21 *imx21 = hcd_to_imx21(hcd);
+ struct ep_priv *ep_priv = urb->ep->hcpriv;
+ struct urb_priv *urb_priv = urb->hcpriv;
+
+ debug_urb_completed(imx21, urb, status);
+ dev_vdbg(imx21->dev, "urb %p done %d\n", urb, status);
+
+ kfree(urb_priv->isoc_td);
+ kfree(urb->hcpriv);
+ urb->hcpriv = NULL;
+ usb_hcd_unlink_urb_from_ep(hcd, urb);
+ spin_unlock(&imx21->lock);
+ usb_hcd_giveback_urb(hcd, urb, status);
+ spin_lock(&imx21->lock);
+ if (list_empty(&ep_priv->ep->urb_list))
+ ep_idle(imx21, ep_priv);
+}
+
+/* =========================================== */
+/* ISOC Handling ... */
+/* =========================================== */
+
+static void schedule_isoc_etds(struct usb_hcd *hcd,
+ struct usb_host_endpoint *ep)
+{
+ struct imx21 *imx21 = hcd_to_imx21(hcd);
+ struct ep_priv *ep_priv = ep->hcpriv;
+ struct etd_priv *etd;
+ struct urb_priv *urb_priv;
+ struct td *td;
+ int etd_num;
+ int i;
+ int cur_frame;
+ u8 dir;
+
+ for (i = 0; i < NUM_ISO_ETDS; i++) {
+too_late:
+ if (list_empty(&ep_priv->td_list))
+ break;
+
+ etd_num = ep_priv->etd[i];
+ if (etd_num < 0)
+ break;
+
+ etd = &imx21->etd[etd_num];
+ if (etd->urb)
+ continue;
+
+ td = list_entry(ep_priv->td_list.next, struct td, list);
+ list_del(&td->list);
+ urb_priv = td->urb->hcpriv;
+
+ cur_frame = imx21_hc_get_frame(hcd);
+ if (frame_after(cur_frame, td->frame)) {
+ dev_dbg(imx21->dev, "isoc too late frame %d > %d\n",
+ cur_frame, td->frame);
+ urb_priv->isoc_status = -EXDEV;
+ td->urb->iso_frame_desc[
+ td->isoc_index].actual_length = 0;
+ td->urb->iso_frame_desc[td->isoc_index].status = -EXDEV;
+ if (--urb_priv->isoc_remaining == 0)
+ urb_done(hcd, td->urb, urb_priv->isoc_status);
+ goto too_late;
+ }
+
+ urb_priv->active = 1;
+ etd->td = td;
+ etd->ep = td->ep;
+ etd->urb = td->urb;
+ etd->len = td->len;
+
+ debug_isoc_submitted(imx21, cur_frame, td);
+
+ dir = usb_pipeout(td->urb->pipe) ? TD_DIR_OUT : TD_DIR_IN;
+ setup_etd_dword0(imx21, etd_num, td->urb, dir, etd->dmem_size);
+ etd_writel(imx21, etd_num, 1, etd->dmem_offset);
+ etd_writel(imx21, etd_num, 2,
+ (TD_NOTACCESSED << DW2_COMPCODE) |
+ ((td->frame & 0xFFFF) << DW2_STARTFRM));
+ etd_writel(imx21, etd_num, 3,
+ (TD_NOTACCESSED << DW3_COMPCODE0) |
+ (td->len << DW3_PKTLEN0));
+
+ activate_etd(imx21, etd_num, td->data, dir);
+ }
+}
+
+static void isoc_etd_done(struct usb_hcd *hcd, struct urb *urb, int etd_num)
+{
+ struct imx21 *imx21 = hcd_to_imx21(hcd);
+ int etd_mask = 1 << etd_num;
+ struct urb_priv *urb_priv = urb->hcpriv;
+ struct etd_priv *etd = imx21->etd + etd_num;
+ struct td *td = etd->td;
+ struct usb_host_endpoint *ep = etd->ep;
+ int isoc_index = td->isoc_index;
+ unsigned int pipe = urb->pipe;
+ int dir_in = usb_pipein(pipe);
+ int cc;
+ int bytes_xfrd;
+
+ disactivate_etd(imx21, etd_num);
+
+ cc = (etd_readl(imx21, etd_num, 3) >> DW3_COMPCODE0) & 0xf;
+ bytes_xfrd = etd_readl(imx21, etd_num, 3) & 0x3ff;
+
+ /* Input doesn't always fill the buffer; don't generate an error
+ * when this happens.
+ */
+ if (dir_in && (cc == TD_DATAUNDERRUN))
+ cc = TD_CC_NOERROR;
+
+ if (cc == TD_NOTACCESSED)
+ bytes_xfrd = 0;
+
+ debug_isoc_completed(imx21,
+ imx21_hc_get_frame(hcd), td, cc, bytes_xfrd);
+ if (cc) {
+ urb_priv->isoc_status = -EXDEV;
+ dev_dbg(imx21->dev,
+ "bad iso cc=0x%X frame=%d sched frame=%d "
+ "cnt=%d len=%d urb=%p etd=%d index=%d\n",
+ cc, imx21_hc_get_frame(hcd), td->frame,
+ bytes_xfrd, td->len, urb, etd_num, isoc_index);
+ }
+
+ if (dir_in)
+ clear_toggle_bit(imx21, USBH_XFILLSTAT, etd_mask);
+
+ urb->actual_length += bytes_xfrd;
+ urb->iso_frame_desc[isoc_index].actual_length = bytes_xfrd;
+ urb->iso_frame_desc[isoc_index].status = cc_to_error[cc];
+
+ etd->td = NULL;
+ etd->urb = NULL;
+ etd->ep = NULL;
+
+ if (--urb_priv->isoc_remaining == 0)
+ urb_done(hcd, urb, urb_priv->isoc_status);
+
+ schedule_isoc_etds(hcd, ep);
+}
+
+static struct ep_priv *alloc_isoc_ep(
+ struct imx21 *imx21, struct usb_host_endpoint *ep)
+{
+ struct ep_priv *ep_priv;
+ int i;
+
+ ep_priv = kzalloc(sizeof(struct ep_priv), GFP_ATOMIC);
+ if (ep_priv == NULL)
+ return NULL;
+
+ /* Allocate the ETDs */
+ for (i = 0; i < NUM_ISO_ETDS; i++) {
+ ep_priv->etd[i] = alloc_etd(imx21);
+ if (ep_priv->etd[i] < 0) {
+ int j;
+ dev_err(imx21->dev, "isoc: Couldn't allocate etd\n");
+ for (j = 0; j < i; j++)
+ free_etd(imx21, ep_priv->etd[j]);
+ goto alloc_etd_failed;
+ }
+ imx21->etd[ep_priv->etd[i]].ep = ep;
+ }
+
+ INIT_LIST_HEAD(&ep_priv->td_list);
+ ep_priv->ep = ep;
+ ep->hcpriv = ep_priv;
+ return ep_priv;
+
+alloc_etd_failed:
+ kfree(ep_priv);
+ return NULL;
+}
+
+static int imx21_hc_urb_enqueue_isoc(struct usb_hcd *hcd,
+ struct usb_host_endpoint *ep,
+ struct urb *urb, gfp_t mem_flags)
+{
+ struct imx21 *imx21 = hcd_to_imx21(hcd);
+ struct urb_priv *urb_priv;
+ unsigned long flags;
+ struct ep_priv *ep_priv;
+ struct td *td = NULL;
+ int i;
+ int ret;
+ int cur_frame;
+ u16 maxpacket;
+
+ urb_priv = kzalloc(sizeof(struct urb_priv), mem_flags);
+ if (urb_priv == NULL)
+ return -ENOMEM;
+
+ urb_priv->isoc_td = kzalloc(
+ sizeof(struct td) * urb->number_of_packets, mem_flags);
+ if (urb_priv->isoc_td == NULL) {
+ ret = -ENOMEM;
+ goto alloc_td_failed;
+ }
+
+ spin_lock_irqsave(&imx21->lock, flags);
+
+ if (ep->hcpriv == NULL) {
+ ep_priv = alloc_isoc_ep(imx21, ep);
+ if (ep_priv == NULL) {
+ ret = -ENOMEM;
+ goto alloc_ep_failed;
+ }
+ } else {
+ ep_priv = ep->hcpriv;
+ }
+
+ ret = usb_hcd_link_urb_to_ep(hcd, urb);
+ if (ret)
+ goto link_failed;
+
+ urb->status = -EINPROGRESS;
+ urb->actual_length = 0;
+ urb->error_count = 0;
+ urb->hcpriv = urb_priv;
+ urb_priv->ep = ep;
+
+ /* allocate data memory for largest packets if not already done */
+ maxpacket = usb_maxpacket(urb->dev, urb->pipe, usb_pipeout(urb->pipe));
+ for (i = 0; i < NUM_ISO_ETDS; i++) {
+ struct etd_priv *etd = &imx21->etd[ep_priv->etd[i]];
+
+ if (etd->dmem_size > 0 && etd->dmem_size < maxpacket) {
+ /* not sure if this can really occur.... */
+ dev_err(imx21->dev, "increasing isoc buffer %d->%d\n",
+ etd->dmem_size, maxpacket);
+ ret = -EMSGSIZE;
+ goto alloc_dmem_failed;
+ }
+
+ if (etd->dmem_size == 0) {
+ etd->dmem_offset = alloc_dmem(imx21, maxpacket, ep);
+ if (etd->dmem_offset < 0) {
+ dev_dbg(imx21->dev, "failed alloc isoc dmem\n");
+ ret = -EAGAIN;
+ goto alloc_dmem_failed;
+ }
+ etd->dmem_size = maxpacket;
+ }
+ }
+
+ /* calculate frame */
+ cur_frame = imx21_hc_get_frame(hcd);
+ if (urb->transfer_flags & URB_ISO_ASAP) {
+ if (list_empty(&ep_priv->td_list))
+ urb->start_frame = cur_frame + 5;
+ else
+ urb->start_frame = list_entry(
+ ep_priv->td_list.prev,
+ struct td, list)->frame + urb->interval;
+ }
+ urb->start_frame = wrap_frame(urb->start_frame);
+ if (frame_after(cur_frame, urb->start_frame)) {
+ dev_dbg(imx21->dev,
+ "enqueue: adjusting iso start %d (cur=%d) asap=%d\n",
+ urb->start_frame, cur_frame,
+ (urb->transfer_flags & URB_ISO_ASAP) != 0);
+ urb->start_frame = wrap_frame(cur_frame + 1);
+ }
+
+ /* set up transfers */
+ td = urb_priv->isoc_td;
+ for (i = 0; i < urb->number_of_packets; i++, td++) {
+ td->ep = ep;
+ td->urb = urb;
+ td->len = urb->iso_frame_desc[i].length;
+ td->isoc_index = i;
+ td->frame = wrap_frame(urb->start_frame + urb->interval * i);
+ td->data = urb->transfer_dma + urb->iso_frame_desc[i].offset;
+ list_add_tail(&td->list, &ep_priv->td_list);
+ }
+
+ urb_priv->isoc_remaining = urb->number_of_packets;
+ dev_vdbg(imx21->dev, "setup %d packets for iso frame %d->%d\n",
+ urb->number_of_packets, urb->start_frame, td->frame);
+
+ debug_urb_submitted(imx21, urb);
+ schedule_isoc_etds(hcd, ep);
+
+ spin_unlock_irqrestore(&imx21->lock, flags);
+ return 0;
+
+alloc_dmem_failed:
+ usb_hcd_unlink_urb_from_ep(hcd, urb);
+
+link_failed:
+alloc_ep_failed:
+ spin_unlock_irqrestore(&imx21->lock, flags);
+ kfree(urb_priv->isoc_td);
+
+alloc_td_failed:
+ kfree(urb_priv);
+ return ret;
+}
+
+static void dequeue_isoc_urb(struct imx21 *imx21,
+ struct urb *urb, struct ep_priv *ep_priv)
+{
+ struct urb_priv *urb_priv = urb->hcpriv;
+ struct td *td, *tmp;
+ int i;
+
+ if (urb_priv->active) {
+ for (i = 0; i < NUM_ISO_ETDS; i++) {
+ int etd_num = ep_priv->etd[i];
+ if (etd_num != -1 && imx21->etd[etd_num].urb == urb) {
+ struct etd_priv *etd = imx21->etd + etd_num;
+
+ reset_etd(imx21, etd_num);
+ if (etd->dmem_size)
+ free_dmem(imx21, etd->dmem_offset);
+ etd->dmem_size = 0;
+ }
+ }
+ }
+
+ list_for_each_entry_safe(td, tmp, &ep_priv->td_list, list) {
+ if (td->urb == urb) {
+ dev_vdbg(imx21->dev, "removing td %p\n", td);
+ list_del(&td->list);
+ }
+ }
+}
+
+/* =========================================== */
+/* NON ISOC Handling ... */
+/* =========================================== */
+
+static void schedule_nonisoc_etd(struct imx21 *imx21, struct urb *urb)
+{
+ unsigned int pipe = urb->pipe;
+ struct urb_priv *urb_priv = urb->hcpriv;
+ struct ep_priv *ep_priv = urb_priv->ep->hcpriv;
+ int state = urb_priv->state;
+ int etd_num = ep_priv->etd[0];
+ struct etd_priv *etd;
+ int dmem_offset;
+ u32 count;
+ u16 etd_buf_size;
+ u16 maxpacket;
+ u8 dir;
+ u8 bufround;
+ u8 datatoggle;
+ u8 interval = 0;
+ u8 relpolpos = 0;
+
+ if (etd_num < 0) {
+ dev_err(imx21->dev, "No valid ETD\n");
+ return;
+ }
+ if (readl(imx21->regs + USBH_ETDENSET) & (1 << etd_num))
+ dev_err(imx21->dev, "submitting to active ETD %d\n", etd_num);
+
+ etd = &imx21->etd[etd_num];
+ maxpacket = usb_maxpacket(urb->dev, pipe, usb_pipeout(pipe));
+ if (!maxpacket)
+ maxpacket = 8;
+
+ if (usb_pipecontrol(pipe) && (state != US_CTRL_DATA)) {
+ if (state == US_CTRL_SETUP) {
+ dir = TD_DIR_SETUP;
+ etd->dma_handle = urb->setup_dma;
+ bufround = 0;
+ count = 8;
+ datatoggle = TD_TOGGLE_DATA0;
+ } else { /* US_CTRL_ACK */
+ dir = usb_pipeout(pipe) ? TD_DIR_IN : TD_DIR_OUT;
+ etd->dma_handle = urb->transfer_dma;
+ bufround = 0;
+ count = 0;
+ datatoggle = TD_TOGGLE_DATA1;
+ }
+ } else {
+ dir = usb_pipeout(pipe) ? TD_DIR_OUT : TD_DIR_IN;
+ bufround = (dir == TD_DIR_IN) ? 1 : 0;
+ etd->dma_handle = urb->transfer_dma;
+ if (usb_pipebulk(pipe) && (state == US_BULK0))
+ count = 0;
+ else
+ count = urb->transfer_buffer_length;
+
+ if (usb_pipecontrol(pipe)) {
+ datatoggle = TD_TOGGLE_DATA1;
+ } else {
+ if (usb_gettoggle(
+ urb->dev,
+ usb_pipeendpoint(urb->pipe),
+ usb_pipeout(urb->pipe)))
+ datatoggle = TD_TOGGLE_DATA1;
+ else
+ datatoggle = TD_TOGGLE_DATA0;
+ }
+ }
+
+ etd->urb = urb;
+ etd->ep = urb_priv->ep;
+ etd->len = count;
+
+ if (usb_pipeint(pipe)) {
+ interval = urb->interval;
+ relpolpos = (readl(imx21->regs + USBH_FRMNUB) + 1) & 0xff;
+ }
+
+ /* Write ETD to device memory */
+ setup_etd_dword0(imx21, etd_num, urb, dir, maxpacket);
+
+ etd_writel(imx21, etd_num, 2,
+ (u32) interval << DW2_POLINTERV |
+ ((u32) relpolpos << DW2_RELPOLPOS) |
+ ((u32) dir << DW2_DIRPID) |
+ ((u32) bufround << DW2_BUFROUND) |
+ ((u32) datatoggle << DW2_DATATOG) |
+ ((u32) TD_NOTACCESSED << DW2_COMPCODE));
+
+ /* DMA will always transfer buffer size even if TOBYCNT in DWORD3
+ is smaller. Make sure we don't overrun the buffer!
+ */
+ if (count && count < maxpacket)
+ etd_buf_size = count;
+ else
+ etd_buf_size = maxpacket;
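+ /* e.g. an endpoint with maxpacket 8 transferring 3 bytes gets
+ * etd_buf_size = 3, so the DMA engine cannot run past the URB buffer
+ * (an illustrative example only). */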
+
+ etd_writel(imx21, etd_num, 3,
+ ((u32) (etd_buf_size - 1) << DW3_BUFSIZE) | (u32) count);
+
+ if (!count)
+ etd->dma_handle = 0;
+
+ /* allocate x and y buffer space at once */
+ etd->dmem_size = (count > maxpacket) ? maxpacket * 2 : maxpacket;
+ dmem_offset = alloc_dmem(imx21, etd->dmem_size, urb_priv->ep);
+ if (dmem_offset < 0) {
+ /* Setup everything we can in HW and update when we get DMEM */
+ etd_writel(imx21, etd_num, 1, (u32)maxpacket << 16);
+
+ dev_dbg(imx21->dev, "Queuing etd %d for DMEM\n", etd_num);
+ debug_urb_queued_for_dmem(imx21, urb);
+ list_add_tail(&etd->queue, &imx21->queue_for_dmem);
+ return;
+ }
+
+ etd_writel(imx21, etd_num, 1,
+ (((u32) dmem_offset + (u32) maxpacket) << DW1_YBUFSRTAD) |
+ (u32) dmem_offset);
+
+ urb_priv->active = 1;
+
+ /* enable the ETD to kick off transfer */
+ dev_vdbg(imx21->dev, "Activating etd %d for %d bytes %s\n",
+ etd_num, count, dir != TD_DIR_IN ? "out" : "in");
+ activate_etd(imx21, etd_num, etd->dma_handle, dir);
+
+}
+
+static void nonisoc_etd_done(struct usb_hcd *hcd, struct urb *urb, int etd_num)
+{
+ struct imx21 *imx21 = hcd_to_imx21(hcd);
+ struct etd_priv *etd = &imx21->etd[etd_num];
+ u32 etd_mask = 1 << etd_num;
+ struct urb_priv *urb_priv = urb->hcpriv;
+ int dir;
+ u16 xbufaddr;
+ int cc;
+ u32 bytes_xfrd;
+ int etd_done;
+
+ disactivate_etd(imx21, etd_num);
+
+ dir = (etd_readl(imx21, etd_num, 0) >> DW0_DIRECT) & 0x3;
+ xbufaddr = etd_readl(imx21, etd_num, 1) & 0xffff;
+ cc = (etd_readl(imx21, etd_num, 2) >> DW2_COMPCODE) & 0xf;
+ bytes_xfrd = etd->len - (etd_readl(imx21, etd_num, 3) & 0x1fffff);
+
+ /* save toggle carry */
+ usb_settoggle(urb->dev, usb_pipeendpoint(urb->pipe),
+ usb_pipeout(urb->pipe),
+ (etd_readl(imx21, etd_num, 0) >> DW0_TOGCRY) & 0x1);
+
+ if (dir == TD_DIR_IN) {
+ clear_toggle_bit(imx21, USBH_XFILLSTAT, etd_mask);
+ clear_toggle_bit(imx21, USBH_YFILLSTAT, etd_mask);
+ }
+ free_dmem(imx21, xbufaddr);
+
+ urb->error_count = 0;
+ if (!(urb->transfer_flags & URB_SHORT_NOT_OK)
+ && (cc == TD_DATAUNDERRUN))
+ cc = TD_CC_NOERROR;
+
+ if (cc != 0)
+ dev_vdbg(imx21->dev, "cc is 0x%x\n", cc);
+
+ etd_done = (cc_to_error[cc] != 0); /* stop if error */
+
+ switch (usb_pipetype(urb->pipe)) {
+ case PIPE_CONTROL:
+ switch (urb_priv->state) {
+ case US_CTRL_SETUP:
+ if (urb->transfer_buffer_length > 0)
+ urb_priv->state = US_CTRL_DATA;
+ else
+ urb_priv->state = US_CTRL_ACK;
+ break;
+ case US_CTRL_DATA:
+ urb->actual_length += bytes_xfrd;
+ urb_priv->state = US_CTRL_ACK;
+ break;
+ case US_CTRL_ACK:
+ etd_done = 1;
+ break;
+ default:
+ dev_err(imx21->dev,
+ "Invalid pipe state %d\n", urb_priv->state);
+ etd_done = 1;
+ break;
+ }
+ break;
+
+ case PIPE_BULK:
+ urb->actual_length += bytes_xfrd;
+ if ((urb_priv->state == US_BULK)
+ && (urb->transfer_flags & URB_ZERO_PACKET)
+ && urb->transfer_buffer_length > 0
+ && ((urb->transfer_buffer_length %
+ usb_maxpacket(urb->dev, urb->pipe,
+ usb_pipeout(urb->pipe))) == 0)) {
+ /* need a 0-packet */
+ urb_priv->state = US_BULK0;
+ } else {
+ etd_done = 1;
+ }
+ break;
+
+ case PIPE_INTERRUPT:
+ urb->actual_length += bytes_xfrd;
+ etd_done = 1;
+ break;
+ }
+
+ if (!etd_done) {
+ dev_vdbg(imx21->dev, "next state=%d\n", urb_priv->state);
+ schedule_nonisoc_etd(imx21, urb);
+ } else {
+ struct usb_host_endpoint *ep = urb->ep;
+
+ urb_done(hcd, urb, cc_to_error[cc]);
+ etd->urb = NULL;
+
+ if (!list_empty(&ep->urb_list)) {
+ urb = list_first_entry(&ep->urb_list,
+ struct urb, urb_list);
+ dev_vdbg(imx21->dev, "next URB %p\n", urb);
+ schedule_nonisoc_etd(imx21, urb);
+ }
+ }
+}
+
+static struct ep_priv *alloc_ep(void)
+{
+ int i;
+ struct ep_priv *ep_priv;
+
+ ep_priv = kzalloc(sizeof(struct ep_priv), GFP_ATOMIC);
+ if (!ep_priv)
+ return NULL;
+
+ for (i = 0; i < NUM_ISO_ETDS; ++i)
+ ep_priv->etd[i] = -1;
+
+ return ep_priv;
+}
+
+static int imx21_hc_urb_enqueue(struct usb_hcd *hcd,
+ struct urb *urb, gfp_t mem_flags)
+{
+ struct imx21 *imx21 = hcd_to_imx21(hcd);
+ struct usb_host_endpoint *ep = urb->ep;
+ struct urb_priv *urb_priv;
+ struct ep_priv *ep_priv;
+ struct etd_priv *etd;
+ int ret;
+ unsigned long flags;
+ int new_ep = 0;
+
+ dev_vdbg(imx21->dev,
+ "enqueue urb=%p ep=%p len=%d "
+ "buffer=%p dma=%08X setupBuf=%p setupDma=%08X\n",
+ urb, ep,
+ urb->transfer_buffer_length,
+ urb->transfer_buffer, urb->transfer_dma,
+ urb->setup_packet, urb->setup_dma);
+
+ if (usb_pipeisoc(urb->pipe))
+ return imx21_hc_urb_enqueue_isoc(hcd, ep, urb, mem_flags);
+
+ urb_priv = kzalloc(sizeof(struct urb_priv), mem_flags);
+ if (!urb_priv)
+ return -ENOMEM;
+
+ spin_lock_irqsave(&imx21->lock, flags);
+
+ ep_priv = ep->hcpriv;
+ if (ep_priv == NULL) {
+ ep_priv = alloc_ep();
+ if (!ep_priv) {
+ ret = -ENOMEM;
+ goto failed_alloc_ep;
+ }
+ ep->hcpriv = ep_priv;
+ ep_priv->ep = ep;
+ new_ep = 1;
+ }
+
+ ret = usb_hcd_link_urb_to_ep(hcd, urb);
+ if (ret)
+ goto failed_link;
+
+ urb->status = -EINPROGRESS;
+ urb->actual_length = 0;
+ urb->error_count = 0;
+ urb->hcpriv = urb_priv;
+ urb_priv->ep = ep;
+
+ switch (usb_pipetype(urb->pipe)) {
+ case PIPE_CONTROL:
+ urb_priv->state = US_CTRL_SETUP;
+ break;
+ case PIPE_BULK:
+ urb_priv->state = US_BULK;
+ break;
+ }
+
+ debug_urb_submitted(imx21, urb);
+ if (ep_priv->etd[0] < 0) {
+ if (ep_priv->waiting_etd) {
+ dev_dbg(imx21->dev,
+ "no ETD available already queued %p\n",
+ ep_priv);
+ debug_urb_queued_for_etd(imx21, urb);
+ goto out;
+ }
+ ep_priv->etd[0] = alloc_etd(imx21);
+ if (ep_priv->etd[0] < 0) {
+ dev_dbg(imx21->dev,
+ "no ETD available queueing %p\n", ep_priv);
+ debug_urb_queued_for_etd(imx21, urb);
+ list_add_tail(&ep_priv->queue, &imx21->queue_for_etd);
+ ep_priv->waiting_etd = 1;
+ goto out;
+ }
+ }
+
+ /* Schedule if no URB already active for this endpoint */
+ etd = &imx21->etd[ep_priv->etd[0]];
+ if (etd->urb == NULL) {
+ DEBUG_LOG_FRAME(imx21, etd, last_req);
+ schedule_nonisoc_etd(imx21, urb);
+ }
+
+out:
+ spin_unlock_irqrestore(&imx21->lock, flags);
+ return 0;
+
+failed_link:
+failed_alloc_ep:
+ spin_unlock_irqrestore(&imx21->lock, flags);
+ kfree(urb_priv);
+ return ret;
+}
+
+static int imx21_hc_urb_dequeue(struct usb_hcd *hcd, struct urb *urb,
+ int status)
+{
+ struct imx21 *imx21 = hcd_to_imx21(hcd);
+ unsigned long flags;
+ struct usb_host_endpoint *ep;
+ struct ep_priv *ep_priv;
+ struct urb_priv *urb_priv = urb->hcpriv;
+ int ret = -EINVAL;
+
+ dev_vdbg(imx21->dev, "dequeue urb=%p iso=%d status=%d\n",
+ urb, usb_pipeisoc(urb->pipe), status);
+
+ spin_lock_irqsave(&imx21->lock, flags);
+
+ ret = usb_hcd_check_unlink_urb(hcd, urb, status);
+ if (ret)
+ goto fail;
+ ep = urb_priv->ep;
+ ep_priv = ep->hcpriv;
+
+ debug_urb_unlinked(imx21, urb);
+
+ if (usb_pipeisoc(urb->pipe)) {
+ dequeue_isoc_urb(imx21, urb, ep_priv);
+ schedule_isoc_etds(hcd, ep);
+ } else if (urb_priv->active) {
+ int etd_num = ep_priv->etd[0];
+ if (etd_num != -1) {
+ disactivate_etd(imx21, etd_num);
+ free_dmem(imx21, etd_readl(imx21, etd_num, 1) & 0xffff);
+ imx21->etd[etd_num].urb = NULL;
+ }
+ }
+
+ urb_done(hcd, urb, status);
+
+ spin_unlock_irqrestore(&imx21->lock, flags);
+ return 0;
+
+fail:
+ spin_unlock_irqrestore(&imx21->lock, flags);
+ return ret;
+}
+
+/* =========================================== */
+/* Interrupt dispatch */
+/* =========================================== */
+
+static void process_etds(struct usb_hcd *hcd, struct imx21 *imx21, int sof)
+{
+ int etd_num;
+ int enable_sof_int = 0;
+ unsigned long flags;
+
+ spin_lock_irqsave(&imx21->lock, flags);
+
+ for (etd_num = 0; etd_num < USB_NUM_ETD; etd_num++) {
+ u32 etd_mask = 1 << etd_num;
+ u32 enabled = readl(imx21->regs + USBH_ETDENSET) & etd_mask;
+ u32 done = readl(imx21->regs + USBH_ETDDONESTAT) & etd_mask;
+ struct etd_priv *etd = &imx21->etd[etd_num];
+
+
+ if (done) {
+ DEBUG_LOG_FRAME(imx21, etd, last_int);
+ } else {
+/*
+ * Kludge warning!
+ *
+ * When multiple transfers are using the bus we sometimes get into a state
+ * where the transfer has completed (the CC field of the ETD is != 0x0F),
+ * the ETD has self disabled but the ETDDONESTAT flag is not set
+ * (and hence no interrupt occurs).
+ * This causes the transfer in question to hang.
+ * The kludge below checks for this condition at each SOF and processes any
+ * blocked ETDs (after an arbitrary 10 frame wait).
+ *
+ * With a single active transfer the usbtest test suite will run for days
+ * without the kludge.
+ * With other bus activity (e.g. mass storage) even just test1 will hang without
+ * the kludge.
+ */
+ u32 dword0;
+ int cc;
+
+ if (etd->active_count && !enabled) /* suspicious... */
+ enable_sof_int = 1;
+
+ if (!sof || enabled || !etd->active_count)
+ continue;
+
+ cc = etd_readl(imx21, etd_num, 2) >> DW2_COMPCODE;
+ if (cc == TD_NOTACCESSED)
+ continue;
+
+ if (++etd->active_count < 10)
+ continue;
+
+ dword0 = etd_readl(imx21, etd_num, 0);
+ dev_dbg(imx21->dev,
+ "unblock ETD %d dev=0x%X ep=0x%X cc=0x%02X!\n",
+ etd_num, dword0 & 0x7F,
+ (dword0 >> DW0_ENDPNT) & 0x0F,
+ cc);
+
+#ifdef DEBUG
+ dev_dbg(imx21->dev,
+ "frame: act=%d disact=%d"
+ " int=%d req=%d cur=%d\n",
+ etd->activated_frame,
+ etd->disactivated_frame,
+ etd->last_int_frame,
+ etd->last_req_frame,
+ readl(imx21->regs + USBH_FRMNUB));
+ imx21->debug_unblocks++;
+#endif
+ etd->active_count = 0;
+/* End of kludge */
+ }
+
+ if (etd->ep == NULL || etd->urb == NULL) {
+ dev_dbg(imx21->dev,
+ "Interrupt for unexpected etd %d"
+ " ep=%p urb=%p\n",
+ etd_num, etd->ep, etd->urb);
+ disactivate_etd(imx21, etd_num);
+ continue;
+ }
+
+ if (usb_pipeisoc(etd->urb->pipe))
+ isoc_etd_done(hcd, etd->urb, etd_num);
+ else
+ nonisoc_etd_done(hcd, etd->urb, etd_num);
+ }
+
+ /* only enable SOF interrupt if it may be needed for the kludge */
+ if (enable_sof_int)
+ set_register_bits(imx21, USBH_SYSIEN, USBH_SYSIEN_SOFINT);
+ else
+ clear_register_bits(imx21, USBH_SYSIEN, USBH_SYSIEN_SOFINT);
+
+
+ spin_unlock_irqrestore(&imx21->lock, flags);
+}
+
+static irqreturn_t imx21_irq(struct usb_hcd *hcd)
+{
+ struct imx21 *imx21 = hcd_to_imx21(hcd);
+ u32 ints = readl(imx21->regs + USBH_SYSISR);
+
+ if (ints & USBH_SYSIEN_HERRINT)
+ dev_dbg(imx21->dev, "Scheduling error\n");
+
+ if (ints & USBH_SYSIEN_SORINT)
+ dev_dbg(imx21->dev, "Scheduling overrun\n");
+
+ if (ints & (USBH_SYSISR_DONEINT | USBH_SYSISR_SOFINT))
+ process_etds(hcd, imx21, ints & USBH_SYSISR_SOFINT);
+
+ writel(ints, imx21->regs + USBH_SYSISR);
+ return IRQ_HANDLED;
+}
+
+static void imx21_hc_endpoint_disable(struct usb_hcd *hcd,
+ struct usb_host_endpoint *ep)
+{
+ struct imx21 *imx21 = hcd_to_imx21(hcd);
+ unsigned long flags;
+ struct ep_priv *ep_priv;
+ int i;
+
+ if (ep == NULL)
+ return;
+
+ spin_lock_irqsave(&imx21->lock, flags);
+ ep_priv = ep->hcpriv;
+ dev_vdbg(imx21->dev, "disable ep=%p, ep->hcpriv=%p\n", ep, ep_priv);
+
+ if (!list_empty(&ep->urb_list))
+ dev_dbg(imx21->dev, "ep's URB list is not empty\n");
+
+ if (ep_priv != NULL) {
+ for (i = 0; i < NUM_ISO_ETDS; i++) {
+ if (ep_priv->etd[i] > -1)
+ dev_dbg(imx21->dev, "free etd %d for disable\n",
+ ep_priv->etd[i]);
+
+ free_etd(imx21, ep_priv->etd[i]);
+ }
+ kfree(ep_priv);
+ ep->hcpriv = NULL;
+ }
+
+ for (i = 0; i < USB_NUM_ETD; i++) {
+ if (imx21->etd[i].alloc && imx21->etd[i].ep == ep) {
+ dev_err(imx21->dev,
+ "Active etd %d for disabled ep=%p!\n", i, ep);
+ free_etd(imx21, i);
+ }
+ }
+ free_epdmem(imx21, ep);
+ spin_unlock_irqrestore(&imx21->lock, flags);
+}
+
+/* =========================================== */
+/* Hub handling */
+/* =========================================== */
+
+static int get_hub_descriptor(struct usb_hcd *hcd,
+ struct usb_hub_descriptor *desc)
+{
+ struct imx21 *imx21 = hcd_to_imx21(hcd);
+ desc->bDescriptorType = 0x29; /* HUB descriptor */
+ desc->bHubContrCurrent = 0;
+
+ desc->bNbrPorts = readl(imx21->regs + USBH_ROOTHUBA)
+ & USBH_ROOTHUBA_NDNSTMPRT_MASK;
+ desc->bDescLength = 9;
+ desc->bPwrOn2PwrGood = 0;
+ desc->wHubCharacteristics = (__force __u16) cpu_to_le16(
+ 0x0002 | /* No power switching */
+ 0x0010 | /* No over current protection */
+ 0);
+
+ desc->bitmap[0] = 1 << 1;
+ desc->bitmap[1] = ~0;
+ return 0;
+}
+
+static int imx21_hc_hub_status_data(struct usb_hcd *hcd, char *buf)
+{
+ struct imx21 *imx21 = hcd_to_imx21(hcd);
+ int ports;
+ int changed = 0;
+ int i;
+ unsigned long flags;
+
+ spin_lock_irqsave(&imx21->lock, flags);
+ ports = readl(imx21->regs + USBH_ROOTHUBA)
+ & USBH_ROOTHUBA_NDNSTMPRT_MASK;
+ if (ports > 7) {
+ ports = 7;
+ dev_err(imx21->dev, "ports %d > 7\n", ports);
+ }
+ for (i = 0; i < ports; i++) {
+ if (readl(imx21->regs + USBH_PORTSTAT(i)) &
+ (USBH_PORTSTAT_CONNECTSC |
+ USBH_PORTSTAT_PRTENBLSC |
+ USBH_PORTSTAT_PRTSTATSC |
+ USBH_PORTSTAT_OVRCURIC |
+ USBH_PORTSTAT_PRTRSTSC)) {
+
+ changed = 1;
+ buf[0] |= 1 << (i + 1);
+ }
+ }
+ spin_unlock_irqrestore(&imx21->lock, flags);
+
+ if (changed)
+ dev_info(imx21->dev, "Hub status changed\n");
+ return changed;
+}
+
+static int imx21_hc_hub_control(struct usb_hcd *hcd,
+ u16 typeReq,
+ u16 wValue, u16 wIndex, char *buf, u16 wLength)
+{
+ struct imx21 *imx21 = hcd_to_imx21(hcd);
+ int rc = 0;
+ u32 status_write = 0;
+
+ switch (typeReq) {
+ case ClearHubFeature:
+ dev_dbg(imx21->dev, "ClearHubFeature\n");
+ switch (wValue) {
+ case C_HUB_OVER_CURRENT:
+ dev_dbg(imx21->dev, " OVER_CURRENT\n");
+ break;
+ case C_HUB_LOCAL_POWER:
+ dev_dbg(imx21->dev, " LOCAL_POWER\n");
+ break;
+ default:
+ dev_dbg(imx21->dev, " unknown\n");
+ rc = -EINVAL;
+ break;
+ }
+ break;
+
+ case ClearPortFeature:
+ dev_dbg(imx21->dev, "ClearPortFeature\n");
+ switch (wValue) {
+ case USB_PORT_FEAT_ENABLE:
+ dev_dbg(imx21->dev, " ENABLE\n");
+ status_write = USBH_PORTSTAT_CURCONST;
+ break;
+ case USB_PORT_FEAT_SUSPEND:
+ dev_dbg(imx21->dev, " SUSPEND\n");
+ status_write = USBH_PORTSTAT_PRTOVRCURI;
+ break;
+ case USB_PORT_FEAT_POWER:
+ dev_dbg(imx21->dev, " POWER\n");
+ status_write = USBH_PORTSTAT_LSDEVCON;
+ break;
+ case USB_PORT_FEAT_C_ENABLE:
+ dev_dbg(imx21->dev, " C_ENABLE\n");
+ status_write = USBH_PORTSTAT_PRTENBLSC;
+ break;
+ case USB_PORT_FEAT_C_SUSPEND:
+ dev_dbg(imx21->dev, " C_SUSPEND\n");
+ status_write = USBH_PORTSTAT_PRTSTATSC;
+ break;
+ case USB_PORT_FEAT_C_CONNECTION:
+ dev_dbg(imx21->dev, " C_CONNECTION\n");
+ status_write = USBH_PORTSTAT_CONNECTSC;
+ break;
+ case USB_PORT_FEAT_C_OVER_CURRENT:
+ dev_dbg(imx21->dev, " C_OVER_CURRENT\n");
+ status_write = USBH_PORTSTAT_OVRCURIC;
+ break;
+ case USB_PORT_FEAT_C_RESET:
+ dev_dbg(imx21->dev, " C_RESET\n");
+ status_write = USBH_PORTSTAT_PRTRSTSC;
+ break;
+ default:
+ dev_dbg(imx21->dev, " unknown\n");
+ rc = -EINVAL;
+ break;
+ }
+
+ break;
+
+ case GetHubDescriptor:
+ dev_dbg(imx21->dev, "GetHubDescriptor\n");
+ rc = get_hub_descriptor(hcd, (void *)buf);
+ break;
+
+ case GetHubStatus:
+ dev_dbg(imx21->dev, " GetHubStatus\n");
+ *(__le32 *) buf = 0;
+ break;
+
+ case GetPortStatus:
+ dev_dbg(imx21->dev, "GetPortStatus: port: %d, 0x%x\n",
+ wIndex, USBH_PORTSTAT(wIndex - 1));
+ *(__le32 *) buf = readl(imx21->regs +
+ USBH_PORTSTAT(wIndex - 1));
+ break;
+
+ case SetHubFeature:
+ dev_dbg(imx21->dev, "SetHubFeature\n");
+ switch (wValue) {
+ case C_HUB_OVER_CURRENT:
+ dev_dbg(imx21->dev, " OVER_CURRENT\n");
+ break;
+
+ case C_HUB_LOCAL_POWER:
+ dev_dbg(imx21->dev, " LOCAL_POWER\n");
+ break;
+ default:
+ dev_dbg(imx21->dev, " unknown\n");
+ rc = -EINVAL;
+ break;
+ }
+
+ break;
+
+ case SetPortFeature:
+ dev_dbg(imx21->dev, "SetPortFeature\n");
+ switch (wValue) {
+ case USB_PORT_FEAT_SUSPEND:
+ dev_dbg(imx21->dev, " SUSPEND\n");
+ status_write = USBH_PORTSTAT_PRTSUSPST;
+ break;
+ case USB_PORT_FEAT_POWER:
+ dev_dbg(imx21->dev, " POWER\n");
+ status_write = USBH_PORTSTAT_PRTPWRST;
+ break;
+ case USB_PORT_FEAT_RESET:
+ dev_dbg(imx21->dev, " RESET\n");
+ status_write = USBH_PORTSTAT_PRTRSTST;
+ break;
+ default:
+ dev_dbg(imx21->dev, " unknown\n");
+ rc = -EINVAL;
+ break;
+ }
+ break;
+
+ default:
+ dev_dbg(imx21->dev, " unknown\n");
+ rc = -EINVAL;
+ break;
+ }
+
+ if (status_write)
+ writel(status_write, imx21->regs + USBH_PORTSTAT(wIndex - 1));
+ return rc;
+}
+
+/* =========================================== */
+/* Host controller management */
+/* =========================================== */
+
+static int imx21_hc_reset(struct usb_hcd *hcd)
+{
+ struct imx21 *imx21 = hcd_to_imx21(hcd);
+ unsigned long timeout;
+ unsigned long flags;
+
+ spin_lock_irqsave(&imx21->lock, flags);
+
+ /* Reset the host controller modules */
+ writel(USBOTG_RST_RSTCTRL | USBOTG_RST_RSTRH |
+ USBOTG_RST_RSTHSIE | USBOTG_RST_RSTHC,
+ imx21->regs + USBOTG_RST_CTRL);
+
+ /* Wait for reset to finish */
+ timeout = jiffies + HZ;
+ while (readl(imx21->regs + USBOTG_RST_CTRL) != 0) {
+ if (time_after(jiffies, timeout)) {
+ spin_unlock_irqrestore(&imx21->lock, flags);
+ dev_err(imx21->dev, "timeout waiting for reset\n");
+ return -ETIMEDOUT;
+ }
+ spin_unlock_irq(&imx21->lock);
+ schedule_timeout(1);
+ spin_lock_irq(&imx21->lock);
+ }
+ spin_unlock_irqrestore(&imx21->lock, flags);
+ return 0;
+}
+
+static int __devinit imx21_hc_start(struct usb_hcd *hcd)
+{
+ struct imx21 *imx21 = hcd_to_imx21(hcd);
+ unsigned long flags;
+ int i, j;
+ u32 hw_mode = USBOTG_HWMODE_CRECFG_HOST;
+ u32 usb_control = 0;
+
+ hw_mode |= ((imx21->pdata->host_xcvr << USBOTG_HWMODE_HOSTXCVR_SHIFT) &
+ USBOTG_HWMODE_HOSTXCVR_MASK);
+ hw_mode |= ((imx21->pdata->otg_xcvr << USBOTG_HWMODE_OTGXCVR_SHIFT) &
+ USBOTG_HWMODE_OTGXCVR_MASK);
+
+ if (imx21->pdata->host1_txenoe)
+ usb_control |= USBCTRL_HOST1_TXEN_OE;
+
+ if (!imx21->pdata->host1_xcverless)
+ usb_control |= USBCTRL_HOST1_BYP_TLL;
+
+ if (imx21->pdata->otg_ext_xcvr)
+ usb_control |= USBCTRL_OTC_RCV_RXDP;
+
+
+ spin_lock_irqsave(&imx21->lock, flags);
+
+ writel((USBOTG_CLK_CTRL_HST | USBOTG_CLK_CTRL_MAIN),
+ imx21->regs + USBOTG_CLK_CTRL);
+ writel(hw_mode, imx21->regs + USBOTG_HWMODE);
+ writel(usb_control, imx21->regs + USBCTRL);
+ writel(USB_MISCCONTROL_SKPRTRY | USB_MISCCONTROL_ARBMODE,
+ imx21->regs + USB_MISCCONTROL);
+
+ /* Clear the ETDs */
+ for (i = 0; i < USB_NUM_ETD; i++)
+ for (j = 0; j < 4; j++)
+ etd_writel(imx21, i, j, 0);
+
+ /* Take the HC out of reset */
+ writel(USBH_HOST_CTRL_HCUSBSTE_OPERATIONAL | USBH_HOST_CTRL_CTLBLKSR_1,
+ imx21->regs + USBH_HOST_CTRL);
+
+ /* Enable ports */
+ if (imx21->pdata->enable_otg_host)
+ writel(USBH_PORTSTAT_PRTPWRST | USBH_PORTSTAT_PRTENABST,
+ imx21->regs + USBH_PORTSTAT(0));
+
+ if (imx21->pdata->enable_host1)
+ writel(USBH_PORTSTAT_PRTPWRST | USBH_PORTSTAT_PRTENABST,
+ imx21->regs + USBH_PORTSTAT(1));
+
+ if (imx21->pdata->enable_host2)
+ writel(USBH_PORTSTAT_PRTPWRST | USBH_PORTSTAT_PRTENABST,
+ imx21->regs + USBH_PORTSTAT(2));
+
+
+ hcd->state = HC_STATE_RUNNING;
+
+ /* Enable host controller interrupts */
+ set_register_bits(imx21, USBH_SYSIEN,
+ USBH_SYSIEN_HERRINT |
+ USBH_SYSIEN_DONEINT | USBH_SYSIEN_SORINT);
+ set_register_bits(imx21, USBOTG_CINT_STEN, USBOTG_HCINT);
+
+ spin_unlock_irqrestore(&imx21->lock, flags);
+
+ return 0;
+}
+
+static void imx21_hc_stop(struct usb_hcd *hcd)
+{
+ struct imx21 *imx21 = hcd_to_imx21(hcd);
+ unsigned long flags;
+
+ spin_lock_irqsave(&imx21->lock, flags);
+
+ writel(0, imx21->regs + USBH_SYSIEN);
+ clear_register_bits(imx21, USBOTG_CINT_STEN, USBOTG_HCINT);
+ clear_register_bits(imx21, USBOTG_CLK_CTRL,
+ USBOTG_CLK_CTRL_HST | USBOTG_CLK_CTRL_MAIN);
+ spin_unlock_irqrestore(&imx21->lock, flags);
+}
+
+/* =========================================== */
+/* Driver glue */
+/* =========================================== */
+
+static struct hc_driver imx21_hc_driver = {
+ .description = hcd_name,
+ .product_desc = "IMX21 USB Host Controller",
+ .hcd_priv_size = sizeof(struct imx21),
+
+ .flags = HCD_USB11,
+ .irq = imx21_irq,
+
+ .reset = imx21_hc_reset,
+ .start = imx21_hc_start,
+ .stop = imx21_hc_stop,
+
+ /* I/O requests */
+ .urb_enqueue = imx21_hc_urb_enqueue,
+ .urb_dequeue = imx21_hc_urb_dequeue,
+ .endpoint_disable = imx21_hc_endpoint_disable,
+
+ /* scheduling support */
+ .get_frame_number = imx21_hc_get_frame,
+
+ /* Root hub support */
+ .hub_status_data = imx21_hc_hub_status_data,
+ .hub_control = imx21_hc_hub_control,
+
+};
+
+static struct mx21_usbh_platform_data default_pdata = {
+ .host_xcvr = MX21_USBXCVR_TXDIF_RXDIF,
+ .otg_xcvr = MX21_USBXCVR_TXDIF_RXDIF,
+ .enable_host1 = 1,
+ .enable_host2 = 1,
+ .enable_otg_host = 1,
+
+};
+
+static int imx21_remove(struct platform_device *pdev)
+{
+ struct usb_hcd *hcd = platform_get_drvdata(pdev);
+ struct imx21 *imx21 = hcd_to_imx21(hcd);
+ struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+
+ remove_debug_files(imx21);
+ usb_remove_hcd(hcd);
+
+ if (res != NULL) {
+ clk_disable(imx21->clk);
+ clk_put(imx21->clk);
+ iounmap(imx21->regs);
+ release_mem_region(res->start, resource_size(res));
+ }
+
+ usb_put_hcd(hcd);
+ return 0;
+}
+
+
+static int imx21_probe(struct platform_device *pdev)
+{
+ struct usb_hcd *hcd;
+ struct imx21 *imx21;
+ struct resource *res;
+ int ret;
+ int irq;
+
+ printk(KERN_INFO "%s\n", imx21_hc_driver.product_desc);
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res)
+ return -ENODEV;
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0)
+ return -ENXIO;
+
+ hcd = usb_create_hcd(&imx21_hc_driver,
+ &pdev->dev, dev_name(&pdev->dev));
+ if (hcd == NULL) {
+ dev_err(&pdev->dev, "Cannot create hcd (%s)\n",
+ dev_name(&pdev->dev));
+ return -ENOMEM;
+ }
+
+ imx21 = hcd_to_imx21(hcd);
+ imx21->dev = &pdev->dev;
+ imx21->pdata = pdev->dev.platform_data;
+ if (!imx21->pdata)
+ imx21->pdata = &default_pdata;
+
+ spin_lock_init(&imx21->lock);
+ INIT_LIST_HEAD(&imx21->dmem_list);
+ INIT_LIST_HEAD(&imx21->queue_for_etd);
+ INIT_LIST_HEAD(&imx21->queue_for_dmem);
+ create_debug_files(imx21);
+
+ res = request_mem_region(res->start, resource_size(res), hcd_name);
+ if (!res) {
+ ret = -EBUSY;
+ goto failed_request_mem;
+ }
+
+ imx21->regs = ioremap(res->start, resource_size(res));
+ if (imx21->regs == NULL) {
+ dev_err(imx21->dev, "Cannot map registers\n");
+ ret = -ENOMEM;
+ goto failed_ioremap;
+ }
+
+ /* Enable the clock source */
+ imx21->clk = clk_get(imx21->dev, NULL);
+ if (IS_ERR(imx21->clk)) {
+ dev_err(imx21->dev, "no clock found\n");
+ ret = PTR_ERR(imx21->clk);
+ goto failed_clock_get;
+ }
+
+ ret = clk_set_rate(imx21->clk, clk_round_rate(imx21->clk, 48000000));
+ if (ret)
+ goto failed_clock_set;
+ ret = clk_enable(imx21->clk);
+ if (ret)
+ goto failed_clock_enable;
+
+ dev_info(imx21->dev, "Hardware HC revision: 0x%02X\n",
+ (readl(imx21->regs + USBOTG_HWMODE) >> 16) & 0xFF);
+
+ ret = usb_add_hcd(hcd, irq, IRQF_DISABLED);
+ if (ret != 0) {
+ dev_err(imx21->dev, "usb_add_hcd() returned %d\n", ret);
+ goto failed_add_hcd;
+ }
+
+ return 0;
+
+failed_add_hcd:
+ clk_disable(imx21->clk);
+failed_clock_enable:
+failed_clock_set:
+ clk_put(imx21->clk);
+failed_clock_get:
+ iounmap(imx21->regs);
+failed_ioremap:
+ release_mem_region(res->start, resource_size(res));
+failed_request_mem:
+ remove_debug_files(imx21);
+ usb_put_hcd(hcd);
+ return ret;
+}
+
+static struct platform_driver imx21_hcd_driver = {
+ .driver = {
+ .name = (char *)hcd_name,
+ },
+ .probe = imx21_probe,
+ .remove = imx21_remove,
+ .suspend = NULL,
+ .resume = NULL,
+};
+
+static int __init imx21_hcd_init(void)
+{
+ return platform_driver_register(&imx21_hcd_driver);
+}
+
+static void __exit imx21_hcd_cleanup(void)
+{
+ platform_driver_unregister(&imx21_hcd_driver);
+}
+
+module_init(imx21_hcd_init);
+module_exit(imx21_hcd_cleanup);
+
+MODULE_DESCRIPTION("i.MX21 USB Host controller");
+MODULE_AUTHOR("Martin Fuzzey");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:imx21-hcd");
diff --git a/drivers/usb/host/imx21-hcd.h b/drivers/usb/host/imx21-hcd.h
new file mode 100644
index 0000000..1b0d913
--- /dev/null
+++ b/drivers/usb/host/imx21-hcd.h
@@ -0,0 +1,436 @@
+/*
+ * Macros and prototypes for i.MX21
+ *
+ * Copyright (C) 2006 Loping Dog Embedded Systems
+ * Copyright (C) 2009 Martin Fuzzey
+ * Originally written by Jay Monkman <jtm@lopingdog.com>
+ * Ported to 2.6.30, debugged and enhanced by Martin Fuzzey
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+ * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software Foundation,
+ * Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#ifndef __LINUX_IMX21_HCD_H__
+#define __LINUX_IMX21_HCD_H__
+
+#include <mach/mx21-usbhost.h>
+
+#define NUM_ISO_ETDS 2
+#define USB_NUM_ETD 32
+#define DMEM_SIZE 4096
+
+/* Register definitions */
+#define USBOTG_HWMODE 0x00
+#define USBOTG_HWMODE_ANASDBEN (1 << 14)
+#define USBOTG_HWMODE_OTGXCVR_SHIFT 6
+#define USBOTG_HWMODE_OTGXCVR_MASK (3 << 6)
+#define USBOTG_HWMODE_OTGXCVR_TD_RD (0 << 6)
+#define USBOTG_HWMODE_OTGXCVR_TS_RD (2 << 6)
+#define USBOTG_HWMODE_OTGXCVR_TD_RS (1 << 6)
+#define USBOTG_HWMODE_OTGXCVR_TS_RS (3 << 6)
+#define USBOTG_HWMODE_HOSTXCVR_SHIFT 4
+#define USBOTG_HWMODE_HOSTXCVR_MASK (3 << 4)
+#define USBOTG_HWMODE_HOSTXCVR_TD_RD (0 << 4)
+#define USBOTG_HWMODE_HOSTXCVR_TS_RD (2 << 4)
+#define USBOTG_HWMODE_HOSTXCVR_TD_RS (1 << 4)
+#define USBOTG_HWMODE_HOSTXCVR_TS_RS (3 << 4)
+#define USBOTG_HWMODE_CRECFG_MASK (3 << 0)
+#define USBOTG_HWMODE_CRECFG_HOST (1 << 0)
+#define USBOTG_HWMODE_CRECFG_FUNC (2 << 0)
+#define USBOTG_HWMODE_CRECFG_HNP (3 << 0)
+
+#define USBOTG_CINT_STAT 0x04
+#define USBOTG_CINT_STEN 0x08
+#define USBOTG_ASHNPINT (1 << 5)
+#define USBOTG_ASFCINT (1 << 4)
+#define USBOTG_ASHCINT (1 << 3)
+#define USBOTG_SHNPINT (1 << 2)
+#define USBOTG_FCINT (1 << 1)
+#define USBOTG_HCINT (1 << 0)
+
+#define USBOTG_CLK_CTRL 0x0c
+#define USBOTG_CLK_CTRL_FUNC (1 << 2)
+#define USBOTG_CLK_CTRL_HST (1 << 1)
+#define USBOTG_CLK_CTRL_MAIN (1 << 0)
+
+#define USBOTG_RST_CTRL 0x10
+#define USBOTG_RST_RSTI2C (1 << 15)
+#define USBOTG_RST_RSTCTRL (1 << 5)
+#define USBOTG_RST_RSTFC (1 << 4)
+#define USBOTG_RST_RSTFSKE (1 << 3)
+#define USBOTG_RST_RSTRH (1 << 2)
+#define USBOTG_RST_RSTHSIE (1 << 1)
+#define USBOTG_RST_RSTHC (1 << 0)
+
+#define USBOTG_FRM_INTVL 0x14
+#define USBOTG_FRM_REMAIN 0x18
+#define USBOTG_HNP_CSR 0x1c
+#define USBOTG_HNP_ISR 0x2c
+#define USBOTG_HNP_IEN 0x30
+
+#define USBOTG_I2C_TXCVR_REG(x) (0x100 + (x))
+#define USBOTG_I2C_XCVR_DEVAD 0x118
+#define USBOTG_I2C_SEQ_OP_REG 0x119
+#define USBOTG_I2C_SEQ_RD_STARTAD 0x11a
+#define USBOTG_I2C_OP_CTRL_REG 0x11b
+#define USBOTG_I2C_SCLK_TO_SCK_HPER 0x11e
+#define USBOTG_I2C_MASTER_INT_REG 0x11f
+
+#define USBH_HOST_CTRL 0x80
+#define USBH_HOST_CTRL_HCRESET (1 << 31)
+#define USBH_HOST_CTRL_SCHDOVR(x) ((x) << 16)
+#define USBH_HOST_CTRL_RMTWUEN (1 << 4)
+#define USBH_HOST_CTRL_HCUSBSTE_RESET (0 << 2)
+#define USBH_HOST_CTRL_HCUSBSTE_RESUME (1 << 2)
+#define USBH_HOST_CTRL_HCUSBSTE_OPERATIONAL (2 << 2)
+#define USBH_HOST_CTRL_HCUSBSTE_SUSPEND (3 << 2)
+#define USBH_HOST_CTRL_CTLBLKSR_1 (0 << 0)
+#define USBH_HOST_CTRL_CTLBLKSR_2 (1 << 0)
+#define USBH_HOST_CTRL_CTLBLKSR_3 (2 << 0)
+#define USBH_HOST_CTRL_CTLBLKSR_4 (3 << 0)
+
+#define USBH_SYSISR 0x88
+#define USBH_SYSISR_PSCINT (1 << 6)
+#define USBH_SYSISR_FMOFINT (1 << 5)
+#define USBH_SYSISR_HERRINT (1 << 4)
+#define USBH_SYSISR_RESDETINT (1 << 3)
+#define USBH_SYSISR_SOFINT (1 << 2)
+#define USBH_SYSISR_DONEINT (1 << 1)
+#define USBH_SYSISR_SORINT (1 << 0)
+
+#define USBH_SYSIEN 0x8c
+#define USBH_SYSIEN_PSCINT (1 << 6)
+#define USBH_SYSIEN_FMOFINT (1 << 5)
+#define USBH_SYSIEN_HERRINT (1 << 4)
+#define USBH_SYSIEN_RESDETINT (1 << 3)
+#define USBH_SYSIEN_SOFINT (1 << 2)
+#define USBH_SYSIEN_DONEINT (1 << 1)
+#define USBH_SYSIEN_SORINT (1 << 0)
+
+#define USBH_XBUFSTAT 0x98
+#define USBH_YBUFSTAT 0x9c
+#define USBH_XYINTEN 0xa0
+#define USBH_XFILLSTAT 0xa8
+#define USBH_YFILLSTAT 0xac
+#define USBH_ETDENSET 0xc0
+#define USBH_ETDENCLR 0xc4
+#define USBH_IMMEDINT 0xcc
+#define USBH_ETDDONESTAT 0xd0
+#define USBH_ETDDONEEN 0xd4
+#define USBH_FRMNUB 0xe0
+#define USBH_LSTHRESH 0xe4
+
+#define USBH_ROOTHUBA 0xe8
+#define USBH_ROOTHUBA_PWRTOGOOD_MASK (0xff)
+#define USBH_ROOTHUBA_PWRTOGOOD_SHIFT (24)
+#define USBH_ROOTHUBA_NOOVRCURP (1 << 12)
+#define USBH_ROOTHUBA_OVRCURPM (1 << 11)
+#define USBH_ROOTHUBA_DEVTYPE (1 << 10)
+#define USBH_ROOTHUBA_PWRSWTMD (1 << 9)
+#define USBH_ROOTHUBA_NOPWRSWT (1 << 8)
+#define USBH_ROOTHUBA_NDNSTMPRT_MASK (0xff)
+
+#define USBH_ROOTHUBB 0xec
+#define USBH_ROOTHUBB_PRTPWRCM(x) (1 << ((x) + 16))
+#define USBH_ROOTHUBB_DEVREMOVE(x) (1 << (x))
+
+#define USBH_ROOTSTAT 0xf0
+#define USBH_ROOTSTAT_CLRRMTWUE (1 << 31)
+#define USBH_ROOTSTAT_OVRCURCHG (1 << 17)
+#define USBH_ROOTSTAT_DEVCONWUE (1 << 15)
+#define USBH_ROOTSTAT_OVRCURI (1 << 1)
+#define USBH_ROOTSTAT_LOCPWRS (1 << 0)
+
+#define USBH_PORTSTAT(x) (0xf4 + ((x) * 4))
+#define USBH_PORTSTAT_PRTRSTSC (1 << 20)
+#define USBH_PORTSTAT_OVRCURIC (1 << 19)
+#define USBH_PORTSTAT_PRTSTATSC (1 << 18)
+#define USBH_PORTSTAT_PRTENBLSC (1 << 17)
+#define USBH_PORTSTAT_CONNECTSC (1 << 16)
+#define USBH_PORTSTAT_LSDEVCON (1 << 9)
+#define USBH_PORTSTAT_PRTPWRST (1 << 8)
+#define USBH_PORTSTAT_PRTRSTST (1 << 4)
+#define USBH_PORTSTAT_PRTOVRCURI (1 << 3)
+#define USBH_PORTSTAT_PRTSUSPST (1 << 2)
+#define USBH_PORTSTAT_PRTENABST (1 << 1)
+#define USBH_PORTSTAT_CURCONST (1 << 0)
+
+#define USB_DMAREV 0x800
+#define USB_DMAINTSTAT 0x804
+#define USB_DMAINTSTAT_EPERR (1 << 1)
+#define USB_DMAINTSTAT_ETDERR (1 << 0)
+
+#define USB_DMAINTEN 0x808
+#define USB_DMAINTEN_EPERRINTEN (1 << 1)
+#define USB_DMAINTEN_ETDERRINTEN (1 << 0)
+
+#define USB_ETDDMAERSTAT 0x80c
+#define USB_EPDMAERSTAT 0x810
+#define USB_ETDDMAEN 0x820
+#define USB_EPDMAEN 0x824
+#define USB_ETDDMAXTEN 0x828
+#define USB_EPDMAXTEN 0x82c
+#define USB_ETDDMAENXYT 0x830
+#define USB_EPDMAENXYT 0x834
+#define USB_ETDDMABST4EN 0x838
+#define USB_EPDMABST4EN 0x83c
+
+#define USB_MISCCONTROL 0x840
+#define USB_MISCCONTROL_ISOPREVFRM (1 << 3)
+#define USB_MISCCONTROL_SKPRTRY (1 << 2)
+#define USB_MISCCONTROL_ARBMODE (1 << 1)
+#define USB_MISCCONTROL_FILTCC (1 << 0)
+
+#define USB_ETDDMACHANLCLR 0x848
+#define USB_EPDMACHANLCLR 0x84c
+#define USB_ETDSMSA(x) (0x900 + ((x) * 4))
+#define USB_EPSMSA(x) (0x980 + ((x) * 4))
+#define USB_ETDDMABUFPTR(x) (0xa00 + ((x) * 4))
+#define USB_EPDMABUFPTR(x) (0xa80 + ((x) * 4))
+
+#define USB_ETD_DWORD(x, w) (0x200 + ((x) * 16) + ((w) * 4))
+#define DW0_ADDRESS 0
+#define DW0_ENDPNT 7
+#define DW0_DIRECT 11
+#define DW0_SPEED 13
+#define DW0_FORMAT 14
+#define DW0_MAXPKTSIZ 16
+#define DW0_HALTED 27
+#define DW0_TOGCRY 28
+#define DW0_SNDNAK 30
+
+#define DW1_XBUFSRTAD 0
+#define DW1_YBUFSRTAD 16
+
+#define DW2_RTRYDELAY 0
+#define DW2_POLINTERV 0
+#define DW2_STARTFRM 0
+#define DW2_RELPOLPOS 8
+#define DW2_DIRPID 16
+#define DW2_BUFROUND 18
+#define DW2_DELAYINT 19
+#define DW2_DATATOG 22
+#define DW2_ERRORCNT 24
+#define DW2_COMPCODE 28
+
+#define DW3_TOTBYECNT 0
+#define DW3_PKTLEN0 0
+#define DW3_COMPCODE0 12
+#define DW3_PKTLEN1 16
+#define DW3_BUFSIZE 21
+#define DW3_COMPCODE1 28
+
+#define USBCTRL 0x600
+#define USBCTRL_I2C_WU_INT_STAT (1 << 27)
+#define USBCTRL_OTG_WU_INT_STAT (1 << 26)
+#define USBCTRL_HOST_WU_INT_STAT (1 << 25)
+#define USBCTRL_FNT_WU_INT_STAT (1 << 24)
+#define USBCTRL_I2C_WU_INT_EN (1 << 19)
+#define USBCTRL_OTG_WU_INT_EN (1 << 18)
+#define USBCTRL_HOST_WU_INT_EN (1 << 17)
+#define USBCTRL_FNT_WU_INT_EN (1 << 16)
+#define USBCTRL_OTC_RCV_RXDP (1 << 13)
+#define USBCTRL_HOST1_BYP_TLL (1 << 12)
+#define USBCTRL_OTG_BYP_VAL(x) ((x) << 10)
+#define USBCTRL_HOST1_BYP_VAL(x) ((x) << 8)
+#define USBCTRL_OTG_PWR_MASK (1 << 6)
+#define USBCTRL_HOST1_PWR_MASK (1 << 5)
+#define USBCTRL_HOST2_PWR_MASK (1 << 4)
+#define USBCTRL_USB_BYP (1 << 2)
+#define USBCTRL_HOST1_TXEN_OE (1 << 1)
+
+
+/* Values in TD blocks */
+#define TD_DIR_SETUP 0
+#define TD_DIR_OUT 1
+#define TD_DIR_IN 2
+#define TD_FORMAT_CONTROL 0
+#define TD_FORMAT_ISO 1
+#define TD_FORMAT_BULK 2
+#define TD_FORMAT_INT 3
+#define TD_TOGGLE_CARRY 0
+#define TD_TOGGLE_DATA0 2
+#define TD_TOGGLE_DATA1 3
+
+/* control transfer states */
+#define US_CTRL_SETUP 2
+#define US_CTRL_DATA 1
+#define US_CTRL_ACK 0
+
+/* bulk transfer main state and 0-length packet */
+#define US_BULK 1
+#define US_BULK0 0
+
+/* ETD format description */
+#define IMX_FMT_CTRL 0x0
+#define IMX_FMT_ISO 0x1
+#define IMX_FMT_BULK 0x2
+#define IMX_FMT_INT 0x3
+
+static char fmt_urb_to_etd[4] = {
+/*PIPE_ISOCHRONOUS*/ IMX_FMT_ISO,
+/*PIPE_INTERRUPT*/ IMX_FMT_INT,
+/*PIPE_CONTROL*/ IMX_FMT_CTRL,
+/*PIPE_BULK*/ IMX_FMT_BULK
+};
+
+/* condition (error) CC codes and mapping (OHCI like) */
+
+#define TD_CC_NOERROR 0x00
+#define TD_CC_CRC 0x01
+#define TD_CC_BITSTUFFING 0x02
+#define TD_CC_DATATOGGLEM 0x03
+#define TD_CC_STALL 0x04
+#define TD_DEVNOTRESP 0x05
+#define TD_PIDCHECKFAIL 0x06
+/*#define TD_UNEXPECTEDPID 0x07 - reserved, not active on MX2*/
+#define TD_DATAOVERRUN 0x08
+#define TD_DATAUNDERRUN 0x09
+#define TD_BUFFEROVERRUN 0x0C
+#define TD_BUFFERUNDERRUN 0x0D
+#define TD_SCHEDULEOVERRUN 0x0E
+#define TD_NOTACCESSED 0x0F
+
+static const int cc_to_error[16] = {
+ /* No Error */ 0,
+ /* CRC Error */ -EILSEQ,
+ /* Bit Stuff */ -EPROTO,
+ /* Data Togg */ -EILSEQ,
+ /* Stall */ -EPIPE,
+ /* DevNotResp */ -ETIMEDOUT,
+ /* PIDCheck */ -EPROTO,
+ /* UnExpPID */ -EPROTO,
+ /* DataOver */ -EOVERFLOW,
+ /* DataUnder */ -EREMOTEIO,
+ /* (for hw) */ -EIO,
+ /* (for hw) */ -EIO,
+ /* BufferOver */ -ECOMM,
+ /* BuffUnder */ -ENOSR,
+ /* (for HCD) */ -ENOSPC,
+ /* (for HCD) */ -EALREADY
+};
+
+/* HCD data associated with a usb core URB */
+struct urb_priv {
+ struct urb *urb;
+ struct usb_host_endpoint *ep;
+ int active;
+ int state;
+ struct td *isoc_td;
+ int isoc_remaining;
+ int isoc_status;
+};
+
+/* HCD data associated with a usb core endpoint */
+struct ep_priv {
+ struct usb_host_endpoint *ep;
+ struct list_head td_list;
+ struct list_head queue;
+ int etd[NUM_ISO_ETDS];
+ int waiting_etd;
+};
+
+/* isoc packet */
+struct td {
+ struct list_head list;
+ struct urb *urb;
+ struct usb_host_endpoint *ep;
+ dma_addr_t data;
+ unsigned long buf_addr;
+ int len;
+ int frame;
+ int isoc_index;
+};
+
+/* HCD data associated with a hardware ETD */
+struct etd_priv {
+ struct usb_host_endpoint *ep;
+ struct urb *urb;
+ struct td *td;
+ struct list_head queue;
+ dma_addr_t dma_handle;
+ int alloc;
+ int len;
+ int dmem_size;
+ int dmem_offset;
+ int active_count;
+#ifdef DEBUG
+ int activated_frame;
+ int disactivated_frame;
+ int last_int_frame;
+ int last_req_frame;
+ u32 submitted_dwords[4];
+#endif
+};
+
+/* Hardware data memory info */
+struct imx21_dmem_area {
+ struct usb_host_endpoint *ep;
+ unsigned int offset;
+ unsigned int size;
+ struct list_head list;
+};
+
+#ifdef DEBUG
+struct debug_usage_stats {
+ unsigned int value;
+ unsigned int maximum;
+};
+
+struct debug_stats {
+ unsigned long submitted;
+ unsigned long completed_ok;
+ unsigned long completed_failed;
+ unsigned long unlinked;
+ unsigned long queue_etd;
+ unsigned long queue_dmem;
+};
+
+struct debug_isoc_trace {
+ int schedule_frame;
+ int submit_frame;
+ int request_len;
+ int done_frame;
+ int done_len;
+ int cc;
+ struct td *td;
+};
+#endif
+
+/* HCD data structure */
+struct imx21 {
+ spinlock_t lock;
+ struct device *dev;
+ struct mx21_usbh_platform_data *pdata;
+ struct list_head dmem_list;
+ struct list_head queue_for_etd; /* eps queued due to etd shortage */
+ struct list_head queue_for_dmem; /* etds queued due to dmem shortage */
+ struct etd_priv etd[USB_NUM_ETD];
+ struct clk *clk;
+ void __iomem *regs;
+#ifdef DEBUG
+ struct dentry *debug_root;
+ struct debug_stats nonisoc_stats;
+ struct debug_stats isoc_stats;
+ struct debug_usage_stats etd_usage;
+ struct debug_usage_stats dmem_usage;
+ struct debug_isoc_trace isoc_trace[20];
+ struct debug_isoc_trace isoc_trace_failed[20];
+ unsigned long debug_unblocks;
+ int isoc_trace_index;
+ int isoc_trace_index_failed;
+#endif
+};
+
+#endif
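A note on the DW0_/DW1_/DW2_/DW3_ definitions above: they are bit offsets into the four 32-bit ETD dwords, not masks. A hedged sketch of how dword 0 might be packed from them; the helper name and parameter list are hypothetical, and the field widths are only implied by the adjacent offsets.

/* Hypothetical helper: pack ETD dword 0 using the DW0_* bit offsets. */
static inline u32 imx21_pack_etd_dword0(unsigned dev_addr, unsigned ep_num,
					unsigned dir, unsigned speed,
					unsigned format, unsigned maxpacket)
{
	return (dev_addr << DW0_ADDRESS) |	/* USB device address */
	       (ep_num << DW0_ENDPNT) |		/* endpoint number */
	       (dir << DW0_DIRECT) |		/* TD_DIR_SETUP/OUT/IN */
	       (speed << DW0_SPEED) |		/* low-speed flag */
	       (format << DW0_FORMAT) |		/* IMX_FMT_CTRL/ISO/BULK/INT */
	       (maxpacket << DW0_MAXPKTSIZ);	/* wMaxPacketSize */
}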
diff --git a/drivers/usb/host/isp1362-hcd.c b/drivers/usb/host/isp1362-hcd.c
index 4297165..217fb51 100644
--- a/drivers/usb/host/isp1362-hcd.c
+++ b/drivers/usb/host/isp1362-hcd.c
@@ -1257,7 +1257,7 @@
/* avoid all allocations within spinlocks: request or endpoint */
if (!hep->hcpriv) {
- ep = kcalloc(1, sizeof *ep, mem_flags);
+ ep = kzalloc(sizeof *ep, mem_flags);
if (!ep)
return -ENOMEM;
}
@@ -2719,24 +2719,11 @@
}
irq = irq_res->start;
-#ifdef CONFIG_USB_HCD_DMA
- if (pdev->dev.dma_mask) {
- struct resource *dma_res = platform_get_resource(pdev, IORESOURCE_MEM, 2);
-
- if (!dma_res) {
- retval = -ENODEV;
- goto err1;
- }
- isp1362_hcd->data_dma = dma_res->start;
- isp1362_hcd->max_dma_size = resource_len(dma_res);
- }
-#else
if (pdev->dev.dma_mask) {
DBG(1, "won't do DMA");
retval = -ENODEV;
goto err1;
}
-#endif
if (!request_mem_region(addr->start, resource_len(addr), hcd_name)) {
retval = -EBUSY;
diff --git a/drivers/usb/host/isp1760-hcd.c b/drivers/usb/host/isp1760-hcd.c
index 27b8f7c..9f01293 100644
--- a/drivers/usb/host/isp1760-hcd.c
+++ b/drivers/usb/host/isp1760-hcd.c
@@ -17,7 +17,9 @@
#include <linux/debugfs.h>
#include <linux/uaccess.h>
#include <linux/io.h>
+#include <linux/mm.h>
#include <asm/unaligned.h>
+#include <asm/cacheflush.h>
#include "../core/hcd.h"
#include "isp1760-hcd.h"
@@ -904,6 +906,14 @@
status = 0;
}
+ if (usb_pipein(urb->pipe) && usb_pipetype(urb->pipe) != PIPE_CONTROL) {
+ void *ptr;
+ for (ptr = urb->transfer_buffer;
+ ptr < urb->transfer_buffer + urb->transfer_buffer_length;
+ ptr += PAGE_SIZE)
+ flush_dcache_page(virt_to_page(ptr));
+ }
+
/* complete() can reenter this HCD */
usb_hcd_unlink_urb_from_ep(priv_to_hcd(priv), urb);
spin_unlock(&priv->lock);
diff --git a/drivers/usb/host/isp1760-if.c b/drivers/usb/host/isp1760-if.c
index 1c9f977..4293cfd 100644
--- a/drivers/usb/host/isp1760-if.c
+++ b/drivers/usb/host/isp1760-if.c
@@ -109,7 +109,7 @@
return 0;
}
-static struct of_device_id of_isp1760_match[] = {
+static const struct of_device_id of_isp1760_match[] = {
{
.compatible = "nxp,usb-isp1760",
},
diff --git a/drivers/usb/host/ohci-da8xx.c b/drivers/usb/host/ohci-da8xx.c
new file mode 100644
index 0000000..4aa08d3
--- /dev/null
+++ b/drivers/usb/host/ohci-da8xx.c
@@ -0,0 +1,456 @@
+/*
+ * OHCI HCD (Host Controller Driver) for USB.
+ *
+ * TI DA8xx (OMAP-L1x) Bus Glue
+ *
+ * Derived from: ohci-omap.c and ohci-s3c2410.c
+ * Copyright (C) 2008-2009 MontaVista Software, Inc. <source@mvista.com>
+ *
+ * This file is licensed under the terms of the GNU General Public License
+ * version 2. This program is licensed "as is" without any warranty of any
+ * kind, whether express or implied.
+ */
+
+#include <linux/interrupt.h>
+#include <linux/jiffies.h>
+#include <linux/platform_device.h>
+#include <linux/clk.h>
+
+#include <mach/da8xx.h>
+#include <mach/usb.h>
+
+#ifndef CONFIG_ARCH_DAVINCI_DA8XX
+#error "This file is DA8xx bus glue. Define CONFIG_ARCH_DAVINCI_DA8XX."
+#endif
+
+#define CFGCHIP2 DA8XX_SYSCFG_VIRT(DA8XX_CFGCHIP2_REG)
+
+static struct clk *usb11_clk;
+static struct clk *usb20_clk;
+
+/* Over-current indicator change bitmask */
+static volatile u16 ocic_mask;
+
+static void ohci_da8xx_clock(int on)
+{
+ u32 cfgchip2;
+
+ cfgchip2 = __raw_readl(CFGCHIP2);
+ if (on) {
+ clk_enable(usb11_clk);
+
+ /*
+		 * If the USB 1.1 reference clock comes from the USB 2.0 PHY,
+		 * we need to enable the USB 2.0 module clock, start its PHY,
+		 * and not allow it to stop the clock during USB 2.0 suspend.
+ */
+ if (!(cfgchip2 & CFGCHIP2_USB1PHYCLKMUX)) {
+ clk_enable(usb20_clk);
+
+ cfgchip2 &= ~(CFGCHIP2_RESET | CFGCHIP2_PHYPWRDN);
+ cfgchip2 |= CFGCHIP2_PHY_PLLON;
+ __raw_writel(cfgchip2, CFGCHIP2);
+
+ pr_info("Waiting for USB PHY clock good...\n");
+ while (!(__raw_readl(CFGCHIP2) & CFGCHIP2_PHYCLKGD))
+ cpu_relax();
+ }
+
+ /* Enable USB 1.1 PHY */
+ cfgchip2 |= CFGCHIP2_USB1SUSPENDM;
+ } else {
+ clk_disable(usb11_clk);
+ if (!(cfgchip2 & CFGCHIP2_USB1PHYCLKMUX))
+ clk_disable(usb20_clk);
+
+ /* Disable USB 1.1 PHY */
+ cfgchip2 &= ~CFGCHIP2_USB1SUSPENDM;
+ }
+ __raw_writel(cfgchip2, CFGCHIP2);
+}
+
+/*
+ * Handle the port over-current indicator change.
+ */
+static void ohci_da8xx_ocic_handler(struct da8xx_ohci_root_hub *hub,
+ unsigned port)
+{
+ ocic_mask |= 1 << port;
+
+ /* Once over-current is detected, the port needs to be powered down */
+ if (hub->get_oci(port) > 0)
+ hub->set_power(port, 0);
+}
+
+static int ohci_da8xx_init(struct usb_hcd *hcd)
+{
+ struct device *dev = hcd->self.controller;
+ struct da8xx_ohci_root_hub *hub = dev->platform_data;
+ struct ohci_hcd *ohci = hcd_to_ohci(hcd);
+ int result;
+ u32 rh_a;
+
+ dev_dbg(dev, "starting USB controller\n");
+
+ ohci_da8xx_clock(1);
+
+ /*
+	 * The DA8xx has only 1 port connected to the pins, but the HC root
+	 * hub register A reports 2 ports, so we have to override it...
+ */
+ ohci->num_ports = 1;
+
+ result = ohci_init(ohci);
+ if (result < 0)
+ return result;
+
+ /*
+ * Since we're providing a board-specific root hub port power control
+ * and over-current reporting, we have to override the HC root hub A
+	 * register's default value, so that ohci_hub_control() can return
+ * the correct hub descriptor...
+ */
+ rh_a = ohci_readl(ohci, &ohci->regs->roothub.a);
+ if (hub->set_power) {
+ rh_a &= ~RH_A_NPS;
+ rh_a |= RH_A_PSM;
+ }
+ if (hub->get_oci) {
+ rh_a &= ~RH_A_NOCP;
+ rh_a |= RH_A_OCPM;
+ }
+ rh_a &= ~RH_A_POTPGT;
+ rh_a |= hub->potpgt << 24;
+ ohci_writel(ohci, rh_a, &ohci->regs->roothub.a);
+
+ return result;
+}
+
+static void ohci_da8xx_stop(struct usb_hcd *hcd)
+{
+ ohci_stop(hcd);
+ ohci_da8xx_clock(0);
+}
+
+static int ohci_da8xx_start(struct usb_hcd *hcd)
+{
+ struct ohci_hcd *ohci = hcd_to_ohci(hcd);
+ int result;
+
+ result = ohci_run(ohci);
+ if (result < 0)
+ ohci_da8xx_stop(hcd);
+
+ return result;
+}
+
+/*
+ * Update the status data from the hub with the over-current indicator change.
+ */
+static int ohci_da8xx_hub_status_data(struct usb_hcd *hcd, char *buf)
+{
+ int length = ohci_hub_status_data(hcd, buf);
+
+ /* See if we have OCIC bit set on port 1 */
+ if (ocic_mask & (1 << 1)) {
+ dev_dbg(hcd->self.controller, "over-current indicator change "
+ "on port 1\n");
+
+ if (!length)
+ length = 1;
+
+ buf[0] |= 1 << 1;
+ }
+ return length;
+}
+
+/*
+ * Look at the control requests to the root hub and see if we need to override.
+ */
+static int ohci_da8xx_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
+ u16 wIndex, char *buf, u16 wLength)
+{
+ struct device *dev = hcd->self.controller;
+ struct da8xx_ohci_root_hub *hub = dev->platform_data;
+ int temp;
+
+ switch (typeReq) {
+ case GetPortStatus:
+ /* Check the port number */
+ if (wIndex != 1)
+ break;
+
+ dev_dbg(dev, "GetPortStatus(%u)\n", wIndex);
+
+ temp = roothub_portstatus(hcd_to_ohci(hcd), wIndex - 1);
+
+ /* The port power status (PPS) bit defaults to 1 */
+ if (hub->get_power && hub->get_power(wIndex) == 0)
+ temp &= ~RH_PS_PPS;
+
+ /* The port over-current indicator (POCI) bit is always 0 */
+ if (hub->get_oci && hub->get_oci(wIndex) > 0)
+ temp |= RH_PS_POCI;
+
+ /* The over-current indicator change (OCIC) bit is 0 too */
+ if (ocic_mask & (1 << wIndex))
+ temp |= RH_PS_OCIC;
+
+ put_unaligned(cpu_to_le32(temp), (__le32 *)buf);
+ return 0;
+ case SetPortFeature:
+ temp = 1;
+ goto check_port;
+ case ClearPortFeature:
+ temp = 0;
+
+check_port:
+ /* Check the port number */
+ if (wIndex != 1)
+ break;
+
+ switch (wValue) {
+ case USB_PORT_FEAT_POWER:
+ dev_dbg(dev, "%sPortFeature(%u): %s\n",
+ temp ? "Set" : "Clear", wIndex, "POWER");
+
+ if (!hub->set_power)
+ return -EPIPE;
+
+ return hub->set_power(wIndex, temp) ? -EPIPE : 0;
+ case USB_PORT_FEAT_C_OVER_CURRENT:
+ dev_dbg(dev, "%sPortFeature(%u): %s\n",
+ temp ? "Set" : "Clear", wIndex,
+ "C_OVER_CURRENT");
+
+ if (temp)
+ ocic_mask |= 1 << wIndex;
+ else
+ ocic_mask &= ~(1 << wIndex);
+ return 0;
+ }
+ }
+
+ return ohci_hub_control(hcd, typeReq, wValue, wIndex, buf, wLength);
+}
+
+static const struct hc_driver ohci_da8xx_hc_driver = {
+ .description = hcd_name,
+ .product_desc = "DA8xx OHCI",
+ .hcd_priv_size = sizeof(struct ohci_hcd),
+
+ /*
+ * generic hardware linkage
+ */
+ .irq = ohci_irq,
+ .flags = HCD_USB11 | HCD_MEMORY,
+
+ /*
+ * basic lifecycle operations
+ */
+ .reset = ohci_da8xx_init,
+ .start = ohci_da8xx_start,
+ .stop = ohci_da8xx_stop,
+ .shutdown = ohci_shutdown,
+
+ /*
+ * managing i/o requests and associated device resources
+ */
+ .urb_enqueue = ohci_urb_enqueue,
+ .urb_dequeue = ohci_urb_dequeue,
+ .endpoint_disable = ohci_endpoint_disable,
+
+ /*
+ * scheduling support
+ */
+ .get_frame_number = ohci_get_frame,
+
+ /*
+ * root hub support
+ */
+ .hub_status_data = ohci_da8xx_hub_status_data,
+ .hub_control = ohci_da8xx_hub_control,
+
+#ifdef CONFIG_PM
+ .bus_suspend = ohci_bus_suspend,
+ .bus_resume = ohci_bus_resume,
+#endif
+ .start_port_reset = ohci_start_port_reset,
+};
+
+/*-------------------------------------------------------------------------*/
+
+
+/**
+ * usb_hcd_da8xx_probe - initialize DA8xx-based HCDs
+ * Context: !in_interrupt()
+ *
+ * Allocates basic resources for this USB host controller, and
+ * then invokes the start() method for the HCD associated with it
+ * through the hotplug entry's driver_data.
+ */
+static int usb_hcd_da8xx_probe(const struct hc_driver *driver,
+ struct platform_device *pdev)
+{
+ struct da8xx_ohci_root_hub *hub = pdev->dev.platform_data;
+ struct usb_hcd *hcd;
+ struct resource *mem;
+ int error, irq;
+
+ if (hub == NULL)
+ return -ENODEV;
+
+ usb11_clk = clk_get(&pdev->dev, "usb11");
+ if (IS_ERR(usb11_clk))
+ return PTR_ERR(usb11_clk);
+
+ usb20_clk = clk_get(&pdev->dev, "usb20");
+ if (IS_ERR(usb20_clk)) {
+ error = PTR_ERR(usb20_clk);
+ goto err0;
+ }
+
+ hcd = usb_create_hcd(driver, &pdev->dev, dev_name(&pdev->dev));
+ if (!hcd) {
+ error = -ENOMEM;
+ goto err1;
+ }
+
+ mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!mem) {
+ error = -ENODEV;
+ goto err2;
+ }
+ hcd->rsrc_start = mem->start;
+ hcd->rsrc_len = mem->end - mem->start + 1;
+
+ if (!request_mem_region(hcd->rsrc_start, hcd->rsrc_len, hcd_name)) {
+ dev_dbg(&pdev->dev, "request_mem_region failed\n");
+ error = -EBUSY;
+ goto err2;
+ }
+
+ hcd->regs = ioremap(hcd->rsrc_start, hcd->rsrc_len);
+ if (!hcd->regs) {
+ dev_err(&pdev->dev, "ioremap failed\n");
+ error = -ENOMEM;
+ goto err3;
+ }
+
+ ohci_hcd_init(hcd_to_ohci(hcd));
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0) {
+ error = -ENODEV;
+ goto err4;
+ }
+ error = usb_add_hcd(hcd, irq, IRQF_DISABLED);
+ if (error)
+ goto err4;
+
+ if (hub->ocic_notify) {
+ error = hub->ocic_notify(ohci_da8xx_ocic_handler);
+ if (!error)
+ return 0;
+ }
+
+ usb_remove_hcd(hcd);
+err4:
+ iounmap(hcd->regs);
+err3:
+ release_mem_region(hcd->rsrc_start, hcd->rsrc_len);
+err2:
+ usb_put_hcd(hcd);
+err1:
+ clk_put(usb20_clk);
+err0:
+ clk_put(usb11_clk);
+ return error;
+}
+
+/**
+ * usb_hcd_da8xx_remove - shutdown processing for DA8xx-based HCDs
+ * @dev: USB Host Controller being removed
+ * Context: !in_interrupt()
+ *
+ * Reverses the effect of usb_hcd_da8xx_probe(), first invoking
+ * the HCD's stop() method. It is always called from a thread
+ * context, normally "rmmod", "apmd", or something similar.
+ */
+static inline void
+usb_hcd_da8xx_remove(struct usb_hcd *hcd, struct platform_device *pdev)
+{
+ struct da8xx_ohci_root_hub *hub = pdev->dev.platform_data;
+
+ hub->ocic_notify(NULL);
+ usb_remove_hcd(hcd);
+ iounmap(hcd->regs);
+ release_mem_region(hcd->rsrc_start, hcd->rsrc_len);
+ usb_put_hcd(hcd);
+ clk_put(usb20_clk);
+ clk_put(usb11_clk);
+}
+
+static int ohci_hcd_da8xx_drv_probe(struct platform_device *dev)
+{
+ return usb_hcd_da8xx_probe(&ohci_da8xx_hc_driver, dev);
+}
+
+static int ohci_hcd_da8xx_drv_remove(struct platform_device *dev)
+{
+ struct usb_hcd *hcd = platform_get_drvdata(dev);
+
+ usb_hcd_da8xx_remove(hcd, dev);
+ platform_set_drvdata(dev, NULL);
+
+ return 0;
+}
+
+#ifdef CONFIG_PM
+static int ohci_da8xx_suspend(struct platform_device *dev, pm_message_t message)
+{
+ struct usb_hcd *hcd = platform_get_drvdata(dev);
+ struct ohci_hcd *ohci = hcd_to_ohci(hcd);
+
+ if (time_before(jiffies, ohci->next_statechange))
+ msleep(5);
+ ohci->next_statechange = jiffies;
+
+ ohci_da8xx_clock(0);
+ hcd->state = HC_STATE_SUSPENDED;
+ dev->dev.power.power_state = PMSG_SUSPEND;
+ return 0;
+}
+
+static int ohci_da8xx_resume(struct platform_device *dev)
+{
+ struct usb_hcd *hcd = platform_get_drvdata(dev);
+ struct ohci_hcd *ohci = hcd_to_ohci(hcd);
+
+ if (time_before(jiffies, ohci->next_statechange))
+ msleep(5);
+ ohci->next_statechange = jiffies;
+
+ ohci_da8xx_clock(1);
+ dev->dev.power.power_state = PMSG_ON;
+ usb_hcd_resume_root_hub(hcd);
+ return 0;
+}
+#endif
+
+/*
+ * Driver definition to register with platform structure.
+ */
+static struct platform_driver ohci_hcd_da8xx_driver = {
+ .probe = ohci_hcd_da8xx_drv_probe,
+ .remove = ohci_hcd_da8xx_drv_remove,
+ .shutdown = usb_hcd_platform_shutdown,
+#ifdef CONFIG_PM
+ .suspend = ohci_da8xx_suspend,
+ .resume = ohci_da8xx_resume,
+#endif
+ .driver = {
+ .owner = THIS_MODULE,
+ .name = "ohci",
+ },
+};
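The glue above leaves port power switching and over-current sensing entirely to board code through struct da8xx_ohci_root_hub (declared in <mach/usb.h>, which the driver includes). A rough sketch of the platform data a board might attach to its "ohci" platform device; the GPIO numbers and function names are invented for illustration, and only the callback signatures and the 2 ms units of potpgt follow the driver code.

#include <linux/gpio.h>
#include <mach/usb.h>

#define BOARD_USB1_DRVVBUS_GPIO	109	/* hypothetical GPIO numbers */
#define BOARD_USB1_OVERCUR_GPIO	110

static int board_usb11_set_power(unsigned port, int on)
{
	gpio_set_value(BOARD_USB1_DRVVBUS_GPIO, on);
	return 0;
}

static int board_usb11_get_power(unsigned port)
{
	return gpio_get_value(BOARD_USB1_DRVVBUS_GPIO);
}

static int board_usb11_get_oci(unsigned port)
{
	/* over-current input assumed active low on this board */
	return !gpio_get_value(BOARD_USB1_OVERCUR_GPIO);
}

static struct da8xx_ohci_root_hub board_usb11_pdata = {
	.set_power	= board_usb11_set_power,
	.get_power	= board_usb11_get_power,
	.get_oci	= board_usb11_get_oci,
	/* .ocic_notify would hook the board's over-current interrupt and
	 * call the handler it is given (ohci_da8xx_ocic_handler above) */
	.potpgt		= (3 + 1) / 2,	/* ~3 ms power-on to power-good, 2 ms units */
};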
diff --git a/drivers/usb/host/ohci-dbg.c b/drivers/usb/host/ohci-dbg.c
index 811f5dfd..8ad2441 100644
--- a/drivers/usb/host/ohci-dbg.c
+++ b/drivers/usb/host/ohci-dbg.c
@@ -53,13 +53,13 @@
int i, len;
if (usb_pipecontrol (pipe)) {
- printk (KERN_DEBUG __FILE__ ": setup(8):");
+ printk (KERN_DEBUG "%s: setup(8):", __FILE__);
for (i = 0; i < 8 ; i++)
printk (" %02x", ((__u8 *) urb->setup_packet) [i]);
printk ("\n");
}
if (urb->transfer_buffer_length > 0 && urb->transfer_buffer) {
- printk (KERN_DEBUG __FILE__ ": data(%d/%d):",
+ printk (KERN_DEBUG "%s: data(%d/%d):", __FILE__,
urb->actual_length,
urb->transfer_buffer_length);
len = usb_pipeout (pipe)?
diff --git a/drivers/usb/host/ohci-hcd.c b/drivers/usb/host/ohci-hcd.c
index 24eb747..afe59be 100644
--- a/drivers/usb/host/ohci-hcd.c
+++ b/drivers/usb/host/ohci-hcd.c
@@ -1051,6 +1051,11 @@
#define PLATFORM_DRIVER usb_hcd_pnx4008_driver
#endif
+#ifdef CONFIG_ARCH_DAVINCI_DA8XX
+#include "ohci-da8xx.c"
+#define PLATFORM_DRIVER ohci_hcd_da8xx_driver
+#endif
+
#if defined(CONFIG_CPU_SUBTYPE_SH7720) || \
defined(CONFIG_CPU_SUBTYPE_SH7721) || \
defined(CONFIG_CPU_SUBTYPE_SH7763) || \
diff --git a/drivers/usb/host/ohci-lh7a404.c b/drivers/usb/host/ohci-lh7a404.c
index de42283..18d39f0 100644
--- a/drivers/usb/host/ohci-lh7a404.c
+++ b/drivers/usb/host/ohci-lh7a404.c
@@ -28,8 +28,8 @@
static void lh7a404_start_hc(struct platform_device *dev)
{
- printk(KERN_DEBUG __FILE__
- ": starting LH7A404 OHCI USB Controller\n");
+ printk(KERN_DEBUG "%s: starting LH7A404 OHCI USB Controller\n",
+ __FILE__);
/*
* Now, carefully enable the USB clock, and take
@@ -39,14 +39,13 @@
udelay(1000);
USBH_CMDSTATUS = OHCI_HCR;
- printk(KERN_DEBUG __FILE__
- ": Clock to USB host has been enabled \n");
+	printk(KERN_DEBUG "%s: Clock to USB host has been enabled\n", __FILE__);
}
static void lh7a404_stop_hc(struct platform_device *dev)
{
- printk(KERN_DEBUG __FILE__
- ": stopping LH7A404 OHCI USB Controller\n");
+ printk(KERN_DEBUG "%s: stopping LH7A404 OHCI USB Controller\n",
+ __FILE__);
CSC_PWRCNT &= ~CSC_PWRCNT_USBH_EN; /* Disable clock */
}
diff --git a/drivers/usb/host/ohci-pnx4008.c b/drivers/usb/host/ohci-pnx4008.c
index 2769326..cd74bbd 100644
--- a/drivers/usb/host/ohci-pnx4008.c
+++ b/drivers/usb/host/ohci-pnx4008.c
@@ -327,7 +327,7 @@
}
i2c_adap = i2c_get_adapter(2);
memset(&i2c_info, 0, sizeof(struct i2c_board_info));
- strlcpy(i2c_info.name, "isp1301_pnx", I2C_NAME_SIZE);
+ strlcpy(i2c_info.type, "isp1301_pnx", I2C_NAME_SIZE);
isp1301_i2c_client = i2c_new_probed_device(i2c_adap, &i2c_info,
normal_i2c);
i2c_put_adapter(i2c_adap);
@@ -411,7 +411,7 @@
out2:
clk_put(usb_clk);
out1:
- i2c_unregister_client(isp1301_i2c_client);
+ i2c_unregister_device(isp1301_i2c_client);
isp1301_i2c_client = NULL;
out_i2c_driver:
i2c_del_driver(&isp1301_driver);
@@ -430,7 +430,7 @@
pnx4008_unset_usb_bits();
clk_disable(usb_clk);
clk_put(usb_clk);
- i2c_unregister_client(isp1301_i2c_client);
+ i2c_unregister_device(isp1301_i2c_client);
isp1301_i2c_client = NULL;
i2c_del_driver(&isp1301_driver);
diff --git a/drivers/usb/host/ohci-ppc-of.c b/drivers/usb/host/ohci-ppc-of.c
index 68a3017..103263c 100644
--- a/drivers/usb/host/ohci-ppc-of.c
+++ b/drivers/usb/host/ohci-ppc-of.c
@@ -114,21 +114,21 @@
hcd->rsrc_len = res.end - res.start + 1;
if (!request_mem_region(hcd->rsrc_start, hcd->rsrc_len, hcd_name)) {
- printk(KERN_ERR __FILE__ ": request_mem_region failed\n");
+ printk(KERN_ERR "%s: request_mem_region failed\n", __FILE__);
rv = -EBUSY;
goto err_rmr;
}
irq = irq_of_parse_and_map(dn, 0);
if (irq == NO_IRQ) {
- printk(KERN_ERR __FILE__ ": irq_of_parse_and_map failed\n");
+ printk(KERN_ERR "%s: irq_of_parse_and_map failed\n", __FILE__);
rv = -EBUSY;
goto err_irq;
}
hcd->regs = ioremap(hcd->rsrc_start, hcd->rsrc_len);
if (!hcd->regs) {
- printk(KERN_ERR __FILE__ ": ioremap failed\n");
+ printk(KERN_ERR "%s: ioremap failed\n", __FILE__);
rv = -ENOMEM;
goto err_ioremap;
}
@@ -169,7 +169,7 @@
} else
release_mem_region(res.start, 0x4);
} else
- pr_debug(__FILE__ ": cannot get ehci offset from fdt\n");
+ pr_debug("%s: cannot get ehci offset from fdt\n", __FILE__);
}
iounmap(hcd->regs);
@@ -212,7 +212,7 @@
}
-static struct of_device_id ohci_hcd_ppc_of_match[] = {
+static const struct of_device_id ohci_hcd_ppc_of_match[] = {
#ifdef CONFIG_USB_OHCI_HCD_PPC_OF_BE
{
.name = "usb",
diff --git a/drivers/usb/host/ohci-ppc-soc.c b/drivers/usb/host/ohci-ppc-soc.c
index cd3398b..89e670e 100644
--- a/drivers/usb/host/ohci-ppc-soc.c
+++ b/drivers/usb/host/ohci-ppc-soc.c
@@ -41,14 +41,14 @@
res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
if (!res) {
- pr_debug(__FILE__ ": no irq\n");
+ pr_debug("%s: no irq\n", __FILE__);
return -ENODEV;
}
irq = res->start;
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!res) {
- pr_debug(__FILE__ ": no reg addr\n");
+ pr_debug("%s: no reg addr\n", __FILE__);
return -ENODEV;
}
@@ -59,14 +59,14 @@
hcd->rsrc_len = res->end - res->start + 1;
if (!request_mem_region(hcd->rsrc_start, hcd->rsrc_len, hcd_name)) {
- pr_debug(__FILE__ ": request_mem_region failed\n");
+ pr_debug("%s: request_mem_region failed\n", __FILE__);
retval = -EBUSY;
goto err1;
}
hcd->regs = ioremap(hcd->rsrc_start, hcd->rsrc_len);
if (!hcd->regs) {
- pr_debug(__FILE__ ": ioremap failed\n");
+ pr_debug("%s: ioremap failed\n", __FILE__);
retval = -ENOMEM;
goto err2;
}
diff --git a/drivers/usb/host/ohci-sa1111.c b/drivers/usb/host/ohci-sa1111.c
index e4bbe8e..d8eb3bd 100644
--- a/drivers/usb/host/ohci-sa1111.c
+++ b/drivers/usb/host/ohci-sa1111.c
@@ -31,8 +31,8 @@
{
unsigned int usb_rst = 0;
- printk(KERN_DEBUG __FILE__
- ": starting SA-1111 OHCI USB Controller\n");
+ printk(KERN_DEBUG "%s: starting SA-1111 OHCI USB Controller\n",
+ __FILE__);
#ifdef CONFIG_SA1100_BADGE4
if (machine_is_badge4()) {
@@ -65,8 +65,8 @@
static void sa1111_stop_hc(struct sa1111_dev *dev)
{
unsigned int usb_rst;
- printk(KERN_DEBUG __FILE__
- ": stopping SA-1111 OHCI USB Controller\n");
+ printk(KERN_DEBUG "%s: stopping SA-1111 OHCI USB Controller\n",
+ __FILE__);
/*
* Put the USB host controller into reset.
diff --git a/drivers/usb/host/sl811-hcd.c b/drivers/usb/host/sl811-hcd.c
index 5b22a4d..e11cc3a 100644
--- a/drivers/usb/host/sl811-hcd.c
+++ b/drivers/usb/host/sl811-hcd.c
@@ -51,6 +51,7 @@
#include <asm/irq.h>
#include <asm/system.h>
#include <asm/byteorder.h>
+#include <asm/unaligned.h>
#include "../core/hcd.h"
#include "sl811.h"
@@ -1272,12 +1273,12 @@
sl811h_hub_descriptor(sl811, (struct usb_hub_descriptor *) buf);
break;
case GetHubStatus:
- *(__le32 *) buf = cpu_to_le32(0);
+ put_unaligned_le32(0, buf);
break;
case GetPortStatus:
if (wIndex != 1)
goto error;
- *(__le32 *) buf = cpu_to_le32(sl811->port1);
+ put_unaligned_le32(sl811->port1, buf);
#ifndef VERBOSE
if (*(u16*)(buf+2)) /* only if wPortChange is interesting */
diff --git a/drivers/usb/host/uhci-hcd.c b/drivers/usb/host/uhci-hcd.c
index 99cd00f..0919706 100644
--- a/drivers/usb/host/uhci-hcd.c
+++ b/drivers/usb/host/uhci-hcd.c
@@ -735,6 +735,7 @@
uhci_hc_died(uhci);
uhci_scan_schedule(uhci);
spin_unlock_irq(&uhci->lock);
+ synchronize_irq(hcd->irq);
del_timer_sync(&uhci->fsbr_timer);
release_uhci(uhci);
diff --git a/drivers/usb/host/xhci-dbg.c b/drivers/usb/host/xhci-dbg.c
index 33128d5..105fa8b 100644
--- a/drivers/usb/host/xhci-dbg.c
+++ b/drivers/usb/host/xhci-dbg.c
@@ -406,6 +406,25 @@
}
}
+char *xhci_get_slot_state(struct xhci_hcd *xhci,
+ struct xhci_container_ctx *ctx)
+{
+ struct xhci_slot_ctx *slot_ctx = xhci_get_slot_ctx(xhci, ctx);
+
+ switch (GET_SLOT_STATE(slot_ctx->dev_state)) {
+ case 0:
+ return "enabled/disabled";
+ case 1:
+ return "default";
+ case 2:
+ return "addressed";
+ case 3:
+ return "configured";
+ default:
+ return "reserved";
+ }
+}
+
void xhci_dbg_slot_ctx(struct xhci_hcd *xhci, struct xhci_container_ctx *ctx)
{
/* Fields are 32 bits wide, DMA addresses are in bytes */
diff --git a/drivers/usb/host/xhci-ext-caps.h b/drivers/usb/host/xhci-ext-caps.h
index ecc131c..78c4eda 100644
--- a/drivers/usb/host/xhci-ext-caps.h
+++ b/drivers/usb/host/xhci-ext-caps.h
@@ -101,12 +101,15 @@
next = readl(base + ext_offset);
- if (ext_offset == XHCI_HCC_PARAMS_OFFSET)
+ if (ext_offset == XHCI_HCC_PARAMS_OFFSET) {
/* Find the first extended capability */
next = XHCI_HCC_EXT_CAPS(next);
- else
+ ext_offset = 0;
+ } else {
/* Find the next extended capability */
next = XHCI_EXT_CAPS_NEXT(next);
+ }
+
if (!next)
return 0;
/*
diff --git a/drivers/usb/host/xhci-hcd.c b/drivers/usb/host/xhci-hcd.c
index 5e92c72..4cb69e0 100644
--- a/drivers/usb/host/xhci-hcd.c
+++ b/drivers/usb/host/xhci-hcd.c
@@ -1007,7 +1007,7 @@
* for usb_set_interface() and usb_set_configuration() claim).
*/
if (xhci_endpoint_init(xhci, xhci->devs[udev->slot_id],
- udev, ep, GFP_KERNEL) < 0) {
+ udev, ep, GFP_NOIO) < 0) {
dev_dbg(&udev->dev, "%s - could not initialize ep %#x\n",
__func__, ep->desc.bEndpointAddress);
return -ENOMEM;
@@ -1181,6 +1181,8 @@
ret = xhci_queue_evaluate_context(xhci, in_ctx->dma,
udev->slot_id);
if (ret < 0) {
+ if (command)
+ list_del(&command->cmd_list);
spin_unlock_irqrestore(&xhci->lock, flags);
xhci_dbg(xhci, "FIXME allocate a new ring segment\n");
return -ENOMEM;
@@ -1264,30 +1266,13 @@
xhci_zero_in_ctx(xhci, virt_dev);
/* Install new rings and free or cache any old rings */
for (i = 1; i < 31; ++i) {
- int rings_cached;
-
if (!virt_dev->eps[i].new_ring)
continue;
/* Only cache or free the old ring if it exists.
* It may not if this is the first add of an endpoint.
*/
if (virt_dev->eps[i].ring) {
- rings_cached = virt_dev->num_rings_cached;
- if (rings_cached < XHCI_MAX_RINGS_CACHED) {
- virt_dev->num_rings_cached++;
- rings_cached = virt_dev->num_rings_cached;
- virt_dev->ring_cache[rings_cached] =
- virt_dev->eps[i].ring;
- xhci_dbg(xhci, "Cached old ring, "
- "%d ring%s cached\n",
- rings_cached,
- (rings_cached > 1) ? "s" : "");
- } else {
- xhci_ring_free(xhci, virt_dev->eps[i].ring);
- xhci_dbg(xhci, "Ring cache full (%d rings), "
- "freeing ring\n",
- virt_dev->num_rings_cached);
- }
+ xhci_free_or_cache_endpoint_ring(xhci, virt_dev, i);
}
virt_dev->eps[i].ring = virt_dev->eps[i].new_ring;
virt_dev->eps[i].new_ring = NULL;
@@ -1458,6 +1443,131 @@
}
/*
+ * This submits a Reset Device Command, which will set the device state to 0,
+ * set the device address to 0, and disable all the endpoints except the default
+ * control endpoint. The USB core should come back and call
+ * xhci_address_device(), and then re-set up the configuration. If this is
+ * called because of a usb_reset_and_verify_device(), then the old alternate
+ * settings will be re-installed through the normal bandwidth allocation
+ * functions.
+ *
+ * Wait for the Reset Device command to finish. Remove all structures
+ * associated with the endpoints that were disabled. Clear the input device
+ * structure? Cache the rings? Reset the control endpoint 0 max packet size?
+ */
+int xhci_reset_device(struct usb_hcd *hcd, struct usb_device *udev)
+{
+ int ret, i;
+ unsigned long flags;
+ struct xhci_hcd *xhci;
+ unsigned int slot_id;
+ struct xhci_virt_device *virt_dev;
+ struct xhci_command *reset_device_cmd;
+ int timeleft;
+ int last_freed_endpoint;
+
+ ret = xhci_check_args(hcd, udev, NULL, 0, __func__);
+ if (ret <= 0)
+ return ret;
+ xhci = hcd_to_xhci(hcd);
+ slot_id = udev->slot_id;
+ virt_dev = xhci->devs[slot_id];
+ if (!virt_dev) {
+ xhci_dbg(xhci, "%s called with invalid slot ID %u\n",
+ __func__, slot_id);
+ return -EINVAL;
+ }
+
+ xhci_dbg(xhci, "Resetting device with slot ID %u\n", slot_id);
+ /* Allocate the command structure that holds the struct completion.
+ * Assume we're in process context, since the normal device reset
+ * process has to wait for the device anyway. Storage devices are
+ * reset as part of error handling, so use GFP_NOIO instead of
+ * GFP_KERNEL.
+ */
+ reset_device_cmd = xhci_alloc_command(xhci, false, true, GFP_NOIO);
+ if (!reset_device_cmd) {
+ xhci_dbg(xhci, "Couldn't allocate command structure.\n");
+ return -ENOMEM;
+ }
+
+ /* Attempt to submit the Reset Device command to the command ring */
+ spin_lock_irqsave(&xhci->lock, flags);
+ reset_device_cmd->command_trb = xhci->cmd_ring->enqueue;
+ list_add_tail(&reset_device_cmd->cmd_list, &virt_dev->cmd_list);
+ ret = xhci_queue_reset_device(xhci, slot_id);
+ if (ret) {
+ xhci_dbg(xhci, "FIXME: allocate a command ring segment\n");
+ list_del(&reset_device_cmd->cmd_list);
+ spin_unlock_irqrestore(&xhci->lock, flags);
+ goto command_cleanup;
+ }
+ xhci_ring_cmd_db(xhci);
+ spin_unlock_irqrestore(&xhci->lock, flags);
+
+ /* Wait for the Reset Device command to finish */
+ timeleft = wait_for_completion_interruptible_timeout(
+ reset_device_cmd->completion,
+ USB_CTRL_SET_TIMEOUT);
+ if (timeleft <= 0) {
+ xhci_warn(xhci, "%s while waiting for reset device command\n",
+ timeleft == 0 ? "Timeout" : "Signal");
+ spin_lock_irqsave(&xhci->lock, flags);
+ /* The timeout might have raced with the event ring handler, so
+ * only delete from the list if the item isn't poisoned.
+ */
+ if (reset_device_cmd->cmd_list.next != LIST_POISON1)
+ list_del(&reset_device_cmd->cmd_list);
+ spin_unlock_irqrestore(&xhci->lock, flags);
+ ret = -ETIME;
+ goto command_cleanup;
+ }
+
+ /* The Reset Device command can't fail, according to the 0.95/0.96 spec,
+ * unless we tried to reset a slot ID that wasn't enabled,
+ * or the device wasn't in the addressed or configured state.
+ */
+ ret = reset_device_cmd->status;
+ switch (ret) {
+ case COMP_EBADSLT: /* 0.95 completion code for bad slot ID */
+ case COMP_CTX_STATE: /* 0.96 completion code for same thing */
+ xhci_info(xhci, "Can't reset device (slot ID %u) in %s state\n",
+ slot_id,
+ xhci_get_slot_state(xhci, virt_dev->out_ctx));
+ xhci_info(xhci, "Not freeing device rings.\n");
+ /* Don't treat this as an error. May change my mind later. */
+ ret = 0;
+ goto command_cleanup;
+ case COMP_SUCCESS:
+ xhci_dbg(xhci, "Successful reset device command.\n");
+ break;
+ default:
+ if (xhci_is_vendor_info_code(xhci, ret))
+ break;
+ xhci_warn(xhci, "Unknown completion code %u for "
+ "reset device command.\n", ret);
+ ret = -EINVAL;
+ goto command_cleanup;
+ }
+
+ /* Everything but endpoint 0 is disabled, so free or cache the rings. */
+ last_freed_endpoint = 1;
+ for (i = 1; i < 31; ++i) {
+ if (!virt_dev->eps[i].ring)
+ continue;
+ xhci_free_or_cache_endpoint_ring(xhci, virt_dev, i);
+ last_freed_endpoint = i;
+ }
+ xhci_dbg(xhci, "Output context after successful reset device cmd:\n");
+ xhci_dbg_ctx(xhci, virt_dev->out_ctx, last_freed_endpoint);
+ ret = 0;
+
+command_cleanup:
+ xhci_free_command(xhci, reset_device_cmd);
+ return ret;
+}
+
+/*
* At this point, the struct usb_device is about to go away, the device has
* disconnected, and all traffic has been stopped and the endpoints have been
* disabled. Free any HC data structures associated with that device.
@@ -1694,7 +1804,7 @@
xhci_warn(xhci, "Cannot update hub desc for unknown device.\n");
return -EINVAL;
}
- config_cmd = xhci_alloc_command(xhci, true, mem_flags);
+ config_cmd = xhci_alloc_command(xhci, true, true, mem_flags);
if (!config_cmd) {
xhci_dbg(xhci, "Could not allocate xHCI command structure.\n");
return -ENOMEM;
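The comment introducing xhci_reset_device() anticipates the USB core driving it through the new hc_driver hook (wired up as .reset_device in xhci-pci.c further down). The hub.c call site is not part of this hunk; a hedged sketch of the expected core-side pattern, using a hypothetical wrapper name:

/* Hypothetical wrapper: how usbcore would invoke the new op while
 * resetting a device, before re-addressing and re-configuring it. */
static int hcd_reset_device(struct usb_hcd *hcd, struct usb_device *udev)
{
	if (!hcd->driver->reset_device)
		return 0;	/* HCDs without the hook need no extra work */
	return hcd->driver->reset_device(hcd, udev);
}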
diff --git a/drivers/usb/host/xhci-hub.c b/drivers/usb/host/xhci-hub.c
index eac5b53..208b805 100644
--- a/drivers/usb/host/xhci-hub.c
+++ b/drivers/usb/host/xhci-hub.c
@@ -129,6 +129,50 @@
return (state & XHCI_PORT_RO) | (state & XHCI_PORT_RWS);
}
+static void xhci_disable_port(struct xhci_hcd *xhci, u16 wIndex,
+ u32 __iomem *addr, u32 port_status)
+{
+ /* Write 1 to disable the port */
+ xhci_writel(xhci, port_status | PORT_PE, addr);
+ port_status = xhci_readl(xhci, addr);
+ xhci_dbg(xhci, "disable port, actual port %d status = 0x%x\n",
+ wIndex, port_status);
+}
+
+static void xhci_clear_port_change_bit(struct xhci_hcd *xhci, u16 wValue,
+ u16 wIndex, u32 __iomem *addr, u32 port_status)
+{
+ char *port_change_bit;
+ u32 status;
+
+ switch (wValue) {
+ case USB_PORT_FEAT_C_RESET:
+ status = PORT_RC;
+ port_change_bit = "reset";
+ break;
+ case USB_PORT_FEAT_C_CONNECTION:
+ status = PORT_CSC;
+ port_change_bit = "connect";
+ break;
+ case USB_PORT_FEAT_C_OVER_CURRENT:
+ status = PORT_OCC;
+ port_change_bit = "over-current";
+ break;
+ case USB_PORT_FEAT_C_ENABLE:
+ status = PORT_PEC;
+ port_change_bit = "enable/disable";
+ break;
+ default:
+ /* Should never happen */
+ return;
+ }
+ /* Change bits are all write 1 to clear */
+ xhci_writel(xhci, port_status | status, addr);
+ port_status = xhci_readl(xhci, addr);
+ xhci_dbg(xhci, "clear port %s change, actual port %d status = 0x%x\n",
+ port_change_bit, wIndex, port_status);
+}
+
int xhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
u16 wIndex, char *buf, u16 wLength)
{
@@ -138,7 +182,6 @@
u32 temp, status;
int retval = 0;
u32 __iomem *addr;
- char *port_change_bit;
ports = HCS_MAX_PORTS(xhci->hcs_params1);
@@ -229,26 +272,18 @@
temp = xhci_port_state_to_neutral(temp);
switch (wValue) {
case USB_PORT_FEAT_C_RESET:
- status = PORT_RC;
- port_change_bit = "reset";
- break;
case USB_PORT_FEAT_C_CONNECTION:
- status = PORT_CSC;
- port_change_bit = "connect";
- break;
case USB_PORT_FEAT_C_OVER_CURRENT:
- status = PORT_OCC;
- port_change_bit = "over-current";
+ case USB_PORT_FEAT_C_ENABLE:
+ xhci_clear_port_change_bit(xhci, wValue, wIndex,
+ addr, temp);
+ break;
+ case USB_PORT_FEAT_ENABLE:
+ xhci_disable_port(xhci, wIndex, addr, temp);
break;
default:
goto error;
}
- /* Change bits are all write 1 to clear */
- xhci_writel(xhci, temp | status, addr);
- temp = xhci_readl(xhci, addr);
- xhci_dbg(xhci, "clear port %s change, actual port %d status = 0x%x\n",
- port_change_bit, wIndex, temp);
- temp = xhci_readl(xhci, addr); /* unblock any posted writes */
break;
default:
error:
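The helpers factored out above rely on the port change bits being write-1-to-clear. A short illustration of the idiom using names from this file; this is equivalent to one branch of xhci_clear_port_change_bit() and not an addition to the patch.

/* Illustrative helper: clear only the connect-status-change bit of one
 * port. xhci_port_state_to_neutral() strips the RW1C change bits from
 * the value read back, so only the bit OR-ed in below receives a 1 and
 * the other pending change bits are left untouched. */
static void clear_connect_change(struct xhci_hcd *xhci, u32 __iomem *addr)
{
	u32 temp = xhci_readl(xhci, addr);

	temp = xhci_port_state_to_neutral(temp);
	xhci_writel(xhci, temp | PORT_CSC, addr);
}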
diff --git a/drivers/usb/host/xhci-mem.c b/drivers/usb/host/xhci-mem.c
index bffcef7..49f7d72 100644
--- a/drivers/usb/host/xhci-mem.c
+++ b/drivers/usb/host/xhci-mem.c
@@ -198,6 +198,31 @@
return 0;
}
+void xhci_free_or_cache_endpoint_ring(struct xhci_hcd *xhci,
+ struct xhci_virt_device *virt_dev,
+ unsigned int ep_index)
+{
+ int rings_cached;
+
+ rings_cached = virt_dev->num_rings_cached;
+ if (rings_cached < XHCI_MAX_RINGS_CACHED) {
+ virt_dev->num_rings_cached++;
+ rings_cached = virt_dev->num_rings_cached;
+ virt_dev->ring_cache[rings_cached] =
+ virt_dev->eps[ep_index].ring;
+ xhci_dbg(xhci, "Cached old ring, "
+ "%d ring%s cached\n",
+ rings_cached,
+ (rings_cached > 1) ? "s" : "");
+ } else {
+ xhci_ring_free(xhci, virt_dev->eps[ep_index].ring);
+ xhci_dbg(xhci, "Ring cache full (%d rings), "
+ "freeing ring\n",
+ virt_dev->num_rings_cached);
+ }
+ virt_dev->eps[ep_index].ring = NULL;
+}
+
/* Zero an endpoint ring (except for link TRBs) and move the enqueue and dequeue
* pointers to the beginning of the ring.
*/
@@ -242,6 +267,8 @@
void xhci_free_container_ctx(struct xhci_hcd *xhci,
struct xhci_container_ctx *ctx)
{
+ if (!ctx)
+ return;
dma_pool_free(xhci->device_pool, ctx->bytes, ctx->dma);
kfree(ctx);
}
@@ -427,7 +454,7 @@
case USB_SPEED_LOW:
slot_ctx->dev_info |= (u32) SLOT_SPEED_LS;
break;
- case USB_SPEED_VARIABLE:
+ case USB_SPEED_WIRELESS:
xhci_dbg(xhci, "FIXME xHCI doesn't support wireless speeds\n");
return -EINVAL;
break;
@@ -471,7 +498,7 @@
case USB_SPEED_LOW:
ep0_ctx->ep_info2 |= MAX_PACKET(8);
break;
- case USB_SPEED_VARIABLE:
+ case USB_SPEED_WIRELESS:
xhci_dbg(xhci, "FIXME xHCI doesn't support wireless speeds\n");
return -EINVAL;
break;
@@ -819,7 +846,8 @@
}
struct xhci_command *xhci_alloc_command(struct xhci_hcd *xhci,
- bool allocate_completion, gfp_t mem_flags)
+ bool allocate_in_ctx, bool allocate_completion,
+ gfp_t mem_flags)
{
struct xhci_command *command;
@@ -827,11 +855,14 @@
if (!command)
return NULL;
- command->in_ctx =
- xhci_alloc_container_ctx(xhci, XHCI_CTX_TYPE_INPUT, mem_flags);
- if (!command->in_ctx) {
- kfree(command);
- return NULL;
+ if (allocate_in_ctx) {
+ command->in_ctx =
+ xhci_alloc_container_ctx(xhci, XHCI_CTX_TYPE_INPUT,
+ mem_flags);
+ if (!command->in_ctx) {
+ kfree(command);
+ return NULL;
+ }
}
if (allocate_completion) {
diff --git a/drivers/usb/host/xhci-pci.c b/drivers/usb/host/xhci-pci.c
index e097008..417d37a 100644
--- a/drivers/usb/host/xhci-pci.c
+++ b/drivers/usb/host/xhci-pci.c
@@ -139,6 +139,7 @@
.reset_bandwidth = xhci_reset_bandwidth,
.address_device = xhci_address_device,
.update_hub_device = xhci_update_hub_device,
+ .reset_device = xhci_reset_device,
/*
* scheduling support
diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c
index ee7bc7e..6ba841b 100644
--- a/drivers/usb/host/xhci-ring.c
+++ b/drivers/usb/host/xhci-ring.c
@@ -953,6 +953,17 @@
case TRB_TYPE(TRB_RESET_EP):
handle_reset_ep_completion(xhci, event, xhci->cmd_ring->dequeue);
break;
+ case TRB_TYPE(TRB_RESET_DEV):
+ xhci_dbg(xhci, "Completed reset device command.\n");
+ slot_id = TRB_TO_SLOT_ID(
+ xhci->cmd_ring->dequeue->generic.field[3]);
+ virt_dev = xhci->devs[slot_id];
+ if (virt_dev)
+ handle_cmd_in_cmd_wait_list(xhci, virt_dev, event);
+ else
+ xhci_warn(xhci, "Reset device command completion "
+ "for disabled slot %u\n", slot_id);
+ break;
default:
/* Skip over unknown commands on the event ring */
xhci->error_bitmask |= 1 << 6;
@@ -1080,6 +1091,20 @@
return 0;
}
+int xhci_is_vendor_info_code(struct xhci_hcd *xhci, unsigned int trb_comp_code)
+{
+ if (trb_comp_code >= 224 && trb_comp_code <= 255) {
+ /* Vendor defined "informational" completion code,
+ * treat as not-an-error.
+ */
+ xhci_dbg(xhci, "Vendor defined info completion code %u\n",
+ trb_comp_code);
+ xhci_dbg(xhci, "Treating code as success.\n");
+ return 1;
+ }
+ return 0;
+}
+
/*
* If this function returns an error condition, it means it got a Transfer
* event with a corrupted Slot ID, Endpoint ID, or TRB DMA address.
@@ -1196,13 +1221,7 @@
status = -ENOSR;
break;
default:
- if (trb_comp_code >= 224 && trb_comp_code <= 255) {
- /* Vendor defined "informational" completion code,
- * treat as not-an-error.
- */
- xhci_dbg(xhci, "Vendor defined info completion code %u\n",
- trb_comp_code);
- xhci_dbg(xhci, "Treating code as success.\n");
+ if (xhci_is_vendor_info_code(xhci, trb_comp_code)) {
status = 0;
break;
}
@@ -2181,6 +2200,14 @@
false);
}
+/* Queue a reset device command TRB */
+int xhci_queue_reset_device(struct xhci_hcd *xhci, u32 slot_id)
+{
+ return queue_command(xhci, 0, 0, 0,
+ TRB_TYPE(TRB_RESET_DEV) | SLOT_ID_FOR_TRB(slot_id),
+ false);
+}
+
/* Queue a configure endpoint command TRB */
int xhci_queue_configure_endpoint(struct xhci_hcd *xhci, dma_addr_t in_ctx_ptr,
u32 slot_id, bool command_must_succeed)
diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h
index 8778135..e5eb09b 100644
--- a/drivers/usb/host/xhci.h
+++ b/drivers/usb/host/xhci.h
@@ -1210,6 +1210,8 @@
void xhci_dbg_cmd_ptrs(struct xhci_hcd *xhci);
void xhci_dbg_ring_ptrs(struct xhci_hcd *xhci, struct xhci_ring *ring);
void xhci_dbg_ctx(struct xhci_hcd *xhci, struct xhci_container_ctx *ctx, unsigned int last_ep);
+char *xhci_get_slot_state(struct xhci_hcd *xhci,
+ struct xhci_container_ctx *ctx);
/* xHCI memory management */
void xhci_mem_cleanup(struct xhci_hcd *xhci);
@@ -1233,8 +1235,12 @@
struct usb_device *udev, struct usb_host_endpoint *ep,
gfp_t mem_flags);
void xhci_ring_free(struct xhci_hcd *xhci, struct xhci_ring *ring);
+void xhci_free_or_cache_endpoint_ring(struct xhci_hcd *xhci,
+ struct xhci_virt_device *virt_dev,
+ unsigned int ep_index);
struct xhci_command *xhci_alloc_command(struct xhci_hcd *xhci,
- bool allocate_completion, gfp_t mem_flags);
+ bool allocate_in_ctx, bool allocate_completion,
+ gfp_t mem_flags);
void xhci_free_command(struct xhci_hcd *xhci,
struct xhci_command *command);
@@ -1264,6 +1270,7 @@
int xhci_add_endpoint(struct usb_hcd *hcd, struct usb_device *udev, struct usb_host_endpoint *ep);
int xhci_drop_endpoint(struct usb_hcd *hcd, struct usb_device *udev, struct usb_host_endpoint *ep);
void xhci_endpoint_reset(struct usb_hcd *hcd, struct usb_host_endpoint *ep);
+int xhci_reset_device(struct usb_hcd *hcd, struct usb_device *udev);
int xhci_check_bandwidth(struct usb_hcd *hcd, struct usb_device *udev);
void xhci_reset_bandwidth(struct usb_hcd *hcd, struct usb_device *udev);
@@ -1272,6 +1279,7 @@
struct xhci_segment *trb_in_td(struct xhci_segment *start_seg,
union xhci_trb *start_trb, union xhci_trb *end_trb,
dma_addr_t suspect_dma);
+int xhci_is_vendor_info_code(struct xhci_hcd *xhci, unsigned int trb_comp_code);
void xhci_ring_cmd_db(struct xhci_hcd *xhci);
void *xhci_setup_one_noop(struct xhci_hcd *xhci);
void xhci_handle_event(struct xhci_hcd *xhci);
@@ -1293,6 +1301,7 @@
u32 slot_id);
int xhci_queue_reset_ep(struct xhci_hcd *xhci, int slot_id,
unsigned int ep_index);
+int xhci_queue_reset_device(struct xhci_hcd *xhci, u32 slot_id);
void xhci_find_new_dequeue_state(struct xhci_hcd *xhci,
unsigned int slot_id, unsigned int ep_index,
struct xhci_td *cur_td, struct xhci_dequeue_state *state);
diff --git a/drivers/usb/image/mdc800.c b/drivers/usb/image/mdc800.c
index eca355d..e192e8f 100644
--- a/drivers/usb/image/mdc800.c
+++ b/drivers/usb/image/mdc800.c
@@ -967,7 +967,7 @@
-static struct usb_device_id mdc800_table [] = {
+static const struct usb_device_id mdc800_table[] = {
{ USB_DEVICE(MDC800_VENDOR_ID, MDC800_PRODUCT_ID) },
{ } /* Terminating entry */
};
diff --git a/drivers/usb/image/microtek.c b/drivers/usb/image/microtek.c
index 459a728..3a6bcd5 100644
--- a/drivers/usb/image/microtek.c
+++ b/drivers/usb/image/microtek.c
@@ -155,7 +155,7 @@
const struct usb_device_id *id);
static void mts_usb_disconnect(struct usb_interface *intf);
-static struct usb_device_id mts_usb_ids [];
+static const struct usb_device_id mts_usb_ids[];
static struct usb_driver mts_usb_driver = {
.name = "microtekX6",
@@ -656,7 +656,7 @@
/* The entries of microtek_table must correspond, line-by-line to
the entries of mts_supported_products[]. */
-static struct usb_device_id mts_usb_ids [] =
+static const struct usb_device_id mts_usb_ids[] =
{
{ USB_DEVICE(0x4ce, 0x0300) },
{ USB_DEVICE(0x5da, 0x0094) },
diff --git a/drivers/usb/misc/Kconfig b/drivers/usb/misc/Kconfig
index abe3aa6..55660ea 100644
--- a/drivers/usb/misc/Kconfig
+++ b/drivers/usb/misc/Kconfig
@@ -87,17 +87,6 @@
To compile this driver as a module, choose M here: the
module will be called usblcd.
-config USB_BERRY_CHARGE
- tristate "USB BlackBerry recharge support"
- depends on USB
- help
- Say Y here if you want to connect a BlackBerry device to your
- computer's USB port and have it automatically switch to "recharge"
- mode.
-
- To compile this driver as a module, choose M here: the
- module will be called berry_charge.
-
config USB_LED
tristate "USB LED driver support"
depends on USB
@@ -242,17 +231,3 @@
driver beforehand. Tools for doing so are available at
http://bersace03.free.fr
-config USB_VST
- tristate "USB VST driver"
- depends on USB
- help
- This driver is intended for Vernier Software Technologies
- bulk usb devices such as their Ocean-Optics spectrometers or
- Labquest.
- It is a bulk channel driver with configurable read and write
- timeouts.
-
- To compile this driver as a module, choose M here: the
- module will be called vstusb.
-
-
diff --git a/drivers/usb/misc/Makefile b/drivers/usb/misc/Makefile
index 0826aab..717703e 100644
--- a/drivers/usb/misc/Makefile
+++ b/drivers/usb/misc/Makefile
@@ -5,7 +5,6 @@
obj-$(CONFIG_USB_ADUTUX) += adutux.o
obj-$(CONFIG_USB_APPLEDISPLAY) += appledisplay.o
-obj-$(CONFIG_USB_BERRY_CHARGE) += berry_charge.o
obj-$(CONFIG_USB_CYPRESS_CY7C63)+= cypress_cy7c63.o
obj-$(CONFIG_USB_CYTHERM) += cytherm.o
obj-$(CONFIG_USB_EMI26) += emi26.o
@@ -23,7 +22,6 @@
obj-$(CONFIG_USB_TRANCEVIBRATOR) += trancevibrator.o
obj-$(CONFIG_USB_USS720) += uss720.o
obj-$(CONFIG_USB_SEVSEG) += usbsevseg.o
-obj-$(CONFIG_USB_VST) += vstusb.o
obj-$(CONFIG_USB_SISUSBVGA) += sisusbvga/
diff --git a/drivers/usb/misc/adutux.c b/drivers/usb/misc/adutux.c
index 2035265..d240de0 100644
--- a/drivers/usb/misc/adutux.c
+++ b/drivers/usb/misc/adutux.c
@@ -38,7 +38,7 @@
#define dbg(lvl, format, arg...) \
do { \
if (debug >= lvl) \
- printk(KERN_DEBUG __FILE__ " : " format " \n", ## arg); \
+ printk(KERN_DEBUG "%s: " format "\n", __FILE__, ##arg); \
} while (0)
@@ -56,7 +56,7 @@
#define ADU_PRODUCT_ID 0x0064
/* table of devices that work with this driver */
-static struct usb_device_id device_table [] = {
+static const struct usb_device_id device_table[] = {
{ USB_DEVICE(ADU_VENDOR_ID, ADU_PRODUCT_ID) }, /* ADU100 */
{ USB_DEVICE(ADU_VENDOR_ID, ADU_PRODUCT_ID+20) }, /* ADU120 */
{ USB_DEVICE(ADU_VENDOR_ID, ADU_PRODUCT_ID+30) }, /* ADU130 */
@@ -132,8 +132,8 @@
if (debug < level)
return;
- printk(KERN_DEBUG __FILE__": %s - length = %d, data = ",
- function, size);
+ printk(KERN_DEBUG "%s: %s - length = %d, data = ",
+ __FILE__, function, size);
for (i = 0; i < size; ++i)
printk("%.2x ", data[i]);
printk("\n");
diff --git a/drivers/usb/misc/appledisplay.c b/drivers/usb/misc/appledisplay.c
index 1eb9e41..4d2952f 100644
--- a/drivers/usb/misc/appledisplay.c
+++ b/drivers/usb/misc/appledisplay.c
@@ -57,7 +57,7 @@
.bInterfaceProtocol = 0x00
/* table of devices that work with this driver */
-static struct usb_device_id appledisplay_table [] = {
+static const struct usb_device_id appledisplay_table[] = {
{ APPLEDISPLAY_DEVICE(0x9218) },
{ APPLEDISPLAY_DEVICE(0x9219) },
{ APPLEDISPLAY_DEVICE(0x921c) },
@@ -179,7 +179,7 @@
return pdata->msgdata[1];
}
-static struct backlight_ops appledisplay_bl_data = {
+static const struct backlight_ops appledisplay_bl_data = {
.get_brightness = appledisplay_bl_get_brightness,
.update_status = appledisplay_bl_update_status,
};
@@ -283,6 +283,7 @@
&appledisplay_bl_data);
if (IS_ERR(pdata->bd)) {
dev_err(&iface->dev, "Backlight registration failed\n");
+ retval = PTR_ERR(pdata->bd);
goto error;
}
diff --git a/drivers/usb/misc/berry_charge.c b/drivers/usb/misc/berry_charge.c
deleted file mode 100644
index c05a85b..0000000
--- a/drivers/usb/misc/berry_charge.c
+++ /dev/null
@@ -1,183 +0,0 @@
-/*
- * USB BlackBerry charging module
- *
- * Copyright (C) 2007 Greg Kroah-Hartman <gregkh@suse.de>
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation, version 2.
- *
- * Information on how to switch configs was taken by the bcharge.cc file
- * created by the barry.sf.net project.
- *
- * bcharge.cc has the following copyright:
- * Copyright (C) 2006, Net Direct Inc. (http://www.netdirect.ca/)
- * and is released under the GPLv2.
- *
- *
- */
-
-#include <linux/kernel.h>
-#include <linux/errno.h>
-#include <linux/init.h>
-#include <linux/slab.h>
-#include <linux/module.h>
-#include <linux/usb.h>
-
-#define RIM_VENDOR 0x0fca
-#define BLACKBERRY 0x0001
-#define BLACKBERRY_PEARL_DUAL 0x0004
-#define BLACKBERRY_PEARL 0x0006
-
-static int debug;
-static int pearl_dual_mode = 1;
-
-#ifdef dbg
-#undef dbg
-#endif
-#define dbg(dev, format, arg...) \
- if (debug) \
- dev_printk(KERN_DEBUG , dev , format , ## arg)
-
-static struct usb_device_id id_table [] = {
- { USB_DEVICE(RIM_VENDOR, BLACKBERRY) },
- { USB_DEVICE(RIM_VENDOR, BLACKBERRY_PEARL) },
- { USB_DEVICE(RIM_VENDOR, BLACKBERRY_PEARL_DUAL) },
- { }, /* Terminating entry */
-};
-MODULE_DEVICE_TABLE(usb, id_table);
-
-static int magic_charge(struct usb_device *udev)
-{
- char *dummy_buffer = kzalloc(2, GFP_KERNEL);
- int retval;
-
- if (!dummy_buffer)
- return -ENOMEM;
-
- /* send two magic commands and then set the configuration. The device
- * will then reset itself with the new power usage and should start
- * charging. */
-
- /* Note, with testing, it only seems that the first message is really
- * needed (at least for the 8700c), but to be safe, we emulate what
- * other operating systems seem to be sending to their device. We
- * really need to get some specs for this device to be sure about what
- * is going on here.
- */
- dbg(&udev->dev, "Sending first magic command\n");
- retval = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0),
- 0xa5, 0xc0, 0, 1, dummy_buffer, 2, 100);
- if (retval != 2) {
- dev_err(&udev->dev, "First magic command failed: %d.\n",
- retval);
- goto exit;
- }
-
- dbg(&udev->dev, "Sending second magic command\n");
- retval = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0),
- 0xa2, 0x40, 0, 1, dummy_buffer, 0, 100);
- if (retval != 0) {
- dev_err(&udev->dev, "Second magic command failed: %d.\n",
- retval);
- goto exit;
- }
-
- dbg(&udev->dev, "Calling set_configuration\n");
- retval = usb_driver_set_configuration(udev, 1);
- if (retval)
- dev_err(&udev->dev, "Set Configuration failed :%d.\n", retval);
-
-exit:
- kfree(dummy_buffer);
- return retval;
-}
-
-static int magic_dual_mode(struct usb_device *udev)
-{
- char *dummy_buffer = kzalloc(2, GFP_KERNEL);
- int retval;
-
- if (!dummy_buffer)
- return -ENOMEM;
-
- /* send magic command so that the Blackberry Pearl device exposes
- * two interfaces: both the USB mass-storage one and one which can
- * be used for database access. */
- dbg(&udev->dev, "Sending magic pearl command\n");
- retval = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0),
- 0xa9, 0xc0, 1, 1, dummy_buffer, 2, 100);
- dbg(&udev->dev, "Magic pearl command returned %d\n", retval);
-
- dbg(&udev->dev, "Calling set_configuration\n");
- retval = usb_driver_set_configuration(udev, 1);
- if (retval)
- dev_err(&udev->dev, "Set Configuration failed :%d.\n", retval);
-
- kfree(dummy_buffer);
- return retval;
-}
-
-static int berry_probe(struct usb_interface *intf,
- const struct usb_device_id *id)
-{
- struct usb_device *udev = interface_to_usbdev(intf);
-
- if (udev->bus_mA < 500) {
- dbg(&udev->dev, "Not enough power to charge available\n");
- return -ENODEV;
- }
-
- dbg(&udev->dev, "Power is set to %dmA\n",
- udev->actconfig->desc.bMaxPower * 2);
-
- /* check the power usage so we don't try to enable something that is
- * already enabled */
- if ((udev->actconfig->desc.bMaxPower * 2) == 500) {
- dbg(&udev->dev, "device is already charging, power is "
- "set to %dmA\n", udev->actconfig->desc.bMaxPower * 2);
- return -ENODEV;
- }
-
- /* turn the power on */
- magic_charge(udev);
-
- if ((le16_to_cpu(udev->descriptor.idProduct) == BLACKBERRY_PEARL) &&
- (pearl_dual_mode))
- magic_dual_mode(udev);
-
- /* we don't really want to bind to the device, userspace programs can
- * handle the syncing just fine, so get outta here. */
- return -ENODEV;
-}
-
-static void berry_disconnect(struct usb_interface *intf)
-{
-}
-
-static struct usb_driver berry_driver = {
- .name = "berry_charge",
- .probe = berry_probe,
- .disconnect = berry_disconnect,
- .id_table = id_table,
-};
-
-static int __init berry_init(void)
-{
- return usb_register(&berry_driver);
-}
-
-static void __exit berry_exit(void)
-{
- usb_deregister(&berry_driver);
-}
-
-module_init(berry_init);
-module_exit(berry_exit);
-
-MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Greg Kroah-Hartman <gregkh@suse.de>");
-module_param(debug, bool, S_IRUGO | S_IWUSR);
-MODULE_PARM_DESC(debug, "Debug enabled or not");
-module_param(pearl_dual_mode, bool, S_IRUGO | S_IWUSR);
-MODULE_PARM_DESC(pearl_dual_mode, "Change Blackberry Pearl to run in dual mode");
diff --git a/drivers/usb/misc/cypress_cy7c63.c b/drivers/usb/misc/cypress_cy7c63.c
index 5720bfe..1547d8c 100644
--- a/drivers/usb/misc/cypress_cy7c63.c
+++ b/drivers/usb/misc/cypress_cy7c63.c
@@ -56,7 +56,7 @@
/* table of devices that work with this driver */
-static struct usb_device_id cypress_table [] = {
+static const struct usb_device_id cypress_table[] = {
{ USB_DEVICE(CYPRESS_VENDOR_ID, CYPRESS_PRODUCT_ID) },
{ }
};
diff --git a/drivers/usb/misc/cytherm.c b/drivers/usb/misc/cytherm.c
index 4fb3c38..b9cbbbd 100644
--- a/drivers/usb/misc/cytherm.c
+++ b/drivers/usb/misc/cytherm.c
@@ -27,7 +27,7 @@
#define USB_SKEL_VENDOR_ID 0x04b4
#define USB_SKEL_PRODUCT_ID 0x0002
-static struct usb_device_id id_table [] = {
+static const struct usb_device_id id_table[] = {
{ USB_DEVICE(USB_SKEL_VENDOR_ID, USB_SKEL_PRODUCT_ID) },
{ }
};
diff --git a/drivers/usb/misc/emi26.c b/drivers/usb/misc/emi26.c
index 879a980..a6521c9 100644
--- a/drivers/usb/misc/emi26.c
+++ b/drivers/usb/misc/emi26.c
@@ -245,7 +245,7 @@
return err;
}
-static struct usb_device_id id_table [] = {
+static const struct usb_device_id id_table[] = {
{ USB_DEVICE(EMI26_VENDOR_ID, EMI26_PRODUCT_ID) },
{ USB_DEVICE(EMI26_VENDOR_ID, EMI26B_PRODUCT_ID) },
{ } /* Terminating entry */
diff --git a/drivers/usb/misc/emi62.c b/drivers/usb/misc/emi62.c
index 59860b3..fc15ad4 100644
--- a/drivers/usb/misc/emi62.c
+++ b/drivers/usb/misc/emi62.c
@@ -259,7 +259,7 @@
return err;
}
-static __devinitdata struct usb_device_id id_table [] = {
+static const struct usb_device_id id_table[] __devinitconst = {
{ USB_DEVICE(EMI62_VENDOR_ID, EMI62_PRODUCT_ID) },
{ } /* Terminating entry */
};
diff --git a/drivers/usb/misc/ftdi-elan.c b/drivers/usb/misc/ftdi-elan.c
index 9d0675e..1edb6d3 100644
--- a/drivers/usb/misc/ftdi-elan.c
+++ b/drivers/usb/misc/ftdi-elan.c
@@ -86,7 +86,7 @@
#define USB_FTDI_ELAN_VENDOR_ID 0x0403
#define USB_FTDI_ELAN_PRODUCT_ID 0xd6ea
/* table of devices that work with this driver*/
-static struct usb_device_id ftdi_elan_table[] = {
+static const struct usb_device_id ftdi_elan_table[] = {
{USB_DEVICE(USB_FTDI_ELAN_VENDOR_ID, USB_FTDI_ELAN_PRODUCT_ID)},
{ /* Terminating entry */ }
};
@@ -623,9 +623,12 @@
*/
static int ftdi_elan_open(struct inode *inode, struct file *file)
{
- int subminor = iminor(inode);
- struct usb_interface *interface = usb_find_interface(&ftdi_elan_driver,
- subminor);
+ int subminor;
+ struct usb_interface *interface;
+
+ subminor = iminor(inode);
+ interface = usb_find_interface(&ftdi_elan_driver, subminor);
+
if (!interface) {
printk(KERN_ERR "can't find device for minor %d\n", subminor);
return -ENODEV;
diff --git a/drivers/usb/misc/idmouse.c b/drivers/usb/misc/idmouse.c
index 1337a9c..a54c3cb 100644
--- a/drivers/usb/misc/idmouse.c
+++ b/drivers/usb/misc/idmouse.c
@@ -48,7 +48,7 @@
#define ID_CHERRY 0x0010
/* device ID table */
-static struct usb_device_id idmouse_table[] = {
+static const struct usb_device_id idmouse_table[] = {
{USB_DEVICE(ID_SIEMENS, ID_IDMOUSE)}, /* Siemens ID Mouse (Professional) */
{USB_DEVICE(ID_SIEMENS, ID_CHERRY )}, /* Cherry FingerTIP ID Board */
{} /* terminating null entry */
diff --git a/drivers/usb/misc/iowarrior.c b/drivers/usb/misc/iowarrior.c
index e75bb87..d3c8523 100644
--- a/drivers/usb/misc/iowarrior.c
+++ b/drivers/usb/misc/iowarrior.c
@@ -139,7 +139,7 @@
/* driver registration */
/*---------------------*/
/* table of devices that work with this driver */
-static struct usb_device_id iowarrior_ids[] = {
+static const struct usb_device_id iowarrior_ids[] = {
{USB_DEVICE(USB_VENDOR_ID_CODEMERCS, USB_DEVICE_ID_CODEMERCS_IOW40)},
{USB_DEVICE(USB_VENDOR_ID_CODEMERCS, USB_DEVICE_ID_CODEMERCS_IOW24)},
{USB_DEVICE(USB_VENDOR_ID_CODEMERCS, USB_DEVICE_ID_CODEMERCS_IOWPV1)},
@@ -602,10 +602,12 @@
dbg("%s", __func__);
+ lock_kernel();
subminor = iminor(inode);
interface = usb_find_interface(&iowarrior_driver, subminor);
if (!interface) {
+ unlock_kernel();
err("%s - error, can't find device for minor %d", __func__,
subminor);
return -ENODEV;
@@ -615,6 +617,7 @@
dev = usb_get_intfdata(interface);
if (!dev) {
mutex_unlock(&iowarrior_open_disc_lock);
+ unlock_kernel();
return -ENODEV;
}
@@ -641,6 +644,7 @@
out:
mutex_unlock(&dev->mutex);
+ unlock_kernel();
return retval;
}
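
The lock_kernel()/unlock_kernel() pairs added above are the usual big-kernel-lock pushdown into open(): the BKL is taken before looking up the interface and must be dropped on every exit path, including the early error returns. A minimal sketch of the shape, with placeholder driver names:

    #include <linux/smp_lock.h>

    static int sample_open(struct inode *inode, struct file *file)
    {
            struct usb_interface *interface;

            lock_kernel();                  /* serialize open() against disconnect() */
            interface = usb_find_interface(&sample_driver, iminor(inode));
            if (!interface) {
                    unlock_kernel();        /* every early return must drop the BKL */
                    return -ENODEV;
            }

            /* ... fetch intfdata, take the per-device mutex, set file->private_data ... */

            unlock_kernel();
            return 0;
    }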
diff --git a/drivers/usb/misc/isight_firmware.c b/drivers/usb/misc/isight_firmware.c
index b897f65..06e990a 100644
--- a/drivers/usb/misc/isight_firmware.c
+++ b/drivers/usb/misc/isight_firmware.c
@@ -26,7 +26,7 @@
#include <linux/errno.h>
#include <linux/module.h>
-static struct usb_device_id id_table[] = {
+static const struct usb_device_id id_table[] = {
{USB_DEVICE(0x05ac, 0x8300)},
{},
};
@@ -112,6 +112,8 @@
return ret;
}
+MODULE_FIRMWARE("isight.fw");
+
static void isight_firmware_disconnect(struct usb_interface *intf)
{
}
diff --git a/drivers/usb/misc/ldusb.c b/drivers/usb/misc/ldusb.c
index 90f1301..dd41d87 100644
--- a/drivers/usb/misc/ldusb.c
+++ b/drivers/usb/misc/ldusb.c
@@ -69,7 +69,7 @@
#endif
/* table of devices that work with this driver */
-static struct usb_device_id ld_usb_table [] = {
+static const struct usb_device_id ld_usb_table[] = {
{ USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_CASSY) },
{ USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_POCKETCASSY) },
{ USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_MOBILECASSY) },
@@ -798,7 +798,7 @@
/* register this driver with the USB subsystem */
retval = usb_register(&ld_usb_driver);
if (retval)
- err("usb_register failed for the "__FILE__" driver. Error number %d\n", retval);
+ err("usb_register failed for the %s driver. Error number %d\n", __FILE__, retval);
return retval;
}
diff --git a/drivers/usb/misc/legousbtower.c b/drivers/usb/misc/legousbtower.c
index faa6d62..8547bf9 100644
--- a/drivers/usb/misc/legousbtower.c
+++ b/drivers/usb/misc/legousbtower.c
@@ -95,8 +95,11 @@
/* Use our own dbg macro */
#undef dbg
-#define dbg(lvl, format, arg...) do { if (debug >= lvl) printk(KERN_DEBUG __FILE__ ": " format "\n", ## arg); } while (0)
-
+#define dbg(lvl, format, arg...) \
+do { \
+ if (debug >= lvl) \
+ printk(KERN_DEBUG "%s: " format "\n", __FILE__, ##arg); \
+} while (0)
/* Version Information */
#define DRIVER_VERSION "v0.96"
@@ -192,7 +195,7 @@
/* table of devices that work with this driver */
-static struct usb_device_id tower_table [] = {
+static const struct usb_device_id tower_table[] = {
{ USB_DEVICE(LEGO_USB_TOWER_VENDOR_ID, LEGO_USB_TOWER_PRODUCT_ID) },
{ } /* Terminating entry */
};
@@ -302,7 +305,7 @@
if (debug < level)
return;
- printk (KERN_DEBUG __FILE__": %s - length = %d, data = ", function, size);
+ printk (KERN_DEBUG "%s: %s - length = %d, data = ", __FILE__, function, size);
for (i = 0; i < size; ++i) {
printk ("%.2x ", data[i]);
}
@@ -1055,7 +1058,7 @@
/* register this driver with the USB subsystem */
result = usb_register(&tower_driver);
if (result < 0) {
- err("usb_register failed for the "__FILE__" driver. Error number %d", result);
+ err("usb_register failed for the %s driver. Error number %d", __FILE__, result);
retval = -1;
goto exit;
}
diff --git a/drivers/usb/misc/rio500.c b/drivers/usb/misc/rio500.c
index 32d0199..a85771b 100644
--- a/drivers/usb/misc/rio500.c
+++ b/drivers/usb/misc/rio500.c
@@ -78,10 +78,13 @@
{
struct rio_usb_data *rio = &rio_instance;
+ /* against disconnect() */
+ lock_kernel();
mutex_lock(&(rio->lock));
if (rio->isopen || !rio->present) {
mutex_unlock(&(rio->lock));
+ unlock_kernel();
return -EBUSY;
}
rio->isopen = 1;
@@ -91,6 +94,7 @@
mutex_unlock(&(rio->lock));
dev_info(&rio->rio_dev->dev, "Rio opened.\n");
+ unlock_kernel();
return 0;
}
@@ -115,7 +119,6 @@
int retries;
int retval=0;
- lock_kernel();
mutex_lock(&(rio->lock));
/* Sanity check to make sure rio is connected, powered, etc */
if (rio->present == 0 || rio->rio_dev == NULL) {
@@ -254,7 +257,6 @@
err_out:
mutex_unlock(&(rio->lock));
- unlock_kernel();
return retval;
}
@@ -489,6 +491,7 @@
struct rio_usb_data *rio = usb_get_intfdata (intf);
usb_set_intfdata (intf, NULL);
+ lock_kernel();
if (rio) {
usb_deregister_dev(intf, &usb_rio_class);
@@ -498,6 +501,7 @@
/* better let it finish - the release will do whats needed */
rio->rio_dev = NULL;
mutex_unlock(&(rio->lock));
+ unlock_kernel();
return;
}
kfree(rio->ibuf);
@@ -508,9 +512,10 @@
rio->present = 0;
mutex_unlock(&(rio->lock));
}
+ unlock_kernel();
}
-static struct usb_device_id rio_table [] = {
+static const struct usb_device_id rio_table[] = {
{ USB_DEVICE(0x0841, 1) }, /* Rio 500 */
{ } /* Terminating entry */
};
diff --git a/drivers/usb/misc/sisusbvga/sisusb.c b/drivers/usb/misc/sisusbvga/sisusb.c
index 8b37a4b..aae95a0 100644
--- a/drivers/usb/misc/sisusbvga/sisusb.c
+++ b/drivers/usb/misc/sisusbvga/sisusb.c
@@ -250,7 +250,7 @@
sisusb->urbstatus[index] |= SU_URB_BUSY;
/* Submit URB */
- retval = usb_submit_urb(urb, GFP_ATOMIC);
+ retval = usb_submit_urb(urb, GFP_KERNEL);
/* If OK, and if timeout > 0, wait for completion */
if ((retval == 0) && timeout) {
@@ -306,7 +306,7 @@
urb->actual_length = 0;
sisusb->completein = 0;
- retval = usb_submit_urb(urb, GFP_ATOMIC);
+ retval = usb_submit_urb(urb, GFP_KERNEL);
if (retval == 0) {
wait_event_timeout(sisusb->wait_q, sisusb->completein, timeout);
if (!sisusb->completein) {
@@ -2416,21 +2416,28 @@
struct usb_interface *interface;
int subminor = iminor(inode);
- if (!(interface = usb_find_interface(&sisusb_driver, subminor)))
+ lock_kernel();
+ if (!(interface = usb_find_interface(&sisusb_driver, subminor))) {
+ unlock_kernel();
return -ENODEV;
+ }
- if (!(sisusb = usb_get_intfdata(interface)))
+ if (!(sisusb = usb_get_intfdata(interface))) {
+ unlock_kernel();
return -ENODEV;
+ }
mutex_lock(&sisusb->lock);
if (!sisusb->present || !sisusb->ready) {
mutex_unlock(&sisusb->lock);
+ unlock_kernel();
return -ENODEV;
}
if (sisusb->isopen) {
mutex_unlock(&sisusb->lock);
+ unlock_kernel();
return -EBUSY;
}
@@ -2439,11 +2446,13 @@
if (sisusb_init_gfxdevice(sisusb, 0)) {
mutex_unlock(&sisusb->lock);
dev_err(&sisusb->sisusb_dev->dev, "Failed to initialize device\n");
+ unlock_kernel();
return -EIO;
}
} else {
mutex_unlock(&sisusb->lock);
dev_err(&sisusb->sisusb_dev->dev, "Device not attached to USB 2.0 hub\n");
+ unlock_kernel();
return -EIO;
}
}
@@ -2456,6 +2465,7 @@
file->private_data = sisusb;
mutex_unlock(&sisusb->lock);
+ unlock_kernel();
return 0;
}
@@ -3238,7 +3248,7 @@
kref_put(&sisusb->kref, sisusb_delete);
}
-static struct usb_device_id sisusb_table [] = {
+static const struct usb_device_id sisusb_table[] = {
{ USB_DEVICE(0x0711, 0x0550) },
{ USB_DEVICE(0x0711, 0x0900) },
{ USB_DEVICE(0x0711, 0x0901) },
diff --git a/drivers/usb/misc/trancevibrator.c b/drivers/usb/misc/trancevibrator.c
index 2e14102..5da28ea 100644
--- a/drivers/usb/misc/trancevibrator.c
+++ b/drivers/usb/misc/trancevibrator.c
@@ -33,7 +33,7 @@
#define TRANCEVIBRATOR_VENDOR_ID 0x0b49 /* ASCII Corporation */
#define TRANCEVIBRATOR_PRODUCT_ID 0x064f /* Trance Vibrator */
-static struct usb_device_id id_table [] = {
+static const struct usb_device_id id_table[] = {
{ USB_DEVICE(TRANCEVIBRATOR_VENDOR_ID, TRANCEVIBRATOR_PRODUCT_ID) },
{ },
};
diff --git a/drivers/usb/misc/usblcd.c b/drivers/usb/misc/usblcd.c
index 4fb1203..90aede9 100644
--- a/drivers/usb/misc/usblcd.c
+++ b/drivers/usb/misc/usblcd.c
@@ -30,7 +30,7 @@
#define IOCTL_GET_DRV_VERSION 2
-static struct usb_device_id id_table [] = {
+static const struct usb_device_id id_table[] = {
{ .idVendor = 0x10D2, .match_flags = USB_DEVICE_ID_MATCH_VENDOR, },
{ },
};
@@ -74,10 +74,12 @@
struct usb_interface *interface;
int subminor, r;
+ lock_kernel();
subminor = iminor(inode);
interface = usb_find_interface(&lcd_driver, subminor);
if (!interface) {
+ unlock_kernel();
err ("USBLCD: %s - error, can't find device for minor %d",
__func__, subminor);
return -ENODEV;
@@ -87,6 +89,7 @@
dev = usb_get_intfdata(interface);
if (!dev) {
mutex_unlock(&open_disc_mutex);
+ unlock_kernel();
return -ENODEV;
}
@@ -98,11 +101,13 @@
r = usb_autopm_get_interface(interface);
if (r < 0) {
kref_put(&dev->kref, lcd_delete);
+ unlock_kernel();
return r;
}
/* save our object in the file's private structure */
file->private_data = dev;
+ unlock_kernel();
return 0;
}
diff --git a/drivers/usb/misc/usbled.c b/drivers/usb/misc/usbled.c
index 06cb719..63da2c3 100644
--- a/drivers/usb/misc/usbled.c
+++ b/drivers/usb/misc/usbled.c
@@ -24,7 +24,7 @@
#define PRODUCT_ID 0x1223
/* table of devices that work with this driver */
-static struct usb_device_id id_table [] = {
+static const struct usb_device_id id_table[] = {
{ USB_DEVICE(VENDOR_ID, PRODUCT_ID) },
{ },
};
diff --git a/drivers/usb/misc/usbsevseg.c b/drivers/usb/misc/usbsevseg.c
index 3db2555..a9555cb 100644
--- a/drivers/usb/misc/usbsevseg.c
+++ b/drivers/usb/misc/usbsevseg.c
@@ -27,7 +27,7 @@
#define MAXLEN 6
/* table of devices that work with this driver */
-static struct usb_device_id id_table[] = {
+static const struct usb_device_id id_table[] = {
{ USB_DEVICE(VENDOR_ID, PRODUCT_ID) },
{ },
};
diff --git a/drivers/usb/misc/usbtest.c b/drivers/usb/misc/usbtest.c
index 3dab0c0..a21cce6 100644
--- a/drivers/usb/misc/usbtest.c
+++ b/drivers/usb/misc/usbtest.c
@@ -1580,10 +1580,6 @@
return -ERESTARTSYS;
/* FIXME: What if a system sleep starts while a test is running? */
- if (!intf->is_active) {
- mutex_unlock(&dev->lock);
- return -EHOSTUNREACH;
- }
/* some devices, like ez-usb default devices, need a non-default
* altsetting to have any active endpoints. some tests change
@@ -2101,7 +2097,7 @@
#endif
-static struct usb_device_id id_table [] = {
+static const struct usb_device_id id_table[] = {
/*-------------------------------------------------------------*/
diff --git a/drivers/usb/misc/uss720.c b/drivers/usb/misc/uss720.c
index 9a6c27a..f56fed5 100644
--- a/drivers/usb/misc/uss720.c
+++ b/drivers/usb/misc/uss720.c
@@ -770,7 +770,7 @@
}
/* table of cables that work through this driver */
-static struct usb_device_id uss720_table [] = {
+static const struct usb_device_id uss720_table[] = {
{ USB_DEVICE(0x047e, 0x1001) },
{ USB_DEVICE(0x0557, 0x2001) },
{ USB_DEVICE(0x0729, 0x1284) },
diff --git a/drivers/usb/misc/vstusb.c b/drivers/usb/misc/vstusb.c
deleted file mode 100644
index f26ea8d..0000000
--- a/drivers/usb/misc/vstusb.c
+++ /dev/null
@@ -1,783 +0,0 @@
-/*****************************************************************************
- * File: drivers/usb/misc/vstusb.c
- *
- * Purpose: Support for the bulk USB Vernier Spectrophotometers
- *
- * Author: Johnnie Peters
- * Axian Consulting
- * Beaverton, OR, USA 97005
- *
- * Modified by: EQware Engineering, Inc.
- * Oregon City, OR, USA 97045
- *
- * Copyright: 2007, 2008
- * Vernier Software & Technology
- * Beaverton, OR, USA 97005
- *
- * Web: www.vernier.com
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- *****************************************************************************/
-#include <linux/kernel.h>
-#include <linux/errno.h>
-#include <linux/init.h>
-#include <linux/slab.h>
-#include <linux/module.h>
-#include <linux/mutex.h>
-#include <linux/uaccess.h>
-#include <linux/usb.h>
-
-#include <linux/usb/vstusb.h>
-
-#define DRIVER_VERSION "VST USB Driver Version 1.5"
-#define DRIVER_DESC "Vernier Software Technology Bulk USB Driver"
-
-#ifdef CONFIG_USB_DYNAMIC_MINORS
- #define VSTUSB_MINOR_BASE 0
-#else
- #define VSTUSB_MINOR_BASE 199
-#endif
-
-#define USB_VENDOR_OCEANOPTICS 0x2457
-#define USB_VENDOR_VERNIER 0x08F7 /* Vernier Software & Technology */
-
-#define USB_PRODUCT_USB2000 0x1002
-#define USB_PRODUCT_ADC1000_FW 0x1003 /* firmware download (renumerates) */
-#define USB_PRODUCT_ADC1000 0x1004
-#define USB_PRODUCT_HR2000_FW 0x1009 /* firmware download (renumerates) */
-#define USB_PRODUCT_HR2000 0x100A
-#define USB_PRODUCT_HR4000_FW 0x1011 /* firmware download (renumerates) */
-#define USB_PRODUCT_HR4000 0x1012
-#define USB_PRODUCT_USB650 0x1014 /* "Red Tide" */
-#define USB_PRODUCT_QE65000 0x1018
-#define USB_PRODUCT_USB4000 0x1022
-#define USB_PRODUCT_USB325 0x1024 /* "Vernier Spectrometer" */
-
-#define USB_PRODUCT_LABPRO 0x0001
-#define USB_PRODUCT_LABQUEST 0x0005
-
-#define VST_MAXBUFFER (64*1024)
-
-static struct usb_device_id id_table[] = {
- { USB_DEVICE(USB_VENDOR_OCEANOPTICS, USB_PRODUCT_USB2000)},
- { USB_DEVICE(USB_VENDOR_OCEANOPTICS, USB_PRODUCT_HR4000)},
- { USB_DEVICE(USB_VENDOR_OCEANOPTICS, USB_PRODUCT_USB650)},
- { USB_DEVICE(USB_VENDOR_OCEANOPTICS, USB_PRODUCT_USB4000)},
- { USB_DEVICE(USB_VENDOR_OCEANOPTICS, USB_PRODUCT_USB325)},
- { USB_DEVICE(USB_VENDOR_VERNIER, USB_PRODUCT_LABQUEST)},
- { USB_DEVICE(USB_VENDOR_VERNIER, USB_PRODUCT_LABPRO)},
- {},
-};
-
-MODULE_DEVICE_TABLE(usb, id_table);
-
-struct vstusb_device {
- struct kref kref;
- struct mutex lock;
- struct usb_device *usb_dev;
- char present;
- char isopen;
- struct usb_anchor submitted;
- int rd_pipe;
- int rd_timeout_ms;
- int wr_pipe;
- int wr_timeout_ms;
-};
-#define to_vst_dev(d) container_of(d, struct vstusb_device, kref)
-
-static struct usb_driver vstusb_driver;
-
-static void vstusb_delete(struct kref *kref)
-{
- struct vstusb_device *vstdev = to_vst_dev(kref);
-
- usb_put_dev(vstdev->usb_dev);
- kfree(vstdev);
-}
-
-static int vstusb_open(struct inode *inode, struct file *file)
-{
- struct vstusb_device *vstdev;
- struct usb_interface *interface;
-
- interface = usb_find_interface(&vstusb_driver, iminor(inode));
-
- if (!interface) {
- printk(KERN_ERR KBUILD_MODNAME
- ": %s - error, can't find device for minor %d\n",
- __func__, iminor(inode));
- return -ENODEV;
- }
-
- vstdev = usb_get_intfdata(interface);
-
- if (!vstdev)
- return -ENODEV;
-
- /* lock this device */
- mutex_lock(&vstdev->lock);
-
- /* can only open one time */
- if ((!vstdev->present) || (vstdev->isopen)) {
- mutex_unlock(&vstdev->lock);
- return -EBUSY;
- }
-
- /* increment our usage count */
- kref_get(&vstdev->kref);
-
- vstdev->isopen = 1;
-
- /* save device in the file's private structure */
- file->private_data = vstdev;
-
- dev_dbg(&vstdev->usb_dev->dev, "%s: opened\n", __func__);
-
- mutex_unlock(&vstdev->lock);
-
- return 0;
-}
-
-static int vstusb_release(struct inode *inode, struct file *file)
-{
- struct vstusb_device *vstdev;
-
- vstdev = file->private_data;
-
- if (vstdev == NULL)
- return -ENODEV;
-
- mutex_lock(&vstdev->lock);
-
- vstdev->isopen = 0;
-
- dev_dbg(&vstdev->usb_dev->dev, "%s: released\n", __func__);
-
- mutex_unlock(&vstdev->lock);
-
- kref_put(&vstdev->kref, vstusb_delete);
-
- return 0;
-}
-
-static void usb_api_blocking_completion(struct urb *urb)
-{
- struct completion *completeit = urb->context;
-
- complete(completeit);
-}
-
-static int vstusb_fill_and_send_urb(struct urb *urb,
- struct usb_device *usb_dev,
- unsigned int pipe, void *data,
- unsigned int len, struct completion *done)
-{
- struct usb_host_endpoint *ep;
- struct usb_host_endpoint **hostep;
- unsigned int pipend;
-
- int status;
-
- hostep = usb_pipein(pipe) ? usb_dev->ep_in : usb_dev->ep_out;
- pipend = usb_pipeendpoint(pipe);
- ep = hostep[pipend];
-
- if (!ep || (len == 0))
- return -EINVAL;
-
- if ((ep->desc.bmAttributes & USB_ENDPOINT_XFERTYPE_MASK)
- == USB_ENDPOINT_XFER_INT) {
- pipe = (pipe & ~(3 << 30)) | (PIPE_INTERRUPT << 30);
- usb_fill_int_urb(urb, usb_dev, pipe, data, len,
- (usb_complete_t)usb_api_blocking_completion,
- NULL, ep->desc.bInterval);
- } else
- usb_fill_bulk_urb(urb, usb_dev, pipe, data, len,
- (usb_complete_t)usb_api_blocking_completion,
- NULL);
-
- init_completion(done);
- urb->context = done;
- urb->actual_length = 0;
- status = usb_submit_urb(urb, GFP_KERNEL);
-
- return status;
-}
-
-static int vstusb_complete_urb(struct urb *urb, struct completion *done,
- int timeout, int *actual_length)
-{
- unsigned long expire;
- int status;
-
- expire = timeout ? msecs_to_jiffies(timeout) : MAX_SCHEDULE_TIMEOUT;
- if (!wait_for_completion_interruptible_timeout(done, expire)) {
- usb_kill_urb(urb);
- status = urb->status == -ENOENT ? -ETIMEDOUT : urb->status;
-
- dev_dbg(&urb->dev->dev,
- "%s timed out on ep%d%s len=%d/%d, urb status = %d\n",
- current->comm,
- usb_pipeendpoint(urb->pipe),
- usb_pipein(urb->pipe) ? "in" : "out",
- urb->actual_length,
- urb->transfer_buffer_length,
- urb->status);
-
- } else {
- if (signal_pending(current)) {
- /* if really an error */
- if (urb->status && !((urb->status == -ENOENT) ||
- (urb->status == -ECONNRESET) ||
- (urb->status == -ESHUTDOWN))) {
- status = -EINTR;
- usb_kill_urb(urb);
- } else {
- status = 0;
- }
-
- dev_dbg(&urb->dev->dev,
- "%s: signal pending on ep%d%s len=%d/%d,"
- "urb status = %d\n",
- current->comm,
- usb_pipeendpoint(urb->pipe),
- usb_pipein(urb->pipe) ? "in" : "out",
- urb->actual_length,
- urb->transfer_buffer_length,
- urb->status);
-
- } else {
- status = urb->status;
- }
- }
-
- if (actual_length)
- *actual_length = urb->actual_length;
-
- return status;
-}
-
-static ssize_t vstusb_read(struct file *file, char __user *buffer,
- size_t count, loff_t *ppos)
-{
- struct vstusb_device *vstdev;
- int cnt = -1;
- void *buf;
- int retval = 0;
-
- struct urb *urb;
- struct usb_device *dev;
- unsigned int pipe;
- int timeout;
-
- DECLARE_COMPLETION_ONSTACK(done);
-
- vstdev = file->private_data;
-
- if (vstdev == NULL)
- return -ENODEV;
-
- /* verify that we actually want to read some data */
- if ((count == 0) || (count > VST_MAXBUFFER))
- return -EINVAL;
-
- /* lock this object */
- if (mutex_lock_interruptible(&vstdev->lock))
- return -ERESTARTSYS;
-
- /* anyone home */
- if (!vstdev->present) {
- mutex_unlock(&vstdev->lock);
- printk(KERN_ERR KBUILD_MODNAME
- ": %s: device not present\n", __func__);
- return -ENODEV;
- }
-
- /* pull out the necessary data */
- dev = vstdev->usb_dev;
- pipe = usb_rcvbulkpipe(dev, vstdev->rd_pipe);
- timeout = vstdev->rd_timeout_ms;
-
- buf = kmalloc(count, GFP_KERNEL);
- if (buf == NULL) {
- mutex_unlock(&vstdev->lock);
- return -ENOMEM;
- }
-
- urb = usb_alloc_urb(0, GFP_KERNEL);
- if (!urb) {
- kfree(buf);
- mutex_unlock(&vstdev->lock);
- return -ENOMEM;
- }
-
- usb_anchor_urb(urb, &vstdev->submitted);
- retval = vstusb_fill_and_send_urb(urb, dev, pipe, buf, count, &done);
- mutex_unlock(&vstdev->lock);
- if (retval) {
- usb_unanchor_urb(urb);
- dev_err(&dev->dev, "%s: error %d filling and sending urb %d\n",
- __func__, retval, pipe);
- goto exit;
- }
-
- retval = vstusb_complete_urb(urb, &done, timeout, &cnt);
- if (retval) {
- dev_err(&dev->dev, "%s: error %d completing urb %d\n",
- __func__, retval, pipe);
- goto exit;
- }
-
- if (copy_to_user(buffer, buf, cnt)) {
- dev_err(&dev->dev, "%s: can't copy_to_user\n", __func__);
- retval = -EFAULT;
- } else {
- retval = cnt;
- dev_dbg(&dev->dev, "%s: read %d bytes from pipe %d\n",
- __func__, cnt, pipe);
- }
-
-exit:
- usb_free_urb(urb);
- kfree(buf);
- return retval;
-}
-
-static ssize_t vstusb_write(struct file *file, const char __user *buffer,
- size_t count, loff_t *ppos)
-{
- struct vstusb_device *vstdev;
- int cnt = -1;
- void *buf;
- int retval = 0;
-
- struct urb *urb;
- struct usb_device *dev;
- unsigned int pipe;
- int timeout;
-
- DECLARE_COMPLETION_ONSTACK(done);
-
- vstdev = file->private_data;
-
- if (vstdev == NULL)
- return -ENODEV;
-
- /* verify that we actually have some data to write */
- if ((count == 0) || (count > VST_MAXBUFFER))
- return retval;
-
- /* lock this object */
- if (mutex_lock_interruptible(&vstdev->lock))
- return -ERESTARTSYS;
-
- /* anyone home */
- if (!vstdev->present) {
- mutex_unlock(&vstdev->lock);
- printk(KERN_ERR KBUILD_MODNAME
- ": %s: device not present\n", __func__);
- return -ENODEV;
- }
-
- /* pull out the necessary data */
- dev = vstdev->usb_dev;
- pipe = usb_sndbulkpipe(dev, vstdev->wr_pipe);
- timeout = vstdev->wr_timeout_ms;
-
- buf = kmalloc(count, GFP_KERNEL);
- if (buf == NULL) {
- mutex_unlock(&vstdev->lock);
- return -ENOMEM;
- }
-
- urb = usb_alloc_urb(0, GFP_KERNEL);
- if (!urb) {
- kfree(buf);
- mutex_unlock(&vstdev->lock);
- return -ENOMEM;
- }
-
- if (copy_from_user(buf, buffer, count)) {
- mutex_unlock(&vstdev->lock);
- dev_err(&dev->dev, "%s: can't copy_from_user\n", __func__);
- retval = -EFAULT;
- goto exit;
- }
-
- usb_anchor_urb(urb, &vstdev->submitted);
- retval = vstusb_fill_and_send_urb(urb, dev, pipe, buf, count, &done);
- mutex_unlock(&vstdev->lock);
- if (retval) {
- usb_unanchor_urb(urb);
- dev_err(&dev->dev, "%s: error %d filling and sending urb %d\n",
- __func__, retval, pipe);
- goto exit;
- }
-
- retval = vstusb_complete_urb(urb, &done, timeout, &cnt);
- if (retval) {
- dev_err(&dev->dev, "%s: error %d completing urb %d\n",
- __func__, retval, pipe);
- goto exit;
- } else {
- retval = cnt;
- dev_dbg(&dev->dev, "%s: sent %d bytes to pipe %d\n",
- __func__, cnt, pipe);
- }
-
-exit:
- usb_free_urb(urb);
- kfree(buf);
- return retval;
-}
-
-static long vstusb_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
-{
- int retval = 0;
- int cnt = -1;
- void __user *data = (void __user *)arg;
- struct vstusb_args usb_data;
-
- struct vstusb_device *vstdev;
- void *buffer = NULL; /* must be initialized. buffer is
- * referenced on exit but not all
- * ioctls allocate it */
-
- struct urb *urb = NULL; /* must be initialized. urb is
- * referenced on exit but not all
- * ioctls allocate it */
- struct usb_device *dev;
- unsigned int pipe;
- int timeout;
-
- DECLARE_COMPLETION_ONSTACK(done);
-
- vstdev = file->private_data;
-
- if (_IOC_TYPE(cmd) != VST_IOC_MAGIC) {
- dev_warn(&vstdev->usb_dev->dev,
- "%s: ioctl command %x, bad ioctl magic %x, "
- "expected %x\n", __func__, cmd,
- _IOC_TYPE(cmd), VST_IOC_MAGIC);
- return -EINVAL;
- }
-
- if (vstdev == NULL)
- return -ENODEV;
-
- if (copy_from_user(&usb_data, data, sizeof(struct vstusb_args))) {
- dev_err(&vstdev->usb_dev->dev, "%s: can't copy_from_user\n",
- __func__);
- return -EFAULT;
- }
-
- /* lock this object */
- if (mutex_lock_interruptible(&vstdev->lock)) {
- retval = -ERESTARTSYS;
- goto exit;
- }
-
- /* anyone home */
- if (!vstdev->present) {
- mutex_unlock(&vstdev->lock);
- dev_err(&vstdev->usb_dev->dev, "%s: device not present\n",
- __func__);
- retval = -ENODEV;
- goto exit;
- }
-
- /* pull out the necessary data */
- dev = vstdev->usb_dev;
-
- switch (cmd) {
-
- case IOCTL_VSTUSB_CONFIG_RW:
-
- vstdev->rd_pipe = usb_data.rd_pipe;
- vstdev->rd_timeout_ms = usb_data.rd_timeout_ms;
- vstdev->wr_pipe = usb_data.wr_pipe;
- vstdev->wr_timeout_ms = usb_data.wr_timeout_ms;
-
- mutex_unlock(&vstdev->lock);
-
- dev_dbg(&dev->dev, "%s: setting pipes/timeouts, "
- "rdpipe = %d, rdtimeout = %d, "
- "wrpipe = %d, wrtimeout = %d\n", __func__,
- vstdev->rd_pipe, vstdev->rd_timeout_ms,
- vstdev->wr_pipe, vstdev->wr_timeout_ms);
- break;
-
- case IOCTL_VSTUSB_SEND_PIPE:
-
- if ((usb_data.count == 0) || (usb_data.count > VST_MAXBUFFER)) {
- mutex_unlock(&vstdev->lock);
- retval = -EINVAL;
- goto exit;
- }
-
- buffer = kmalloc(usb_data.count, GFP_KERNEL);
- if (buffer == NULL) {
- mutex_unlock(&vstdev->lock);
- retval = -ENOMEM;
- goto exit;
- }
-
- urb = usb_alloc_urb(0, GFP_KERNEL);
- if (!urb) {
- mutex_unlock(&vstdev->lock);
- retval = -ENOMEM;
- goto exit;
- }
-
- timeout = usb_data.timeout_ms;
-
- pipe = usb_sndbulkpipe(dev, usb_data.pipe);
-
- if (copy_from_user(buffer, usb_data.buffer, usb_data.count)) {
- dev_err(&dev->dev, "%s: can't copy_from_user\n",
- __func__);
- mutex_unlock(&vstdev->lock);
- retval = -EFAULT;
- goto exit;
- }
-
- usb_anchor_urb(urb, &vstdev->submitted);
- retval = vstusb_fill_and_send_urb(urb, dev, pipe, buffer,
- usb_data.count, &done);
- mutex_unlock(&vstdev->lock);
- if (retval) {
- usb_unanchor_urb(urb);
- dev_err(&dev->dev,
- "%s: error %d filling and sending urb %d\n",
- __func__, retval, pipe);
- goto exit;
- }
-
- retval = vstusb_complete_urb(urb, &done, timeout, &cnt);
- if (retval) {
- dev_err(&dev->dev, "%s: error %d completing urb %d\n",
- __func__, retval, pipe);
- }
-
- break;
- case IOCTL_VSTUSB_RECV_PIPE:
-
- if ((usb_data.count == 0) || (usb_data.count > VST_MAXBUFFER)) {
- mutex_unlock(&vstdev->lock);
- retval = -EINVAL;
- goto exit;
- }
-
- buffer = kmalloc(usb_data.count, GFP_KERNEL);
- if (buffer == NULL) {
- mutex_unlock(&vstdev->lock);
- retval = -ENOMEM;
- goto exit;
- }
-
- urb = usb_alloc_urb(0, GFP_KERNEL);
- if (!urb) {
- mutex_unlock(&vstdev->lock);
- retval = -ENOMEM;
- goto exit;
- }
-
- timeout = usb_data.timeout_ms;
-
- pipe = usb_rcvbulkpipe(dev, usb_data.pipe);
-
- usb_anchor_urb(urb, &vstdev->submitted);
- retval = vstusb_fill_and_send_urb(urb, dev, pipe, buffer,
- usb_data.count, &done);
- mutex_unlock(&vstdev->lock);
- if (retval) {
- usb_unanchor_urb(urb);
- dev_err(&dev->dev,
- "%s: error %d filling and sending urb %d\n",
- __func__, retval, pipe);
- goto exit;
- }
-
- retval = vstusb_complete_urb(urb, &done, timeout, &cnt);
- if (retval) {
- dev_err(&dev->dev, "%s: error %d completing urb %d\n",
- __func__, retval, pipe);
- goto exit;
- }
-
- if (copy_to_user(usb_data.buffer, buffer, cnt)) {
- dev_err(&dev->dev, "%s: can't copy_to_user\n",
- __func__);
- retval = -EFAULT;
- goto exit;
- }
-
- usb_data.count = cnt;
- if (copy_to_user(data, &usb_data, sizeof(struct vstusb_args))) {
- dev_err(&dev->dev, "%s: can't copy_to_user\n",
- __func__);
- retval = -EFAULT;
- } else {
- dev_dbg(&dev->dev, "%s: recv %zd bytes from pipe %d\n",
- __func__, usb_data.count, usb_data.pipe);
- }
-
- break;
-
- default:
- mutex_unlock(&vstdev->lock);
- dev_warn(&dev->dev, "ioctl_vstusb: invalid ioctl cmd %x\n",
- cmd);
- return -EINVAL;
- break;
- }
-exit:
- usb_free_urb(urb);
- kfree(buffer);
- return retval;
-}
-
-static const struct file_operations vstusb_fops = {
- .owner = THIS_MODULE,
- .read = vstusb_read,
- .write = vstusb_write,
- .unlocked_ioctl = vstusb_ioctl,
- .compat_ioctl = vstusb_ioctl,
- .open = vstusb_open,
- .release = vstusb_release,
-};
-
-static struct usb_class_driver usb_vstusb_class = {
- .name = "usb/vstusb%d",
- .fops = &vstusb_fops,
- .minor_base = VSTUSB_MINOR_BASE,
-};
-
-static int vstusb_probe(struct usb_interface *intf,
- const struct usb_device_id *id)
-{
- struct usb_device *dev = interface_to_usbdev(intf);
- struct vstusb_device *vstdev;
- int i;
- int retval = 0;
-
- /* allocate memory for our device state and intialize it */
-
- vstdev = kzalloc(sizeof(*vstdev), GFP_KERNEL);
- if (vstdev == NULL)
- return -ENOMEM;
-
- /* must do usb_get_dev() prior to kref_init() since the kref_put()
- * release function will do a usb_put_dev() */
- usb_get_dev(dev);
- kref_init(&vstdev->kref);
- mutex_init(&vstdev->lock);
-
- i = dev->descriptor.bcdDevice;
-
- dev_dbg(&intf->dev, "Version %1d%1d.%1d%1d found at address %d\n",
- (i & 0xF000) >> 12, (i & 0xF00) >> 8,
- (i & 0xF0) >> 4, (i & 0xF), dev->devnum);
-
- vstdev->present = 1;
- vstdev->isopen = 0;
- vstdev->usb_dev = dev;
- init_usb_anchor(&vstdev->submitted);
-
- usb_set_intfdata(intf, vstdev);
- retval = usb_register_dev(intf, &usb_vstusb_class);
- if (retval) {
- dev_err(&intf->dev,
- "%s: Not able to get a minor for this device.\n",
- __func__);
- usb_set_intfdata(intf, NULL);
- kref_put(&vstdev->kref, vstusb_delete);
- return retval;
- }
-
- /* let the user know what node this device is now attached to */
- dev_info(&intf->dev,
- "VST USB Device #%d now attached to major %d minor %d\n",
- (intf->minor - VSTUSB_MINOR_BASE), USB_MAJOR, intf->minor);
-
- dev_info(&intf->dev, "%s, %s\n", DRIVER_DESC, DRIVER_VERSION);
-
- return retval;
-}
-
-static void vstusb_disconnect(struct usb_interface *intf)
-{
- struct vstusb_device *vstdev = usb_get_intfdata(intf);
-
- usb_deregister_dev(intf, &usb_vstusb_class);
- usb_set_intfdata(intf, NULL);
-
- if (vstdev) {
-
- mutex_lock(&vstdev->lock);
- vstdev->present = 0;
-
- usb_kill_anchored_urbs(&vstdev->submitted);
-
- mutex_unlock(&vstdev->lock);
-
- kref_put(&vstdev->kref, vstusb_delete);
- }
-
-}
-
-static int vstusb_suspend(struct usb_interface *intf, pm_message_t message)
-{
- struct vstusb_device *vstdev = usb_get_intfdata(intf);
- int time;
- if (!vstdev)
- return 0;
-
- mutex_lock(&vstdev->lock);
- time = usb_wait_anchor_empty_timeout(&vstdev->submitted, 1000);
- if (!time)
- usb_kill_anchored_urbs(&vstdev->submitted);
- mutex_unlock(&vstdev->lock);
-
- return 0;
-}
-
-static int vstusb_resume(struct usb_interface *intf)
-{
- return 0;
-}
-
-static struct usb_driver vstusb_driver = {
- .name = "vstusb",
- .probe = vstusb_probe,
- .disconnect = vstusb_disconnect,
- .suspend = vstusb_suspend,
- .resume = vstusb_resume,
- .id_table = id_table,
-};
-
-static int __init vstusb_init(void)
-{
- int rc;
-
- rc = usb_register(&vstusb_driver);
- if (rc)
- printk(KERN_ERR "%s: failed to register (%d)", __func__, rc);
-
- return rc;
-}
-
-static void __exit vstusb_exit(void)
-{
- usb_deregister(&vstusb_driver);
-}
-
-module_init(vstusb_init);
-module_exit(vstusb_exit);
-
-MODULE_AUTHOR("Dennis O'Brien/Stephen Ware");
-MODULE_DESCRIPTION(DRIVER_VERSION);
-MODULE_LICENSE("GPL");
diff --git a/drivers/usb/mon/mon_bin.c b/drivers/usb/mon/mon_bin.c
index 385ec05..6dd44bc 100644
--- a/drivers/usb/mon/mon_bin.c
+++ b/drivers/usb/mon/mon_bin.c
@@ -460,8 +460,8 @@
char ev_type, int status)
{
const struct usb_endpoint_descriptor *epd = &urb->ep->desc;
- unsigned long flags;
struct timeval ts;
+ unsigned long flags;
unsigned int urb_length;
unsigned int offset;
unsigned int length;
@@ -600,10 +600,13 @@
static void mon_bin_error(void *data, struct urb *urb, int error)
{
struct mon_reader_bin *rp = data;
+ struct timeval ts;
unsigned long flags;
unsigned int offset;
struct mon_bin_hdr *ep;
+ do_gettimeofday(&ts);
+
spin_lock_irqsave(&rp->b_lock, flags);
offset = mon_buff_area_alloc(rp, PKT_SIZE);
@@ -623,6 +626,8 @@
ep->devnum = urb->dev->devnum;
ep->busnum = urb->dev->bus->busnum;
ep->id = (unsigned long) urb;
+ ep->ts_sec = ts.tv_sec;
+ ep->ts_usec = ts.tv_usec;
ep->status = error;
ep->flag_setup = '-';
diff --git a/drivers/usb/mon/mon_text.c b/drivers/usb/mon/mon_text.c
index 047568f..31c1188 100644
--- a/drivers/usb/mon/mon_text.c
+++ b/drivers/usb/mon/mon_text.c
@@ -180,7 +180,7 @@
unsigned int stamp;
do_gettimeofday(&tval);
- stamp = tval.tv_sec & 0xFFFF; /* 2^32 = 4294967296. Limit to 4096s. */
+ stamp = tval.tv_sec & 0xFFF; /* 2^32 = 4294967296. Limit to 4096s. */
stamp = stamp * 1000000 + tval.tv_usec;
return stamp;
}
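
The narrower mask is an overflow fix: stamp is a 32-bit unsigned int, and folding microseconds into a 16-bit seconds value no longer fits. Roughly:

    0xFFFF seconds: 65535 * 1000000 + 999999 = 65,535,999,999  >  2^32 - 1 = 4,294,967,295
    0xFFF  seconds:  4095 * 1000000 + 999999 =  4,095,999,999  <  2^32 - 1

so truncating to 4096 seconds keeps the combined value within 32 bits, matching the "Limit to 4096s" comment.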
@@ -273,12 +273,12 @@
ep->type = 'E';
ep->id = (unsigned long) urb;
- ep->busnum = 0;
+ ep->busnum = urb->dev->bus->busnum;
ep->devnum = urb->dev->devnum;
ep->epnum = usb_endpoint_num(&urb->ep->desc);
ep->xfertype = usb_endpoint_type(&urb->ep->desc);
ep->is_in = usb_urb_dir_in(urb);
- ep->tstamp = 0;
+ ep->tstamp = mon_get_timestamp();
ep->length = 0;
ep->status = error;
diff --git a/drivers/usb/musb/blackfin.c b/drivers/usb/musb/blackfin.c
index ad26e65..bcee133 100644
--- a/drivers/usb/musb/blackfin.c
+++ b/drivers/usb/musb/blackfin.c
@@ -30,7 +30,6 @@
void __iomem *fifo = hw_ep->fifo;
void __iomem *epio = hw_ep->regs;
u8 epnum = hw_ep->epnum;
- u16 dma_reg = 0;
prefetch((u8 *)src);
@@ -42,15 +41,17 @@
dump_fifo_data(src, len);
if (!ANOMALY_05000380 && epnum != 0) {
- flush_dcache_range((unsigned int)src,
- (unsigned int)(src + len));
+ u16 dma_reg;
+
+ flush_dcache_range((unsigned long)src,
+ (unsigned long)(src + len));
/* Setup DMA address register */
- dma_reg = (u16) ((u32) src & 0xFFFF);
+ dma_reg = (u32)src;
bfin_write16(USB_DMA_REG(epnum, USB_DMAx_ADDR_LOW), dma_reg);
SSYNC();
- dma_reg = (u16) (((u32) src >> 16) & 0xFFFF);
+ dma_reg = (u32)src >> 16;
bfin_write16(USB_DMA_REG(epnum, USB_DMAx_ADDR_HIGH), dma_reg);
SSYNC();
@@ -79,12 +80,9 @@
SSYNC();
if (unlikely((unsigned long)src & 0x01))
- outsw_8((unsigned long)fifo, src,
- len & 0x01 ? (len >> 1) + 1 : len >> 1);
+ outsw_8((unsigned long)fifo, src, (len + 1) >> 1);
else
- outsw((unsigned long)fifo, src,
- len & 0x01 ? (len >> 1) + 1 : len >> 1);
-
+ outsw((unsigned long)fifo, src, (len + 1) >> 1);
}
}
/*
@@ -94,19 +92,19 @@
{
void __iomem *fifo = hw_ep->fifo;
u8 epnum = hw_ep->epnum;
- u16 dma_reg = 0;
if (ANOMALY_05000467 && epnum != 0) {
+ u16 dma_reg;
- invalidate_dcache_range((unsigned int)dst,
- (unsigned int)(dst + len));
+ invalidate_dcache_range((unsigned long)dst,
+ (unsigned long)(dst + len));
/* Setup DMA address register */
- dma_reg = (u16) ((u32) dst & 0xFFFF);
+ dma_reg = (u32)dst;
bfin_write16(USB_DMA_REG(epnum, USB_DMAx_ADDR_LOW), dma_reg);
SSYNC();
- dma_reg = (u16) (((u32) dst >> 16) & 0xFFFF);
+ dma_reg = (u32)dst >> 16;
bfin_write16(USB_DMA_REG(epnum, USB_DMAx_ADDR_HIGH), dma_reg);
SSYNC();
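
The Tx FIFO hunk above also folds the odd/even length test into a single expression; both forms compute the number of 16-bit words needed for len bytes, i.e. DIV_ROUND_UP(len, 2):

    even len, e.g. 8:  old: 8 >> 1       = 4    new: (8 + 1) >> 1 = 4
    odd  len, e.g. 9:  old: (9 >> 1) + 1 = 5    new: (9 + 1) >> 1 = 5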
diff --git a/drivers/usb/musb/cppi_dma.c b/drivers/usb/musb/cppi_dma.c
index a44a450..3c69a76 100644
--- a/drivers/usb/musb/cppi_dma.c
+++ b/drivers/usb/musb/cppi_dma.c
@@ -1191,8 +1191,13 @@
bd = tx_ch->head;
+ /*
+ * If the head BD is NULL, this could mean that an abort interrupt
+ * needs to be acknowledged.
+ */
if (NULL == bd) {
DBG(1, "null BD\n");
+ tx_ram->tx_complete = 0;
continue;
}
@@ -1412,15 +1417,6 @@
if (cppi_ch->transmit) {
struct cppi_tx_stateram __iomem *tx_ram;
- int enabled;
-
- /* mask interrupts raised to signal teardown complete. */
- enabled = musb_readl(tibase, DAVINCI_TXCPPI_INTENAB_REG)
- & (1 << cppi_ch->index);
- if (enabled)
- musb_writel(tibase, DAVINCI_TXCPPI_INTCLR_REG,
- (1 << cppi_ch->index));
-
/* REVISIT put timeouts on these controller handshakes */
cppi_dump_tx(6, cppi_ch, " (teardown)");
@@ -1435,7 +1431,6 @@
do {
value = musb_readl(&tx_ram->tx_complete, 0);
} while (0xFFFFFFFC != value);
- musb_writel(&tx_ram->tx_complete, 0, 0xFFFFFFFC);
/* FIXME clean up the transfer state ... here?
* the completion routine should get called with
@@ -1448,23 +1443,15 @@
musb_writew(regs, MUSB_TXCSR, value);
musb_writew(regs, MUSB_TXCSR, value);
- /* While we scrub the TX state RAM, ensure that we clean
- * up any interrupt that's currently asserted:
+ /*
* 1. Write to completion Ptr value 0x1(bit 0 set)
* (write back mode)
- * 2. Write to completion Ptr value 0x0(bit 0 cleared)
- * (compare mode)
- * Value written is compared(for bits 31:2) and when
- * equal, interrupt is deasserted.
+ * 2. Wait for the abort interrupt, then put the channel in
+ * compare mode by writing 1 to the tx_complete register.
*/
cppi_reset_tx(tx_ram, 1);
- musb_writel(&tx_ram->tx_complete, 0, 0);
-
- /* re-enable interrupt */
- if (enabled)
- musb_writel(tibase, DAVINCI_TXCPPI_INTENAB_REG,
- (1 << cppi_ch->index));
-
+ cppi_ch->head = 0;
+ musb_writel(&tx_ram->tx_complete, 0, 1);
cppi_dump_tx(5, cppi_ch, " (done teardown)");
/* REVISIT tx side _should_ clean up the same way
diff --git a/drivers/usb/musb/musb_core.c b/drivers/usb/musb/musb_core.c
index 738efd8..b4bbf8f 100644
--- a/drivers/usb/musb/musb_core.c
+++ b/drivers/usb/musb/musb_core.c
@@ -557,6 +557,69 @@
handled = IRQ_HANDLED;
}
+
+ if (int_usb & MUSB_INTR_SUSPEND) {
+ DBG(1, "SUSPEND (%s) devctl %02x power %02x\n",
+ otg_state_string(musb), devctl, power);
+ handled = IRQ_HANDLED;
+
+ switch (musb->xceiv->state) {
+#ifdef CONFIG_USB_MUSB_OTG
+ case OTG_STATE_A_PERIPHERAL:
+ /* We also come here if the cable is removed, since
+ * this silicon doesn't report ID-no-longer-grounded.
+ *
+ * We depend on T(a_wait_bcon) to shut us down, and
+ * hope users don't do anything dicey during this
+ * undesired detour through A_WAIT_BCON.
+ */
+ musb_hnp_stop(musb);
+ usb_hcd_resume_root_hub(musb_to_hcd(musb));
+ musb_root_disconnect(musb);
+ musb_platform_try_idle(musb, jiffies
+ + msecs_to_jiffies(musb->a_wait_bcon
+ ? : OTG_TIME_A_WAIT_BCON));
+
+ break;
+#endif
+ case OTG_STATE_B_IDLE:
+ if (!musb->is_active)
+ break;
+ case OTG_STATE_B_PERIPHERAL:
+ musb_g_suspend(musb);
+ musb->is_active = is_otg_enabled(musb)
+ && musb->xceiv->gadget->b_hnp_enable;
+ if (musb->is_active) {
+#ifdef CONFIG_USB_MUSB_OTG
+ musb->xceiv->state = OTG_STATE_B_WAIT_ACON;
+ DBG(1, "HNP: Setting timer for b_ase0_brst\n");
+ mod_timer(&musb->otg_timer, jiffies
+ + msecs_to_jiffies(
+ OTG_TIME_B_ASE0_BRST));
+#endif
+ }
+ break;
+ case OTG_STATE_A_WAIT_BCON:
+ if (musb->a_wait_bcon != 0)
+ musb_platform_try_idle(musb, jiffies
+ + msecs_to_jiffies(musb->a_wait_bcon));
+ break;
+ case OTG_STATE_A_HOST:
+ musb->xceiv->state = OTG_STATE_A_SUSPEND;
+ musb->is_active = is_otg_enabled(musb)
+ && musb->xceiv->host->b_hnp_enable;
+ break;
+ case OTG_STATE_B_HOST:
+ /* Transition to B_PERIPHERAL, see 6.8.2.6 p 44 */
+ DBG(1, "REVISIT: SUSPEND as B_HOST\n");
+ break;
+ default:
+ /* "should not happen" */
+ musb->is_active = 0;
+ break;
+ }
+ }
+
if (int_usb & MUSB_INTR_CONNECT) {
struct usb_hcd *hcd = musb_to_hcd(musb);
@@ -625,136 +688,6 @@
}
#endif /* CONFIG_USB_MUSB_HDRC_HCD */
- /* mentor saves a bit: bus reset and babble share the same irq.
- * only host sees babble; only peripheral sees bus reset.
- */
- if (int_usb & MUSB_INTR_RESET) {
- if (is_host_capable() && (devctl & MUSB_DEVCTL_HM) != 0) {
- /*
- * Looks like non-HS BABBLE can be ignored, but
- * HS BABBLE is an error condition. For HS the solution
- * is to avoid babble in the first place and fix what
- * caused BABBLE. When HS BABBLE happens we can only
- * stop the session.
- */
- if (devctl & (MUSB_DEVCTL_FSDEV | MUSB_DEVCTL_LSDEV))
- DBG(1, "BABBLE devctl: %02x\n", devctl);
- else {
- ERR("Stopping host session -- babble\n");
- musb_writeb(mbase, MUSB_DEVCTL, 0);
- }
- } else if (is_peripheral_capable()) {
- DBG(1, "BUS RESET as %s\n", otg_state_string(musb));
- switch (musb->xceiv->state) {
-#ifdef CONFIG_USB_OTG
- case OTG_STATE_A_SUSPEND:
- /* We need to ignore disconnect on suspend
- * otherwise tusb 2.0 won't reconnect after a
- * power cycle, which breaks otg compliance.
- */
- musb->ignore_disconnect = 1;
- musb_g_reset(musb);
- /* FALLTHROUGH */
- case OTG_STATE_A_WAIT_BCON: /* OPT TD.4.7-900ms */
- /* never use invalid T(a_wait_bcon) */
- DBG(1, "HNP: in %s, %d msec timeout\n",
- otg_state_string(musb),
- TA_WAIT_BCON(musb));
- mod_timer(&musb->otg_timer, jiffies
- + msecs_to_jiffies(TA_WAIT_BCON(musb)));
- break;
- case OTG_STATE_A_PERIPHERAL:
- musb->ignore_disconnect = 0;
- del_timer(&musb->otg_timer);
- musb_g_reset(musb);
- break;
- case OTG_STATE_B_WAIT_ACON:
- DBG(1, "HNP: RESET (%s), to b_peripheral\n",
- otg_state_string(musb));
- musb->xceiv->state = OTG_STATE_B_PERIPHERAL;
- musb_g_reset(musb);
- break;
-#endif
- case OTG_STATE_B_IDLE:
- musb->xceiv->state = OTG_STATE_B_PERIPHERAL;
- /* FALLTHROUGH */
- case OTG_STATE_B_PERIPHERAL:
- musb_g_reset(musb);
- break;
- default:
- DBG(1, "Unhandled BUS RESET as %s\n",
- otg_state_string(musb));
- }
- }
-
- handled = IRQ_HANDLED;
- }
- schedule_work(&musb->irq_work);
-
- return handled;
-}
-
-/*
- * Interrupt Service Routine to record USB "global" interrupts.
- * Since these do not happen often and signify things of
- * paramount importance, it seems OK to check them individually;
- * the order of the tests is specified in the manual
- *
- * @param musb instance pointer
- * @param int_usb register contents
- * @param devctl
- * @param power
- */
-static irqreturn_t musb_stage2_irq(struct musb *musb, u8 int_usb,
- u8 devctl, u8 power)
-{
- irqreturn_t handled = IRQ_NONE;
-
-#if 0
-/* REVISIT ... this would be for multiplexing periodic endpoints, or
- * supporting transfer phasing to prevent exceeding ISO bandwidth
- * limits of a given frame or microframe.
- *
- * It's not needed for peripheral side, which dedicates endpoints;
- * though it _might_ use SOF irqs for other purposes.
- *
- * And it's not currently needed for host side, which also dedicates
- * endpoints, relies on TX/RX interval registers, and isn't claimed
- * to support ISO transfers yet.
- */
- if (int_usb & MUSB_INTR_SOF) {
- void __iomem *mbase = musb->mregs;
- struct musb_hw_ep *ep;
- u8 epnum;
- u16 frame;
-
- DBG(6, "START_OF_FRAME\n");
- handled = IRQ_HANDLED;
-
- /* start any periodic Tx transfers waiting for current frame */
- frame = musb_readw(mbase, MUSB_FRAME);
- ep = musb->endpoints;
- for (epnum = 1; (epnum < musb->nr_endpoints)
- && (musb->epmask >= (1 << epnum));
- epnum++, ep++) {
- /*
- * FIXME handle framecounter wraps (12 bits)
- * eliminate duplicated StartUrb logic
- */
- if (ep->dwWaitFrame >= frame) {
- ep->dwWaitFrame = 0;
- pr_debug("SOF --> periodic TX%s on %d\n",
- ep->tx_channel ? " DMA" : "",
- epnum);
- if (!ep->tx_channel)
- musb_h_tx_start(musb, epnum);
- else
- cppi_hostdma_start(musb, epnum);
- }
- } /* end of for loop */
- }
-#endif
-
if ((int_usb & MUSB_INTR_DISCONNECT) && !musb->ignore_disconnect) {
DBG(1, "DISCONNECT (%s) as %s, devctl %02x\n",
otg_state_string(musb),
@@ -803,69 +736,118 @@
otg_state_string(musb));
break;
}
-
- schedule_work(&musb->irq_work);
}
- if (int_usb & MUSB_INTR_SUSPEND) {
- DBG(1, "SUSPEND (%s) devctl %02x power %02x\n",
- otg_state_string(musb), devctl, power);
+ /* mentor saves a bit: bus reset and babble share the same irq.
+ * only host sees babble; only peripheral sees bus reset.
+ */
+ if (int_usb & MUSB_INTR_RESET) {
+ handled = IRQ_HANDLED;
+ if (is_host_capable() && (devctl & MUSB_DEVCTL_HM) != 0) {
+ /*
+ * Looks like non-HS BABBLE can be ignored, but
+ * HS BABBLE is an error condition. For HS the solution
+ * is to avoid babble in the first place and fix what
+ * caused BABBLE. When HS BABBLE happens we can only
+ * stop the session.
+ */
+ if (devctl & (MUSB_DEVCTL_FSDEV | MUSB_DEVCTL_LSDEV))
+ DBG(1, "BABBLE devctl: %02x\n", devctl);
+ else {
+ ERR("Stopping host session -- babble\n");
+ musb_writeb(musb->mregs, MUSB_DEVCTL, 0);
+ }
+ } else if (is_peripheral_capable()) {
+ DBG(1, "BUS RESET as %s\n", otg_state_string(musb));
+ switch (musb->xceiv->state) {
+#ifdef CONFIG_USB_OTG
+ case OTG_STATE_A_SUSPEND:
+ /* We need to ignore disconnect on suspend
+ * otherwise tusb 2.0 won't reconnect after a
+ * power cycle, which breaks otg compliance.
+ */
+ musb->ignore_disconnect = 1;
+ musb_g_reset(musb);
+ /* FALLTHROUGH */
+ case OTG_STATE_A_WAIT_BCON: /* OPT TD.4.7-900ms */
+ /* never use invalid T(a_wait_bcon) */
+ DBG(1, "HNP: in %s, %d msec timeout\n",
+ otg_state_string(musb),
+ TA_WAIT_BCON(musb));
+ mod_timer(&musb->otg_timer, jiffies
+ + msecs_to_jiffies(TA_WAIT_BCON(musb)));
+ break;
+ case OTG_STATE_A_PERIPHERAL:
+ musb->ignore_disconnect = 0;
+ del_timer(&musb->otg_timer);
+ musb_g_reset(musb);
+ break;
+ case OTG_STATE_B_WAIT_ACON:
+ DBG(1, "HNP: RESET (%s), to b_peripheral\n",
+ otg_state_string(musb));
+ musb->xceiv->state = OTG_STATE_B_PERIPHERAL;
+ musb_g_reset(musb);
+ break;
+#endif
+ case OTG_STATE_B_IDLE:
+ musb->xceiv->state = OTG_STATE_B_PERIPHERAL;
+ /* FALLTHROUGH */
+ case OTG_STATE_B_PERIPHERAL:
+ musb_g_reset(musb);
+ break;
+ default:
+ DBG(1, "Unhandled BUS RESET as %s\n",
+ otg_state_string(musb));
+ }
+ }
+ }
+
+#if 0
+/* REVISIT ... this would be for multiplexing periodic endpoints, or
+ * supporting transfer phasing to prevent exceeding ISO bandwidth
+ * limits of a given frame or microframe.
+ *
+ * It's not needed for peripheral side, which dedicates endpoints;
+ * though it _might_ use SOF irqs for other purposes.
+ *
+ * And it's not currently needed for host side, which also dedicates
+ * endpoints, relies on TX/RX interval registers, and isn't claimed
+ * to support ISO transfers yet.
+ */
+ if (int_usb & MUSB_INTR_SOF) {
+ void __iomem *mbase = musb->mregs;
+ struct musb_hw_ep *ep;
+ u8 epnum;
+ u16 frame;
+
+ DBG(6, "START_OF_FRAME\n");
handled = IRQ_HANDLED;
- switch (musb->xceiv->state) {
-#ifdef CONFIG_USB_MUSB_OTG
- case OTG_STATE_A_PERIPHERAL:
- /* We also come here if the cable is removed, since
- * this silicon doesn't report ID-no-longer-grounded.
- *
- * We depend on T(a_wait_bcon) to shut us down, and
- * hope users don't do anything dicey during this
- * undesired detour through A_WAIT_BCON.
+ /* start any periodic Tx transfers waiting for current frame */
+ frame = musb_readw(mbase, MUSB_FRAME);
+ ep = musb->endpoints;
+ for (epnum = 1; (epnum < musb->nr_endpoints)
+ && (musb->epmask >= (1 << epnum));
+ epnum++, ep++) {
+ /*
+ * FIXME handle framecounter wraps (12 bits)
+ * eliminate duplicated StartUrb logic
*/
- musb_hnp_stop(musb);
- usb_hcd_resume_root_hub(musb_to_hcd(musb));
- musb_root_disconnect(musb);
- musb_platform_try_idle(musb, jiffies
- + msecs_to_jiffies(musb->a_wait_bcon
- ? : OTG_TIME_A_WAIT_BCON));
- break;
-#endif
- case OTG_STATE_B_PERIPHERAL:
- musb_g_suspend(musb);
- musb->is_active = is_otg_enabled(musb)
- && musb->xceiv->gadget->b_hnp_enable;
- if (musb->is_active) {
-#ifdef CONFIG_USB_MUSB_OTG
- musb->xceiv->state = OTG_STATE_B_WAIT_ACON;
- DBG(1, "HNP: Setting timer for b_ase0_brst\n");
- mod_timer(&musb->otg_timer, jiffies
- + msecs_to_jiffies(
- OTG_TIME_B_ASE0_BRST));
-#endif
+ if (ep->dwWaitFrame >= frame) {
+ ep->dwWaitFrame = 0;
+ pr_debug("SOF --> periodic TX%s on %d\n",
+ ep->tx_channel ? " DMA" : "",
+ epnum);
+ if (!ep->tx_channel)
+ musb_h_tx_start(musb, epnum);
+ else
+ cppi_hostdma_start(musb, epnum);
}
- break;
- case OTG_STATE_A_WAIT_BCON:
- if (musb->a_wait_bcon != 0)
- musb_platform_try_idle(musb, jiffies
- + msecs_to_jiffies(musb->a_wait_bcon));
- break;
- case OTG_STATE_A_HOST:
- musb->xceiv->state = OTG_STATE_A_SUSPEND;
- musb->is_active = is_otg_enabled(musb)
- && musb->xceiv->host->b_hnp_enable;
- break;
- case OTG_STATE_B_HOST:
- /* Transition to B_PERIPHERAL, see 6.8.2.6 p 44 */
- DBG(1, "REVISIT: SUSPEND as B_HOST\n");
- break;
- default:
- /* "should not happen" */
- musb->is_active = 0;
- break;
- }
- schedule_work(&musb->irq_work);
+ } /* end of for loop */
}
+#endif
+ schedule_work(&musb->irq_work);
return handled;
}
@@ -1095,6 +1077,36 @@
{ .hw_ep_num = 15, .style = FIFO_RXTX, .maxpacket = 1024, },
};
+/* mode 5 - fits in 8KB */
+static struct fifo_cfg __initdata mode_5_cfg[] = {
+{ .hw_ep_num = 1, .style = FIFO_TX, .maxpacket = 512, },
+{ .hw_ep_num = 1, .style = FIFO_RX, .maxpacket = 512, },
+{ .hw_ep_num = 2, .style = FIFO_TX, .maxpacket = 512, },
+{ .hw_ep_num = 2, .style = FIFO_RX, .maxpacket = 512, },
+{ .hw_ep_num = 3, .style = FIFO_TX, .maxpacket = 512, },
+{ .hw_ep_num = 3, .style = FIFO_RX, .maxpacket = 512, },
+{ .hw_ep_num = 4, .style = FIFO_TX, .maxpacket = 512, },
+{ .hw_ep_num = 4, .style = FIFO_RX, .maxpacket = 512, },
+{ .hw_ep_num = 5, .style = FIFO_TX, .maxpacket = 512, },
+{ .hw_ep_num = 5, .style = FIFO_RX, .maxpacket = 512, },
+{ .hw_ep_num = 6, .style = FIFO_TX, .maxpacket = 32, },
+{ .hw_ep_num = 6, .style = FIFO_RX, .maxpacket = 32, },
+{ .hw_ep_num = 7, .style = FIFO_TX, .maxpacket = 32, },
+{ .hw_ep_num = 7, .style = FIFO_RX, .maxpacket = 32, },
+{ .hw_ep_num = 8, .style = FIFO_TX, .maxpacket = 32, },
+{ .hw_ep_num = 8, .style = FIFO_RX, .maxpacket = 32, },
+{ .hw_ep_num = 9, .style = FIFO_TX, .maxpacket = 32, },
+{ .hw_ep_num = 9, .style = FIFO_RX, .maxpacket = 32, },
+{ .hw_ep_num = 10, .style = FIFO_TX, .maxpacket = 32, },
+{ .hw_ep_num = 10, .style = FIFO_RX, .maxpacket = 32, },
+{ .hw_ep_num = 11, .style = FIFO_TX, .maxpacket = 32, },
+{ .hw_ep_num = 11, .style = FIFO_RX, .maxpacket = 32, },
+{ .hw_ep_num = 12, .style = FIFO_TX, .maxpacket = 32, },
+{ .hw_ep_num = 12, .style = FIFO_RX, .maxpacket = 32, },
+{ .hw_ep_num = 13, .style = FIFO_RXTX, .maxpacket = 512, },
+{ .hw_ep_num = 14, .style = FIFO_RXTX, .maxpacket = 1024, },
+{ .hw_ep_num = 15, .style = FIFO_RXTX, .maxpacket = 1024, },
+};
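
A quick size check of the new table against the "fits in 8KB" comment, simply summing the per-endpoint maxpacket values (any separate reservation for the endpoint 0 FIFO is not counted here):

    endpoints 1-5:    5 x (512 Tx + 512 Rx) = 5120 bytes
    endpoints 6-12:   7 x ( 32 Tx +  32 Rx) =  448 bytes
    endpoints 13-15:  512 + 1024 + 1024     = 2560 bytes (shared Rx/Tx FIFOs)
    total:                                    8128 bytes, within the 8192-byte budget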
/*
* configure a fifo; for non-shared endpoints, this may be called
@@ -1210,6 +1222,10 @@
cfg = mode_4_cfg;
n = ARRAY_SIZE(mode_4_cfg);
break;
+ case 5:
+ cfg = mode_5_cfg;
+ n = ARRAY_SIZE(mode_5_cfg);
+ break;
}
printk(KERN_DEBUG "%s: setup fifo_mode %d\n",
@@ -1314,9 +1330,6 @@
*/
static int __init musb_core_init(u16 musb_type, struct musb *musb)
{
-#ifdef MUSB_AHB_ID
- u32 data;
-#endif
u8 reg;
char *type;
char aInfo[90], aRevision[32], aDate[12];
@@ -1328,23 +1341,17 @@
reg = musb_read_configdata(mbase);
strcpy(aInfo, (reg & MUSB_CONFIGDATA_UTMIDW) ? "UTMI-16" : "UTMI-8");
- if (reg & MUSB_CONFIGDATA_DYNFIFO)
+ if (reg & MUSB_CONFIGDATA_DYNFIFO) {
strcat(aInfo, ", dyn FIFOs");
+ musb->dyn_fifo = true;
+ }
if (reg & MUSB_CONFIGDATA_MPRXE) {
strcat(aInfo, ", bulk combine");
-#ifdef C_MP_RX
musb->bulk_combine = true;
-#else
- strcat(aInfo, " (X)"); /* no driver support */
-#endif
}
if (reg & MUSB_CONFIGDATA_MPTXE) {
strcat(aInfo, ", bulk split");
-#ifdef C_MP_TX
musb->bulk_split = true;
-#else
- strcat(aInfo, " (X)"); /* no driver support */
-#endif
}
if (reg & MUSB_CONFIGDATA_HBRXE) {
strcat(aInfo, ", HB-ISO Rx");
@@ -1360,20 +1367,7 @@
printk(KERN_DEBUG "%s: ConfigData=0x%02x (%s)\n",
musb_driver_name, reg, aInfo);
-#ifdef MUSB_AHB_ID
- data = musb_readl(mbase, 0x404);
- sprintf(aDate, "%04d-%02x-%02x", (data & 0xffff),
- (data >> 16) & 0xff, (data >> 24) & 0xff);
- /* FIXME ID2 and ID3 are unused */
- data = musb_readl(mbase, 0x408);
- printk(KERN_DEBUG "ID2=%lx\n", (long unsigned)data);
- data = musb_readl(mbase, 0x40c);
- printk(KERN_DEBUG "ID3=%lx\n", (long unsigned)data);
- reg = musb_readb(mbase, 0x400);
- musb_type = ('M' == reg) ? MUSB_CONTROLLER_MHDRC : MUSB_CONTROLLER_HDRC;
-#else
aDate[0] = 0;
-#endif
if (MUSB_CONTROLLER_MHDRC == musb_type) {
musb->is_multipoint = 1;
type = "M";
@@ -1404,21 +1398,10 @@
musb->nr_endpoints = 1;
musb->epmask = 1;
- if (reg & MUSB_CONFIGDATA_DYNFIFO) {
- if (musb->config->dyn_fifo)
- status = ep_config_from_table(musb);
- else {
- ERR("reconfigure software for Dynamic FIFOs\n");
- status = -ENODEV;
- }
- } else {
- if (!musb->config->dyn_fifo)
- status = ep_config_from_hw(musb);
- else {
- ERR("reconfigure software for static FIFOs\n");
- return -ENODEV;
- }
- }
+ if (musb->dyn_fifo)
+ status = ep_config_from_table(musb);
+ else
+ status = ep_config_from_hw(musb);
if (status < 0)
return status;
@@ -1587,11 +1570,6 @@
ep_num++;
}
- /* finish handling "global" interrupts after handling fifos */
- if (musb->int_usb)
- retval |= musb_stage2_irq(musb,
- musb->int_usb, devctl, power);
-
return retval;
}
@@ -1696,7 +1674,7 @@
unsigned long val;
if (sscanf(buf, "%lu", &val) < 1) {
- printk(KERN_ERR "Invalid VBUS timeout ms value\n");
+ dev_err(dev, "Invalid VBUS timeout ms value\n");
return -EINVAL;
}
@@ -1746,7 +1724,7 @@
if (sscanf(buf, "%hu", &srp) != 1
|| (srp != 1)) {
- printk(KERN_ERR "SRP: Value must be 1\n");
+ dev_err(dev, "SRP: Value must be 1\n");
return -EINVAL;
}
@@ -1759,6 +1737,19 @@
#endif /* CONFIG_USB_GADGET_MUSB_HDRC */
+static struct attribute *musb_attributes[] = {
+ &dev_attr_mode.attr,
+ &dev_attr_vbus.attr,
+#ifdef CONFIG_USB_GADGET_MUSB_HDRC
+ &dev_attr_srp.attr,
+#endif
+ NULL
+};
+
+static const struct attribute_group musb_attr_group = {
+ .attrs = musb_attributes,
+};
+
#endif /* sysfs */
/* Only used to provide driver mode change events */
@@ -1833,11 +1824,7 @@
*/
#ifdef CONFIG_SYSFS
- device_remove_file(musb->controller, &dev_attr_mode);
- device_remove_file(musb->controller, &dev_attr_vbus);
-#ifdef CONFIG_USB_GADGET_MUSB_HDRC
- device_remove_file(musb->controller, &dev_attr_srp);
-#endif
+ sysfs_remove_group(&musb->controller->kobj, &musb_attr_group);
#endif
#ifdef CONFIG_USB_GADGET_MUSB_HDRC
@@ -2017,22 +2004,10 @@
musb->irq_wake = 0;
}
- pr_info("%s: USB %s mode controller at %p using %s, IRQ %d\n",
- musb_driver_name,
- ({char *s;
- switch (musb->board_mode) {
- case MUSB_HOST: s = "Host"; break;
- case MUSB_PERIPHERAL: s = "Peripheral"; break;
- default: s = "OTG"; break;
- }; s; }),
- ctrl,
- (is_dma_capable() && musb->dma_controller)
- ? "DMA" : "PIO",
- musb->nIrq);
-
/* host side needs more setup */
if (is_host_enabled(musb)) {
struct usb_hcd *hcd = musb_to_hcd(musb);
+ u8 busctl;
otg_set_host(musb->xceiv, &hcd->self);
@@ -2040,6 +2015,13 @@
hcd->self.otg_port = 1;
musb->xceiv->host = &hcd->self;
hcd->power_budget = 2 * (plat->power ? : 250);
+
+ /* program PHY to use external vBus if required */
+ if (plat->extvbus) {
+ busctl = musb_readb(musb->mregs, MUSB_ULPI_BUSCONTROL);
+ busctl |= MUSB_ULPI_USE_EXTVBUS;
+ musb_writeb(musb->mregs, MUSB_ULPI_BUSCONTROL, busctl);
+ }
}
/* For the host-only role, we can activate right away.
@@ -2079,26 +2061,26 @@
}
#ifdef CONFIG_SYSFS
- status = device_create_file(dev, &dev_attr_mode);
- status = device_create_file(dev, &dev_attr_vbus);
-#ifdef CONFIG_USB_GADGET_MUSB_HDRC
- status = device_create_file(dev, &dev_attr_srp);
-#endif /* CONFIG_USB_GADGET_MUSB_HDRC */
- status = 0;
+ status = sysfs_create_group(&musb->controller->kobj, &musb_attr_group);
#endif
if (status)
goto fail2;
+ dev_info(dev, "USB %s mode controller at %p using %s, IRQ %d\n",
+ ({char *s;
+ switch (musb->board_mode) {
+ case MUSB_HOST: s = "Host"; break;
+ case MUSB_PERIPHERAL: s = "Peripheral"; break;
+ default: s = "OTG"; break;
+ }; s; }),
+ ctrl,
+ (is_dma_capable() && musb->dma_controller)
+ ? "DMA" : "PIO",
+ musb->nIrq);
+
return 0;
fail2:
-#ifdef CONFIG_SYSFS
- device_remove_file(musb->controller, &dev_attr_mode);
- device_remove_file(musb->controller, &dev_attr_vbus);
-#ifdef CONFIG_USB_GADGET_MUSB_HDRC
- device_remove_file(musb->controller, &dev_attr_srp);
-#endif
-#endif
musb_platform_exit(musb);
fail:
dev_err(musb->controller,
@@ -2127,6 +2109,7 @@
{
struct device *dev = &pdev->dev;
int irq = platform_get_irq(pdev, 0);
+ int status;
struct resource *iomem;
void __iomem *base;
@@ -2134,7 +2117,7 @@
if (!iomem || irq == 0)
return -ENODEV;
- base = ioremap(iomem->start, iomem->end - iomem->start + 1);
+ base = ioremap(iomem->start, resource_size(iomem));
if (!base) {
dev_err(dev, "ioremap failed\n");
return -ENOMEM;
@@ -2144,7 +2127,12 @@
/* clobbered by use_dma=n */
orig_dma_mask = dev->dma_mask;
#endif
- return musb_init_controller(dev, irq, base);
+
+ status = musb_init_controller(dev, irq, base);
+ if (status < 0)
+ iounmap(base);
+
+ return status;
}
static int __exit musb_remove(struct platform_device *pdev)
@@ -2173,6 +2161,148 @@
#ifdef CONFIG_PM
+static struct musb_context_registers musb_context;
+
+void musb_save_context(struct musb *musb)
+{
+ int i;
+ void __iomem *musb_base = musb->mregs;
+
+ if (is_host_enabled(musb)) {
+ musb_context.frame = musb_readw(musb_base, MUSB_FRAME);
+ musb_context.testmode = musb_readb(musb_base, MUSB_TESTMODE);
+ }
+ musb_context.power = musb_readb(musb_base, MUSB_POWER);
+ musb_context.intrtxe = musb_readw(musb_base, MUSB_INTRTXE);
+ musb_context.intrrxe = musb_readw(musb_base, MUSB_INTRRXE);
+ musb_context.intrusbe = musb_readb(musb_base, MUSB_INTRUSBE);
+ musb_context.index = musb_readb(musb_base, MUSB_INDEX);
+ musb_context.devctl = musb_readb(musb_base, MUSB_DEVCTL);
+
+ for (i = 0; i < MUSB_C_NUM_EPS; ++i) {
+ musb_writeb(musb_base, MUSB_INDEX, i);
+ musb_context.index_regs[i].txmaxp =
+ musb_readw(musb_base, 0x10 + MUSB_TXMAXP);
+ musb_context.index_regs[i].txcsr =
+ musb_readw(musb_base, 0x10 + MUSB_TXCSR);
+ musb_context.index_regs[i].rxmaxp =
+ musb_readw(musb_base, 0x10 + MUSB_RXMAXP);
+ musb_context.index_regs[i].rxcsr =
+ musb_readw(musb_base, 0x10 + MUSB_RXCSR);
+
+ if (musb->dyn_fifo) {
+ musb_context.index_regs[i].txfifoadd =
+ musb_read_txfifoadd(musb_base);
+ musb_context.index_regs[i].rxfifoadd =
+ musb_read_rxfifoadd(musb_base);
+ musb_context.index_regs[i].txfifosz =
+ musb_read_txfifosz(musb_base);
+ musb_context.index_regs[i].rxfifosz =
+ musb_read_rxfifosz(musb_base);
+ }
+ if (is_host_enabled(musb)) {
+ musb_context.index_regs[i].txtype =
+ musb_readb(musb_base, 0x10 + MUSB_TXTYPE);
+ musb_context.index_regs[i].txinterval =
+ musb_readb(musb_base, 0x10 + MUSB_TXINTERVAL);
+ musb_context.index_regs[i].rxtype =
+ musb_readb(musb_base, 0x10 + MUSB_RXTYPE);
+ musb_context.index_regs[i].rxinterval =
+ musb_readb(musb_base, 0x10 + MUSB_RXINTERVAL);
+
+ musb_context.index_regs[i].txfunaddr =
+ musb_read_txfunaddr(musb_base, i);
+ musb_context.index_regs[i].txhubaddr =
+ musb_read_txhubaddr(musb_base, i);
+ musb_context.index_regs[i].txhubport =
+ musb_read_txhubport(musb_base, i);
+
+ musb_context.index_regs[i].rxfunaddr =
+ musb_read_rxfunaddr(musb_base, i);
+ musb_context.index_regs[i].rxhubaddr =
+ musb_read_rxhubaddr(musb_base, i);
+ musb_context.index_regs[i].rxhubport =
+ musb_read_rxhubport(musb_base, i);
+ }
+ }
+
+ musb_writeb(musb_base, MUSB_INDEX, musb_context.index);
+
+ musb_platform_save_context(musb, &musb_context);
+}
+
+void musb_restore_context(struct musb *musb)
+{
+ int i;
+ void __iomem *musb_base = musb->mregs;
+ void __iomem *ep_target_regs;
+
+ musb_platform_restore_context(musb, &musb_context);
+
+ if (is_host_enabled(musb)) {
+ musb_writew(musb_base, MUSB_FRAME, musb_context.frame);
+ musb_writeb(musb_base, MUSB_TESTMODE, musb_context.testmode);
+ }
+ musb_writeb(musb_base, MUSB_POWER, musb_context.power);
+ musb_writew(musb_base, MUSB_INTRTXE, musb_context.intrtxe);
+ musb_writew(musb_base, MUSB_INTRRXE, musb_context.intrrxe);
+ musb_writeb(musb_base, MUSB_INTRUSBE, musb_context.intrusbe);
+ musb_writeb(musb_base, MUSB_DEVCTL, musb_context.devctl);
+
+ for (i = 0; i < MUSB_C_NUM_EPS; ++i) {
+ musb_writeb(musb_base, MUSB_INDEX, i);
+ musb_writew(musb_base, 0x10 + MUSB_TXMAXP,
+ musb_context.index_regs[i].txmaxp);
+ musb_writew(musb_base, 0x10 + MUSB_TXCSR,
+ musb_context.index_regs[i].txcsr);
+ musb_writew(musb_base, 0x10 + MUSB_RXMAXP,
+ musb_context.index_regs[i].rxmaxp);
+ musb_writew(musb_base, 0x10 + MUSB_RXCSR,
+ musb_context.index_regs[i].rxcsr);
+
+ if (musb->dyn_fifo) {
+ musb_write_txfifosz(musb_base,
+ musb_context.index_regs[i].txfifosz);
+ musb_write_rxfifosz(musb_base,
+ musb_context.index_regs[i].rxfifosz);
+ musb_write_txfifoadd(musb_base,
+ musb_context.index_regs[i].txfifoadd);
+ musb_write_rxfifoadd(musb_base,
+ musb_context.index_regs[i].rxfifoadd);
+ }
+
+ if (is_host_enabled(musb)) {
+ musb_writeb(musb_base, 0x10 + MUSB_TXTYPE,
+ musb_context.index_regs[i].txtype);
+ musb_writeb(musb_base, 0x10 + MUSB_TXINTERVAL,
+ musb_context.index_regs[i].txinterval);
+ musb_writeb(musb_base, 0x10 + MUSB_RXTYPE,
+ musb_context.index_regs[i].rxtype);
+ musb_writeb(musb_base, 0x10 + MUSB_RXINTERVAL,
+ musb_context.index_regs[i].rxinterval);
+ musb_write_txfunaddr(musb_base, i,
+ musb_context.index_regs[i].txfunaddr);
+ musb_write_txhubaddr(musb_base, i,
+ musb_context.index_regs[i].txhubaddr);
+ musb_write_txhubport(musb_base, i,
+ musb_context.index_regs[i].txhubport);
+
+ ep_target_regs =
+ musb_read_target_reg_base(i, musb_base);
+
+ musb_write_rxfunaddr(ep_target_regs,
+ musb_context.index_regs[i].rxfunaddr);
+ musb_write_rxhubaddr(ep_target_regs,
+ musb_context.index_regs[i].rxhubaddr);
+ musb_write_rxhubport(ep_target_regs,
+ musb_context.index_regs[i].rxhubport);
+ }
+ }
+
+ musb_writeb(musb_base, MUSB_INDEX, musb_context.index);
+}
+
static int musb_suspend(struct device *dev)
{
struct platform_device *pdev = to_platform_device(dev);
@@ -2194,6 +2324,8 @@
*/
}
+ musb_save_context(musb);
+
if (musb->set_clock)
musb->set_clock(musb->clock, 0);
else
@@ -2215,6 +2347,8 @@
else
clk_enable(musb->clock);
+ musb_restore_context(musb);
+
/* for static cmos like DaVinci, register values were preserved
* unless for some reason the whole soc powered down or the USB
* module got reset through the PSC (vs just being disabled).
diff --git a/drivers/usb/musb/musb_core.h b/drivers/usb/musb/musb_core.h
index 5514c7e..d849fb8 100644
--- a/drivers/usb/musb/musb_core.h
+++ b/drivers/usb/musb/musb_core.h
@@ -52,6 +52,15 @@
struct musb_hw_ep;
struct musb_ep;
+/* Helper defines for struct musb->hwvers */
+#define MUSB_HWVERS_MAJOR(x) ((x >> 10) & 0x1f)
+#define MUSB_HWVERS_MINOR(x) (x & 0x3ff)
+#define MUSB_HWVERS_RC 0x8000
+#define MUSB_HWVERS_1300 0x52C
+#define MUSB_HWVERS_1400 0x590
+#define MUSB_HWVERS_1800 0x720
+#define MUSB_HWVERS_1900 0x784
+#define MUSB_HWVERS_2000 0x800
#include "musb_debug.h"
#include "musb_dma.h"
@@ -322,13 +331,6 @@
struct clk *clock;
irqreturn_t (*isr)(int, void *);
struct work_struct irq_work;
-#define MUSB_HWVERS_MAJOR(x) ((x >> 10) & 0x1f)
-#define MUSB_HWVERS_MINOR(x) (x & 0x3ff)
-#define MUSB_HWVERS_RC 0x8000
-#define MUSB_HWVERS_1300 0x52C
-#define MUSB_HWVERS_1400 0x590
-#define MUSB_HWVERS_1800 0x720
-#define MUSB_HWVERS_2000 0x800
u16 hwvers;
/* this hub status bit is reserved by USB 2.0 and not seen by usbcore */
@@ -411,22 +413,15 @@
unsigned hb_iso_rx:1; /* high bandwidth iso rx? */
unsigned hb_iso_tx:1; /* high bandwidth iso tx? */
+ unsigned dyn_fifo:1; /* dynamic FIFO supported? */
-#ifdef C_MP_TX
- unsigned bulk_split:1;
+ unsigned bulk_split:1;
#define can_bulk_split(musb,type) \
- (((type) == USB_ENDPOINT_XFER_BULK) && (musb)->bulk_split)
-#else
-#define can_bulk_split(musb, type) 0
-#endif
+ (((type) == USB_ENDPOINT_XFER_BULK) && (musb)->bulk_split)
-#ifdef C_MP_RX
- unsigned bulk_combine:1;
+ unsigned bulk_combine:1;
#define can_bulk_combine(musb,type) \
- (((type) == USB_ENDPOINT_XFER_BULK) && (musb)->bulk_combine)
-#else
-#define can_bulk_combine(musb, type) 0
-#endif
+ (((type) == USB_ENDPOINT_XFER_BULK) && (musb)->bulk_combine)
#ifdef CONFIG_USB_GADGET_MUSB_HDRC
/* is_suspended means USB B_PERIPHERAL suspend */
@@ -461,6 +456,45 @@
#endif
};
+#ifdef CONFIG_PM
+struct musb_csr_regs {
+ /* FIFO registers */
+ u16 txmaxp, txcsr, rxmaxp, rxcsr;
+ u16 rxfifoadd, txfifoadd;
+ u8 txtype, txinterval, rxtype, rxinterval;
+ u8 rxfifosz, txfifosz;
+ u8 txfunaddr, txhubaddr, txhubport;
+ u8 rxfunaddr, rxhubaddr, rxhubport;
+};
+
+struct musb_context_registers {
+
+#if defined(CONFIG_ARCH_OMAP34XX) || defined(CONFIG_ARCH_OMAP2430)
+ u32 otg_sysconfig, otg_forcestandby;
+#endif
+ u8 power;
+ u16 intrtxe, intrrxe;
+ u8 intrusbe;
+ u16 frame;
+ u8 index, testmode;
+
+ u8 devctl, misc;
+
+ struct musb_csr_regs index_regs[MUSB_C_NUM_EPS];
+};
+
+#if defined(CONFIG_ARCH_OMAP34XX) || defined(CONFIG_ARCH_OMAP2430)
+extern void musb_platform_save_context(struct musb *musb,
+ struct musb_context_registers *musb_context);
+extern void musb_platform_restore_context(struct musb *musb,
+ struct musb_context_registers *musb_context);
+#else
+#define musb_platform_save_context(m, x) do {} while (0)
+#define musb_platform_restore_context(m, x) do {} while (0)
+#endif
+
+#endif
+
static inline void musb_set_vbus(struct musb *musb, int is_on)
{
musb->board_set_vbus(musb, is_on);
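For reference only (not part of the patch): a minimal sketch of how the relocated MUSB_HWVERS_* helpers decode a raw HWVERS value and gate the "double buffering is suspect before RTL 2.0" workaround applied later in musb_gadget.c and musb_host.c. The local variable names and the pr_debug format are illustrative assumptions; regs, hw_ep and tmp reuse names from the musb_gadget.c hunk.

	u16 hwvers = musb_read_hwvers(mbase);	/* e.g. 0x784 (hardcoded on Blackfin) */
	u8 major = MUSB_HWVERS_MAJOR(hwvers);	/* (0x784 >> 10) & 0x1f == 1 */
	u16 minor = MUSB_HWVERS_MINOR(hwvers);	/* 0x784 & 0x3ff == 0x384, i.e. 1.9xx */
	bool is_rc = hwvers & MUSB_HWVERS_RC;	/* release-candidate RTL flag */

	pr_debug("MUSB RTL %d.%03d%s\n", major, minor, is_rc ? "RC" : "");

	/* RTL older than 2.0: program MAXP with the FIFO size instead of "tmp"
	 * so the endpoint falls back to single buffering */
	if (hwvers < MUSB_HWVERS_2000)
		musb_writew(regs, MUSB_TXMAXP, hw_ep->max_packet_sz_tx);
	else
		musb_writew(regs, MUSB_TXMAXP, tmp);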
diff --git a/drivers/usb/musb/musb_gadget.c b/drivers/usb/musb/musb_gadget.c
index cbcf14a2..a9f288c 100644
--- a/drivers/usb/musb/musb_gadget.c
+++ b/drivers/usb/musb/musb_gadget.c
@@ -895,7 +895,14 @@
/* REVISIT if can_bulk_split(), use by updating "tmp";
* likewise high bandwidth periodic tx
*/
- musb_writew(regs, MUSB_TXMAXP, tmp);
+ /* Set TXMAXP with the FIFO size of the endpoint
+ * to disable double buffering mode. Currently, double buffering
+ * seems to have problems if the musb RTL revision number is < 2.0.
+ */
+ if (musb->hwvers < MUSB_HWVERS_2000)
+ musb_writew(regs, MUSB_TXMAXP, hw_ep->max_packet_sz_tx);
+ else
+ musb_writew(regs, MUSB_TXMAXP, tmp);
csr = MUSB_TXCSR_MODE | MUSB_TXCSR_CLRDATATOG;
if (musb_readw(regs, MUSB_TXCSR)
@@ -925,7 +932,13 @@
/* REVISIT if can_bulk_combine() use by updating "tmp"
* likewise high bandwidth periodic rx
*/
- musb_writew(regs, MUSB_RXMAXP, tmp);
+ /* Set RXMAXP with the FIFO size of the endpoint
+ * to disable double buffering mode.
+ */
+ if (musb->hwvers < MUSB_HWVERS_2000)
+ musb_writew(regs, MUSB_RXMAXP, hw_ep->max_packet_sz_rx);
+ else
+ musb_writew(regs, MUSB_RXMAXP, tmp);
/* force shared fifo to OUT-only mode */
if (hw_ep->is_shared_fifo) {
@@ -1697,8 +1710,7 @@
return -EINVAL;
/* driver must be initialized to support peripheral mode */
- if (!musb || !(musb->board_mode == MUSB_OTG
- || musb->board_mode != MUSB_OTG)) {
+ if (!musb) {
DBG(1, "%s, no dev??\n", __func__);
return -ENODEV;
}
diff --git a/drivers/usb/musb/musb_host.c b/drivers/usb/musb/musb_host.c
index 74c4c36..3421cf9 100644
--- a/drivers/usb/musb/musb_host.c
+++ b/drivers/usb/musb/musb_host.c
@@ -605,8 +605,14 @@
musb_writeb(ep->regs, MUSB_RXTYPE, qh->type_reg);
musb_writeb(ep->regs, MUSB_RXINTERVAL, qh->intv_reg);
/* NOTE: bulk combining rewrites high bits of maxpacket */
- musb_writew(ep->regs, MUSB_RXMAXP,
- qh->maxpacket | ((qh->hb_mult - 1) << 11));
+ /* Set RXMAXP with the FIFO size of the endpoint
+ * to disable double buffering mode.
+ */
+ if (musb->hwvers < MUSB_HWVERS_2000)
+ musb_writew(ep->regs, MUSB_RXMAXP, ep->max_packet_sz_rx);
+ else
+ musb_writew(ep->regs, MUSB_RXMAXP,
+ qh->maxpacket | ((qh->hb_mult - 1) << 11));
ep->rx_reinit = 0;
}
@@ -1771,6 +1777,9 @@
int best_end, epnum;
struct musb_hw_ep *hw_ep = NULL;
struct list_head *head = NULL;
+ u8 toggle;
+ u8 txtype;
+ struct urb *urb = next_urb(qh);
/* use fixed hardware for control and bulk */
if (qh->type == USB_ENDPOINT_XFER_CONTROL) {
@@ -1809,6 +1818,27 @@
diff -= (qh->maxpacket * qh->hb_mult);
if (diff >= 0 && best_diff > diff) {
+
+ /*
+ * The Mentor controller has a bug: if we schedule a
+ * bulk TX transfer on an endpoint that earlier handled
+ * ISOC, the bulk transfer has to start on a zero
+ * toggle. If the bulk transfer starts on a 1 toggle,
+ * it will fail, because the controller starts the
+ * bulk transfer on a 0 toggle irrespective of the
+ * toggle bits programmed in TXCSR. Check for this
+ * condition while allocating the EP for a TX bulk
+ * transfer and, if it holds, skip this EP.
+ */
+ hw_ep = musb->endpoints + epnum;
+ toggle = usb_gettoggle(urb->dev, qh->epnum, !is_in);
+ txtype = (musb_readb(hw_ep->regs, MUSB_TXTYPE)
+ >> 4) & 0x3;
+ if (!is_in && (qh->type == USB_ENDPOINT_XFER_BULK) &&
+ toggle && (txtype == USB_ENDPOINT_XFER_ISOC))
+ continue;
+
best_diff = diff;
best_end = epnum;
}
diff --git a/drivers/usb/musb/musb_regs.h b/drivers/usb/musb/musb_regs.h
index 473a94e..292894a 100644
--- a/drivers/usb/musb/musb_regs.h
+++ b/drivers/usb/musb/musb_regs.h
@@ -72,6 +72,10 @@
#define MUSB_DEVCTL_HR 0x02
#define MUSB_DEVCTL_SESSION 0x01
+/* MUSB ULPI VBUSCONTROL */
+#define MUSB_ULPI_USE_EXTVBUS 0x01
+#define MUSB_ULPI_USE_EXTVBUSIND 0x02
+
/* TESTMODE */
#define MUSB_TEST_FORCE_HOST 0x80
#define MUSB_TEST_FIFO_ACCESS 0x40
@@ -246,6 +250,7 @@
/* REVISIT: vctrl/vstatus: optional vendor utmi+phy register at 0x68 */
#define MUSB_HWVERS 0x6C /* 8 bit */
+#define MUSB_ULPI_BUSCONTROL 0x70 /* 8 bit */
#define MUSB_EPINFO 0x78 /* 8 bit */
#define MUSB_RAMINFO 0x79 /* 8 bit */
@@ -321,6 +326,26 @@
musb_writew(mbase, MUSB_RXFIFOADD, c_off);
}
+static inline u8 musb_read_txfifosz(void __iomem *mbase)
+{
+ return musb_readb(mbase, MUSB_TXFIFOSZ);
+}
+
+static inline u16 musb_read_txfifoadd(void __iomem *mbase)
+{
+ return musb_readw(mbase, MUSB_TXFIFOADD);
+}
+
+static inline u8 musb_read_rxfifosz(void __iomem *mbase)
+{
+ return musb_readb(mbase, MUSB_RXFIFOSZ);
+}
+
+static inline u16 musb_read_rxfifoadd(void __iomem *mbase)
+{
+ return musb_readw(mbase, MUSB_RXFIFOADD);
+}
+
static inline u8 musb_read_configdata(void __iomem *mbase)
{
musb_writeb(mbase, MUSB_INDEX, 0);
@@ -376,6 +401,36 @@
qh_h_port_reg);
}
+static inline u8 musb_read_rxfunaddr(void __iomem *mbase, u8 epnum)
+{
+ return musb_readb(mbase, MUSB_BUSCTL_OFFSET(epnum, MUSB_RXFUNCADDR));
+}
+
+static inline u8 musb_read_rxhubaddr(void __iomem *mbase, u8 epnum)
+{
+ return musb_readb(mbase, MUSB_BUSCTL_OFFSET(epnum, MUSB_RXHUBADDR));
+}
+
+static inline u8 musb_read_rxhubport(void __iomem *mbase, u8 epnum)
+{
+ return musb_readb(mbase, MUSB_BUSCTL_OFFSET(epnum, MUSB_RXHUBPORT));
+}
+
+static inline u8 musb_read_txfunaddr(void __iomem *mbase, u8 epnum)
+{
+ return musb_readb(mbase, MUSB_BUSCTL_OFFSET(epnum, MUSB_TXFUNCADDR));
+}
+
+static inline u8 musb_read_txhubaddr(void __iomem *mbase, u8 epnum)
+{
+ return musb_readb(mbase, MUSB_BUSCTL_OFFSET(epnum, MUSB_TXHUBADDR));
+}
+
+static inline u8 musb_read_txhubport(void __iomem *mbase, u8 epnum)
+{
+ return musb_readb(mbase, MUSB_BUSCTL_OFFSET(epnum, MUSB_TXHUBPORT));
+}
+
#else /* CONFIG_BLACKFIN */
#define USB_BASE USB_FADDR
@@ -455,6 +510,22 @@
{
}
+static inline u8 musb_read_txfifosz(void __iomem *mbase)
+{
+}
+
+static inline u16 musb_read_txfifoadd(void __iomem *mbase)
+{
+}
+
+static inline u8 musb_read_rxfifosz(void __iomem *mbase)
+{
+}
+
+static inline u16 musb_read_rxfifoadd(void __iomem *mbase)
+{
+}
+
static inline u8 musb_read_configdata(void __iomem *mbase)
{
return 0;
@@ -462,7 +533,11 @@
static inline u16 musb_read_hwvers(void __iomem *mbase)
{
- return 0;
+ /*
+ * This register is invisible on Blackfin; the MUSB RTL
+ * version on Blackfin is 1.9, so just hardcode its value.
+ */
+ return MUSB_HWVERS_1900;
}
static inline void __iomem *musb_read_target_reg_base(u8 i, void __iomem *mbase)
@@ -500,6 +575,30 @@
{
}
+static inline u8 musb_read_rxfunaddr(void __iomem *mbase, u8 epnum)
+{
+}
+
+static inline u8 musb_read_rxhubaddr(void __iomem *mbase, u8 epnum)
+{
+}
+
+static inline u8 musb_read_rxhubport(void __iomem *mbase, u8 epnum)
+{
+}
+
+static inline u8 musb_read_txfunaddr(void __iomem *mbase, u8 epnum)
+{
+}
+
+static inline u8 musb_read_txhubaddr(void __iomem *mbase, u8 epnum)
+{
+}
+
+static inline u8 musb_read_txhubport(void __iomem *mbase, u8 epnum)
+{
+}
+
#endif /* CONFIG_BLACKFIN */
#endif /* __MUSB_REGS_H__ */
diff --git a/drivers/usb/musb/musbhsdma.c b/drivers/usb/musb/musbhsdma.c
index a237550..2fa7d5c 100644
--- a/drivers/usb/musb/musbhsdma.c
+++ b/drivers/usb/musb/musbhsdma.c
@@ -250,20 +250,39 @@
u8 bchannel;
u8 int_hsdma;
- u32 addr;
+ u32 addr, count;
u16 csr;
spin_lock_irqsave(&musb->lock, flags);
int_hsdma = musb_readb(mbase, MUSB_HSDMA_INTR);
- if (!int_hsdma)
- goto done;
#ifdef CONFIG_BLACKFIN
/* Clear DMA interrupt flags */
musb_writeb(mbase, MUSB_HSDMA_INTR, int_hsdma);
#endif
+ if (!int_hsdma) {
+ DBG(2, "spurious DMA irq\n");
+
+ for (bchannel = 0; bchannel < MUSB_HSDMA_CHANNELS; bchannel++) {
+ musb_channel = (struct musb_dma_channel *)
+ &(controller->channel[bchannel]);
+ channel = &musb_channel->channel;
+ if (channel->status == MUSB_DMA_STATUS_BUSY) {
+ count = musb_read_hsdma_count(mbase, bchannel);
+
+ if (count == 0)
+ int_hsdma |= (1 << bchannel);
+ }
+ }
+
+ DBG(2, "int_hsdma = 0x%x\n", int_hsdma);
+
+ if (!int_hsdma)
+ goto done;
+ }
+
for (bchannel = 0; bchannel < MUSB_HSDMA_CHANNELS; bchannel++) {
if (int_hsdma & (1 << bchannel)) {
musb_channel = (struct musb_dma_channel *)
diff --git a/drivers/usb/musb/musbhsdma.h b/drivers/usb/musb/musbhsdma.h
index 1299d92..613f95a 100644
--- a/drivers/usb/musb/musbhsdma.h
+++ b/drivers/usb/musb/musbhsdma.h
@@ -55,6 +55,10 @@
MUSB_HSDMA_CHANNEL_OFFSET(bchannel, MUSB_HSDMA_ADDRESS), \
addr)
+#define musb_read_hsdma_count(mbase, bchannel) \
+ musb_readl(mbase, \
+ MUSB_HSDMA_CHANNEL_OFFSET(bchannel, MUSB_HSDMA_COUNT))
+
#define musb_write_hsdma_count(mbase, bchannel, len) \
musb_writel(mbase, \
MUSB_HSDMA_CHANNEL_OFFSET(bchannel, MUSB_HSDMA_COUNT), \
@@ -96,6 +100,19 @@
((u16)(((u32) dma_addr >> 16) & 0xFFFF)));
}
+static inline u32 musb_read_hsdma_count(void __iomem *mbase, u8 bchannel)
+{
+ u32 count = musb_readw(mbase,
+ MUSB_HSDMA_CHANNEL_OFFSET(bchannel, MUSB_HSDMA_COUNT_HIGH));
+
+ count = count << 16;
+
+ count |= musb_readw(mbase,
+ MUSB_HSDMA_CHANNEL_OFFSET(bchannel, MUSB_HSDMA_COUNT_LOW));
+
+ return count;
+}
+
static inline void musb_write_hsdma_count(void __iomem *mbase,
u8 bchannel, u32 len)
{
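An illustrative sketch (not part of the patch) of what the new musb_read_hsdma_count() accessor yields on the split-register variant and how the spurious-IRQ workaround in musbhsdma.c uses it; the example values are assumptions for demonstration only, and channel, int_hsdma and bchannel reuse names from the musbhsdma.c hunk.

	/* Blackfin variant: COUNT is split into two 16-bit halves */
	u32 count = musb_readw(mbase,
		MUSB_HSDMA_CHANNEL_OFFSET(bchannel, MUSB_HSDMA_COUNT_HIGH));
	count <<= 16;
	count |= musb_readw(mbase,
		MUSB_HSDMA_CHANNEL_OFFSET(bchannel, MUSB_HSDMA_COUNT_LOW));
	/* e.g. HIGH 0x0001, LOW 0x2345 -> count == 0x00012345 bytes remaining */

	/* When MUSB_HSDMA_INTR reads back 0, a busy channel whose remaining
	 * count has dropped to 0 is treated as completed anyway: */
	if (channel->status == MUSB_DMA_STATUS_BUSY &&
	    musb_read_hsdma_count(mbase, bchannel) == 0)
		int_hsdma |= 1 << bchannel;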
diff --git a/drivers/usb/musb/omap2430.c b/drivers/usb/musb/omap2430.c
index 83beeac..3fe1686 100644
--- a/drivers/usb/musb/omap2430.c
+++ b/drivers/usb/musb/omap2430.c
@@ -220,7 +220,7 @@
musb_platform_resume(musb);
- l = omap_readl(OTG_SYSCONFIG);
+ l = musb_readl(musb->mregs, OTG_SYSCONFIG);
l &= ~ENABLEWAKEUP; /* disable wakeup */
l &= ~NOSTDBY; /* remove possible nostdby */
l |= SMARTSTDBY; /* enable smart standby */
@@ -233,17 +233,19 @@
*/
if (!cpu_is_omap3430())
l |= AUTOIDLE; /* enable auto idle */
- omap_writel(l, OTG_SYSCONFIG);
+ musb_writel(musb->mregs, OTG_SYSCONFIG, l);
- l = omap_readl(OTG_INTERFSEL);
+ l = musb_readl(musb->mregs, OTG_INTERFSEL);
l |= ULPI_12PIN;
- omap_writel(l, OTG_INTERFSEL);
+ musb_writel(musb->mregs, OTG_INTERFSEL, l);
pr_debug("HS USB OTG: revision 0x%x, sysconfig 0x%02x, "
"sysstatus 0x%x, intrfsel 0x%x, simenable 0x%x\n",
- omap_readl(OTG_REVISION), omap_readl(OTG_SYSCONFIG),
- omap_readl(OTG_SYSSTATUS), omap_readl(OTG_INTERFSEL),
- omap_readl(OTG_SIMENABLE));
+ musb_readl(musb->mregs, OTG_REVISION),
+ musb_readl(musb->mregs, OTG_SYSCONFIG),
+ musb_readl(musb->mregs, OTG_SYSSTATUS),
+ musb_readl(musb->mregs, OTG_INTERFSEL),
+ musb_readl(musb->mregs, OTG_SIMENABLE));
omap_vbus_power(musb, musb->board_mode == MUSB_HOST, 1);
@@ -255,6 +257,22 @@
return 0;
}
+#ifdef CONFIG_PM
+void musb_platform_save_context(struct musb *musb,
+ struct musb_context_registers *musb_context)
+{
+ musb_context->otg_sysconfig = musb_readl(musb->mregs, OTG_SYSCONFIG);
+ musb_context->otg_forcestandby = musb_readl(musb->mregs, OTG_FORCESTDBY);
+}
+
+void musb_platform_restore_context(struct musb *musb,
+ struct musb_context_registers *musb_context)
+{
+ musb_writel(musb->mregs, OTG_SYSCONFIG, musb_context->otg_sysconfig);
+ musb_writel(musb->mregs, OTG_FORCESTDBY, musb_context->otg_forcestandby);
+}
+#endif
+
int musb_platform_suspend(struct musb *musb)
{
u32 l;
@@ -263,13 +281,13 @@
return 0;
/* in any role */
- l = omap_readl(OTG_FORCESTDBY);
+ l = musb_readl(musb->mregs, OTG_FORCESTDBY);
l |= ENABLEFORCE; /* enable MSTANDBY */
- omap_writel(l, OTG_FORCESTDBY);
+ musb_writel(musb->mregs, OTG_FORCESTDBY, l);
- l = omap_readl(OTG_SYSCONFIG);
+ l = musb_readl(musb->mregs, OTG_SYSCONFIG);
l |= ENABLEWAKEUP; /* enable wakeup */
- omap_writel(l, OTG_SYSCONFIG);
+ musb_writel(musb->mregs, OTG_SYSCONFIG, l);
otg_set_suspend(musb->xceiv, 1);
@@ -295,13 +313,13 @@
else
clk_enable(musb->clock);
- l = omap_readl(OTG_SYSCONFIG);
+ l = musb_readl(musb->mregs, OTG_SYSCONFIG);
l &= ~ENABLEWAKEUP; /* disable wakeup */
- omap_writel(l, OTG_SYSCONFIG);
+ musb_writel(musb->mregs, OTG_SYSCONFIG, l);
- l = omap_readl(OTG_FORCESTDBY);
+ l = musb_readl(musb->mregs, OTG_FORCESTDBY);
l &= ~ENABLEFORCE; /* disable MSTANDBY */
- omap_writel(l, OTG_FORCESTDBY);
+ musb_writel(musb->mregs, OTG_FORCESTDBY, l);
return 0;
}
diff --git a/drivers/usb/musb/omap2430.h b/drivers/usb/musb/omap2430.h
index fbede77..40b3c02 100644
--- a/drivers/usb/musb/omap2430.h
+++ b/drivers/usb/musb/omap2430.h
@@ -10,47 +10,43 @@
#ifndef __MUSB_OMAP243X_H__
#define __MUSB_OMAP243X_H__
-#if defined(CONFIG_ARCH_OMAP2430) || defined(CONFIG_ARCH_OMAP3430)
-#include <mach/hardware.h>
#include <plat/usb.h>
/*
* OMAP2430-specific definitions
*/
-#define MENTOR_BASE_OFFSET 0
-#if defined(CONFIG_ARCH_OMAP2430)
-#define OMAP_HSOTG_BASE (OMAP243X_HS_BASE)
-#elif defined(CONFIG_ARCH_OMAP3430)
-#define OMAP_HSOTG_BASE (OMAP34XX_HSUSB_OTG_BASE)
-#endif
-#define OMAP_HSOTG(offset) (OMAP_HSOTG_BASE + 0x400 + (offset))
-#define OTG_REVISION OMAP_HSOTG(0x0)
-#define OTG_SYSCONFIG OMAP_HSOTG(0x4)
+#define OTG_REVISION 0x400
+
+#define OTG_SYSCONFIG 0x404
# define MIDLEMODE 12 /* bit position */
# define FORCESTDBY (0 << MIDLEMODE)
# define NOSTDBY (1 << MIDLEMODE)
# define SMARTSTDBY (2 << MIDLEMODE)
+
# define SIDLEMODE 3 /* bit position */
# define FORCEIDLE (0 << SIDLEMODE)
# define NOIDLE (1 << SIDLEMODE)
# define SMARTIDLE (2 << SIDLEMODE)
+
# define ENABLEWAKEUP (1 << 2)
# define SOFTRST (1 << 1)
# define AUTOIDLE (1 << 0)
-#define OTG_SYSSTATUS OMAP_HSOTG(0x8)
+
+#define OTG_SYSSTATUS 0x408
# define RESETDONE (1 << 0)
-#define OTG_INTERFSEL OMAP_HSOTG(0xc)
+
+#define OTG_INTERFSEL 0x40c
# define EXTCP (1 << 2)
-# define PHYSEL 0 /* bit position */
+# define PHYSEL 0 /* bit position */
# define UTMI_8BIT (0 << PHYSEL)
# define ULPI_12PIN (1 << PHYSEL)
# define ULPI_8PIN (2 << PHYSEL)
-#define OTG_SIMENABLE OMAP_HSOTG(0x10)
-# define TM1 (1 << 0)
-#define OTG_FORCESTDBY OMAP_HSOTG(0x14)
-# define ENABLEFORCE (1 << 0)
-#endif /* CONFIG_ARCH_OMAP2430 */
+#define OTG_SIMENABLE 0x410
+# define TM1 (1 << 0)
+
+#define OTG_FORCESTDBY 0x414
+# define ENABLEFORCE (1 << 0)
#endif /* __MUSB_OMAP243X_H__ */
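A short illustrative note (not part of the patch) on why the relative offsets above can replace the old absolute OMAP_HSOTG() addresses: musb->mregs already maps the HS OTG block, so a register formerly read at OMAP_HSOTG_BASE + 0x400 + offset is now read at the same offset into that mapping. A hedged sketch reusing names from the omap2430.c hunks:

	u32 l;

	/* before: absolute SoC address, only valid on OMAP2430/3430 builds */
	/* l = omap_readl(OMAP_HSOTG_BASE + 0x400 + 0x4); */

	/* after: offset 0x404 into the ioremapped MUSB register window */
	l = musb_readl(musb->mregs, OTG_SYSCONFIG);
	l &= ~ENABLEWAKEUP;
	musb_writel(musb->mregs, OTG_SYSCONFIG, l);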
diff --git a/drivers/usb/musb/tusb6010.c b/drivers/usb/musb/tusb6010.c
index 88b587c..ab776a8 100644
--- a/drivers/usb/musb/tusb6010.c
+++ b/drivers/usb/musb/tusb6010.c
@@ -1118,7 +1118,7 @@
}
musb->sync = mem->start;
- sync = ioremap(mem->start, mem->end - mem->start + 1);
+ sync = ioremap(mem->start, resource_size(mem));
if (!sync) {
pr_debug("ioremap for sync failed\n");
ret = -ENOMEM;
diff --git a/drivers/usb/musb/tusb6010_omap.c b/drivers/usb/musb/tusb6010_omap.c
index e13c770..1c86809 100644
--- a/drivers/usb/musb/tusb6010_omap.c
+++ b/drivers/usb/musb/tusb6010_omap.c
@@ -648,7 +648,7 @@
}
}
- if (!tusb_dma->multichannel && tusb_dma && tusb_dma->ch >= 0)
+ if (tusb_dma && !tusb_dma->multichannel && tusb_dma->ch >= 0)
omap_free_dma(tusb_dma->ch);
kfree(tusb_dma);
diff --git a/drivers/usb/otg/twl4030-usb.c b/drivers/usb/otg/twl4030-usb.c
index 2be9f2f..3e4e9f4 100644
--- a/drivers/usb/otg/twl4030-usb.c
+++ b/drivers/usb/otg/twl4030-usb.c
@@ -36,7 +36,7 @@
#include <linux/i2c/twl.h>
#include <linux/regulator/consumer.h>
#include <linux/err.h>
-
+#include <linux/notifier.h>
/* Register defines */
@@ -236,15 +236,6 @@
#define PMBR1 0x0D
#define GPIO_USB_4PIN_ULPI_2430C (3 << 0)
-
-
-enum linkstat {
- USB_LINK_UNKNOWN = 0,
- USB_LINK_NONE,
- USB_LINK_VBUS,
- USB_LINK_ID,
-};
-
struct twl4030_usb {
struct otg_transceiver otg;
struct device *dev;
@@ -347,10 +338,10 @@
/*-------------------------------------------------------------------------*/
-static enum linkstat twl4030_usb_linkstat(struct twl4030_usb *twl)
+static enum usb_xceiv_events twl4030_usb_linkstat(struct twl4030_usb *twl)
{
int status;
- int linkstat = USB_LINK_UNKNOWN;
+ int linkstat = USB_EVENT_NONE;
/*
* For ID/VBUS sensing, see manual section 15.4.8 ...
@@ -368,11 +359,11 @@
dev_err(twl->dev, "USB link status err %d\n", status);
else if (status & (BIT(7) | BIT(2))) {
if (status & BIT(2))
- linkstat = USB_LINK_ID;
+ linkstat = USB_EVENT_ID;
else
- linkstat = USB_LINK_VBUS;
+ linkstat = USB_EVENT_VBUS;
} else
- linkstat = USB_LINK_NONE;
+ linkstat = USB_EVENT_NONE;
dev_dbg(twl->dev, "HW_CONDITIONS 0x%02x/%d; link %d\n",
status, status, linkstat);
@@ -383,7 +374,7 @@
spin_lock_irq(&twl->lock);
twl->linkstat = linkstat;
- if (linkstat == USB_LINK_ID) {
+ if (linkstat == USB_EVENT_ID) {
twl->otg.default_a = true;
twl->otg.state = OTG_STATE_A_IDLE;
} else {
@@ -564,7 +555,7 @@
spin_lock_irqsave(&twl->lock, flags);
ret = sprintf(buf, "%s\n",
- (twl->linkstat == USB_LINK_VBUS) ? "on" : "off");
+ (twl->linkstat == USB_EVENT_VBUS) ? "on" : "off");
spin_unlock_irqrestore(&twl->lock, flags);
return ret;
@@ -576,17 +567,8 @@
struct twl4030_usb *twl = _twl;
int status;
-#ifdef CONFIG_LOCKDEP
- /* WORKAROUND for lockdep forcing IRQF_DISABLED on us, which
- * we don't want and can't tolerate. Although it might be
- * friendlier not to borrow this thread context...
- */
- local_irq_enable();
-#endif
-
status = twl4030_usb_linkstat(twl);
- if (status != USB_LINK_UNKNOWN) {
-
+ if (status >= 0) {
/* FIXME add a set_power() method so that B-devices can
* configure the charger appropriately. It's not always
* correct to consume VBUS power, and how much current to
@@ -598,12 +580,13 @@
* USB_LINK_VBUS state. musb_hdrc won't care until it
* starts to handle softconnect right.
*/
- if (status == USB_LINK_NONE)
+ if (status == USB_EVENT_NONE)
twl4030_phy_suspend(twl, 0);
else
twl4030_phy_resume(twl);
- twl4030charger_usb_en(status == USB_LINK_VBUS);
+ blocking_notifier_call_chain(&twl->otg.notifier, status,
+ twl->otg.gadget);
}
sysfs_notify(&twl->dev->kobj, NULL, "vbus");
@@ -693,6 +676,8 @@
if (device_create_file(&pdev->dev, &dev_attr_vbus))
dev_warn(&pdev->dev, "could not create sysfs file\n");
+ BLOCKING_INIT_NOTIFIER_HEAD(&twl->otg.notifier);
+
/* Our job is to use irqs and status from the power module
* to keep the transceiver disabled when nothing's connected.
*
@@ -702,7 +687,7 @@
* need both handles, otherwise just one suffices.
*/
twl->irq_enabled = true;
- status = request_irq(twl->irq, twl4030_usb_irq,
+ status = request_threaded_irq(twl->irq, NULL, twl4030_usb_irq,
IRQF_TRIGGER_FALLING | IRQF_TRIGGER_RISING,
"twl4030_usb", twl);
if (status < 0) {
diff --git a/drivers/usb/serial/Kconfig b/drivers/usb/serial/Kconfig
index c480ea4..c78b255 100644
--- a/drivers/usb/serial/Kconfig
+++ b/drivers/usb/serial/Kconfig
@@ -472,6 +472,17 @@
To compile this driver as a module, choose M here: the
module will be called oti6858.
+config USB_SERIAL_QCAUX
+ tristate "USB Qualcomm Auxiliary Serial Port Driver"
+ ---help---
+ Say Y here if you want to use the auxiliary serial ports provided
+ by many modems based on Qualcomm chipsets. These ports often use
+ a proprietary protocol called DM and cannot be used for AT- or
+ PPP-based communication.
+
+ To compile this driver as a module, choose M here: the
+ module will be called qcaux. If unsure, choose N.
+
config USB_SERIAL_QUALCOMM
tristate "USB Qualcomm Serial modem"
help
@@ -600,6 +611,14 @@
To compile this driver as a module, choose M here: the
module will be called opticon.
+config USB_SERIAL_VIVOPAY_SERIAL
+ tristate "USB ViVOpay serial interface driver"
+ help
+ Say Y here if you want to use a ViVOtech ViVOpay USB device.
+
+ To compile this driver as a module, choose M here: the
+ module will be called vivopay-serial.
+
config USB_SERIAL_DEBUG
tristate "USB Debugging Device"
help
diff --git a/drivers/usb/serial/Makefile b/drivers/usb/serial/Makefile
index 66619be..83c9e43 100644
--- a/drivers/usb/serial/Makefile
+++ b/drivers/usb/serial/Makefile
@@ -45,6 +45,7 @@
obj-$(CONFIG_USB_SERIAL_OPTION) += option.o
obj-$(CONFIG_USB_SERIAL_OTI6858) += oti6858.o
obj-$(CONFIG_USB_SERIAL_PL2303) += pl2303.o
+obj-$(CONFIG_USB_SERIAL_QCAUX) += qcaux.o
obj-$(CONFIG_USB_SERIAL_QUALCOMM) += qcserial.o
obj-$(CONFIG_USB_SERIAL_SAFE) += safe_serial.o
obj-$(CONFIG_USB_SERIAL_SIEMENS_MPI) += siemens_mpi.o
@@ -55,4 +56,5 @@
obj-$(CONFIG_USB_SERIAL_VISOR) += visor.o
obj-$(CONFIG_USB_SERIAL_WHITEHEAT) += whiteheat.o
obj-$(CONFIG_USB_SERIAL_XIRCOM) += keyspan_pda.o
+obj-$(CONFIG_USB_SERIAL_VIVOPAY_SERIAL) += vivopay-serial.o
diff --git a/drivers/usb/serial/aircable.c b/drivers/usb/serial/aircable.c
index b10ac84..365db10 100644
--- a/drivers/usb/serial/aircable.c
+++ b/drivers/usb/serial/aircable.c
@@ -78,7 +78,7 @@
#define DRIVER_DESC "AIRcable USB Driver"
/* ID table that will be registered with USB core */
-static struct usb_device_id id_table [] = {
+static const struct usb_device_id id_table[] = {
{ USB_DEVICE(AIRCABLE_VID, AIRCABLE_USB_PID) },
{ },
};
@@ -468,10 +468,6 @@
if (status) {
dbg("%s - urb status = %d", __func__, status);
- if (!port->port.count) {
- dbg("%s - port is closed, exiting.", __func__);
- return;
- }
if (status == -EPROTO) {
dbg("%s - caught -EPROTO, resubmitting the urb",
__func__);
@@ -530,23 +526,19 @@
}
tty_kref_put(tty);
- /* Schedule the next read _if_ we are still open */
- if (port->port.count) {
- usb_fill_bulk_urb(port->read_urb, port->serial->dev,
- usb_rcvbulkpipe(port->serial->dev,
- port->bulk_in_endpointAddress),
- port->read_urb->transfer_buffer,
- port->read_urb->transfer_buffer_length,
- aircable_read_bulk_callback, port);
+ /* Schedule the next read */
+ usb_fill_bulk_urb(port->read_urb, port->serial->dev,
+ usb_rcvbulkpipe(port->serial->dev,
+ port->bulk_in_endpointAddress),
+ port->read_urb->transfer_buffer,
+ port->read_urb->transfer_buffer_length,
+ aircable_read_bulk_callback, port);
- result = usb_submit_urb(urb, GFP_ATOMIC);
- if (result)
- dev_err(&urb->dev->dev,
- "%s - failed resubmitting read urb, error %d\n",
- __func__, result);
- }
-
- return;
+ result = usb_submit_urb(urb, GFP_ATOMIC);
+ if (result && result != -EPERM)
+ dev_err(&urb->dev->dev,
+ "%s - failed resubmitting read urb, error %d\n",
+ __func__, result);
}
/* Based on ftdi_sio.c throttle */
diff --git a/drivers/usb/serial/ark3116.c b/drivers/usb/serial/ark3116.c
index a9c2dec..547c944 100644
--- a/drivers/usb/serial/ark3116.c
+++ b/drivers/usb/serial/ark3116.c
@@ -50,7 +50,7 @@
/* usb timeout of 1 second */
#define ARK_TIMEOUT (1*HZ)
-static struct usb_device_id id_table [] = {
+static const struct usb_device_id id_table[] = {
{ USB_DEVICE(0x6547, 0x0232) },
{ USB_DEVICE(0x18ec, 0x3118) }, /* USB to IrDA adapter */
{ },
@@ -733,7 +733,6 @@
tty = tty_port_tty_get(&port->port);
if (tty) {
- tty_buffer_request_room(tty, urb->actual_length + 1);
/* overrun is special, not associated with a char */
if (unlikely(lsr & UART_LSR_OE))
tty_insert_flip_char(tty, 0, TTY_OVERRUN);
diff --git a/drivers/usb/serial/belkin_sa.c b/drivers/usb/serial/belkin_sa.c
index a0467bc..1295e44 100644
--- a/drivers/usb/serial/belkin_sa.c
+++ b/drivers/usb/serial/belkin_sa.c
@@ -103,7 +103,7 @@
unsigned int set, unsigned int clear);
-static struct usb_device_id id_table_combined [] = {
+static const struct usb_device_id id_table_combined[] = {
{ USB_DEVICE(BELKIN_SA_VID, BELKIN_SA_PID) },
{ USB_DEVICE(BELKIN_OLD_VID, BELKIN_OLD_PID) },
{ USB_DEVICE(PERACOM_VID, PERACOM_PID) },
diff --git a/drivers/usb/serial/ch341.c b/drivers/usb/serial/ch341.c
index 59eff72..9f4fed1 100644
--- a/drivers/usb/serial/ch341.c
+++ b/drivers/usb/serial/ch341.c
@@ -22,6 +22,7 @@
#include <linux/usb.h>
#include <linux/usb/serial.h>
#include <linux/serial.h>
+#include <asm/unaligned.h>
#define DEFAULT_BAUD_RATE 9600
#define DEFAULT_TIMEOUT 1000
@@ -70,7 +71,7 @@
static int debug;
-static struct usb_device_id id_table [] = {
+static const struct usb_device_id id_table[] = {
{ USB_DEVICE(0x4348, 0x5523) },
{ USB_DEVICE(0x1a86, 0x7523) },
{ },
@@ -392,17 +393,23 @@
struct usb_serial_port *port = tty->driver_data;
int r;
uint16_t reg_contents;
- uint8_t break_reg[2];
+ uint8_t *break_reg;
dbg("%s()", __func__);
- r = ch341_control_in(port->serial->dev, CH341_REQ_READ_REG,
- ch341_break_reg, 0, break_reg, sizeof(break_reg));
- if (r < 0) {
- printk(KERN_WARNING "%s: USB control read error whilst getting"
- " break register contents.\n", __FILE__);
+ break_reg = kmalloc(2, GFP_KERNEL);
+ if (!break_reg) {
+ dev_err(&port->dev, "%s - kmalloc failed\n", __func__);
return;
}
+
+ r = ch341_control_in(port->serial->dev, CH341_REQ_READ_REG,
+ ch341_break_reg, 0, break_reg, 2);
+ if (r < 0) {
+ dev_err(&port->dev, "%s - USB control read error (%d)\n",
+ __func__, r);
+ goto out;
+ }
dbg("%s - initial ch341 break register contents - reg1: %x, reg2: %x",
__func__, break_reg[0], break_reg[1]);
if (break_state != 0) {
@@ -416,12 +423,14 @@
}
dbg("%s - New ch341 break register contents - reg1: %x, reg2: %x",
__func__, break_reg[0], break_reg[1]);
- reg_contents = (uint16_t)break_reg[0] | ((uint16_t)break_reg[1] << 8);
+ reg_contents = get_unaligned_le16(break_reg);
r = ch341_control_out(port->serial->dev, CH341_REQ_WRITE_REG,
ch341_break_reg, reg_contents);
if (r < 0)
- printk(KERN_WARNING "%s: USB control write error whilst setting"
- " break register contents.\n", __FILE__);
+ dev_err(&port->dev, "%s - USB control write error (%d)\n",
+ __func__, r);
+out:
+ kfree(break_reg);
}
static int ch341_tiocmset(struct tty_struct *tty, struct file *file,
diff --git a/drivers/usb/serial/cp210x.c b/drivers/usb/serial/cp210x.c
index bd254ec..507382b 100644
--- a/drivers/usb/serial/cp210x.c
+++ b/drivers/usb/serial/cp210x.c
@@ -55,7 +55,7 @@
static int debug;
-static struct usb_device_id id_table [] = {
+static const struct usb_device_id id_table[] = {
{ USB_DEVICE(0x0471, 0x066A) }, /* AKTAKOM ACE-1001 cable */
{ USB_DEVICE(0x0489, 0xE000) }, /* Pirelli Broadband S.p.A, DP-L10 SIP/GSM Mobile */
{ USB_DEVICE(0x0745, 0x1000) }, /* CipherLab USB CCD Barcode Scanner 1000 */
@@ -91,11 +91,12 @@
{ USB_DEVICE(0x10C4, 0x81C8) }, /* Lipowsky Industrie Elektronik GmbH, Baby-JTAG */
{ USB_DEVICE(0x10C4, 0x81E2) }, /* Lipowsky Industrie Elektronik GmbH, Baby-LIN */
{ USB_DEVICE(0x10C4, 0x81E7) }, /* Aerocomm Radio */
+ { USB_DEVICE(0x10C4, 0x81E8) }, /* Zephyr Bioharness */
{ USB_DEVICE(0x10C4, 0x81F2) }, /* C1007 HF band RFID controller */
{ USB_DEVICE(0x10C4, 0x8218) }, /* Lipowsky Industrie Elektronik GmbH, HARP-1 */
{ USB_DEVICE(0x10C4, 0x822B) }, /* Modem EDGE(GSM) Commander 2 */
{ USB_DEVICE(0x10C4, 0x826B) }, /* Cygnal Integrated Products, Inc., Fasttrax GPS demonstration module */
- { USB_DEVICE(0x10c4, 0x8293) }, /* Telegesys ETRX2USB */
+ { USB_DEVICE(0x10C4, 0x8293) }, /* Telegesys ETRX2USB */
{ USB_DEVICE(0x10C4, 0x82F9) }, /* Procyon AVS */
{ USB_DEVICE(0x10C4, 0x8341) }, /* Siemens MC35PU GPRS Modem */
{ USB_DEVICE(0x10C4, 0x8382) }, /* Cygnal Integrated Products, Inc. */
@@ -612,7 +613,7 @@
baud);
if (cp210x_set_config_single(port, CP210X_SET_BAUDDIV,
((BAUD_RATE_GEN_FREQ + baud/2) / baud))) {
- dbg("Baud rate requested not supported by device\n");
+ dbg("Baud rate requested not supported by device");
baud = tty_termios_baud_rate(old_termios);
}
}
diff --git a/drivers/usb/serial/cyberjack.c b/drivers/usb/serial/cyberjack.c
index b0f6402..f744ab7 100644
--- a/drivers/usb/serial/cyberjack.c
+++ b/drivers/usb/serial/cyberjack.c
@@ -70,7 +70,7 @@
static void cyberjack_read_bulk_callback(struct urb *urb);
static void cyberjack_write_bulk_callback(struct urb *urb);
-static struct usb_device_id id_table [] = {
+static const struct usb_device_id id_table[] = {
{ USB_DEVICE(CYBERJACK_VENDOR_ID, CYBERJACK_PRODUCT_ID) },
{ } /* Terminating entry */
};
@@ -391,11 +391,10 @@
tty = tty_port_tty_get(&port->port);
if (!tty) {
- dbg("%s - ignoring since device not open\n", __func__);
+ dbg("%s - ignoring since device not open", __func__);
return;
}
if (urb->actual_length) {
- tty_buffer_request_room(tty, urb->actual_length);
tty_insert_flip_string(tty, data, urb->actual_length);
tty_flip_buffer_push(tty);
}
diff --git a/drivers/usb/serial/cypress_m8.c b/drivers/usb/serial/cypress_m8.c
index a591ebe..baf74b4 100644
--- a/drivers/usb/serial/cypress_m8.c
+++ b/drivers/usb/serial/cypress_m8.c
@@ -66,17 +66,15 @@
#include <linux/serial.h>
#include <linux/delay.h>
#include <linux/uaccess.h>
+#include <asm/unaligned.h>
#include "cypress_m8.h"
-#ifdef CONFIG_USB_SERIAL_DEBUG
- static int debug = 1;
-#else
- static int debug;
-#endif
+static int debug;
static int stats;
static int interval;
+static int unstable_bauds;
/*
* Version Information
@@ -89,24 +87,24 @@
#define CYPRESS_BUF_SIZE 1024
#define CYPRESS_CLOSING_WAIT (30*HZ)
-static struct usb_device_id id_table_earthmate [] = {
+static const struct usb_device_id id_table_earthmate[] = {
{ USB_DEVICE(VENDOR_ID_DELORME, PRODUCT_ID_EARTHMATEUSB) },
{ USB_DEVICE(VENDOR_ID_DELORME, PRODUCT_ID_EARTHMATEUSB_LT20) },
{ } /* Terminating entry */
};
-static struct usb_device_id id_table_cyphidcomrs232 [] = {
+static const struct usb_device_id id_table_cyphidcomrs232[] = {
{ USB_DEVICE(VENDOR_ID_CYPRESS, PRODUCT_ID_CYPHIDCOM) },
{ USB_DEVICE(VENDOR_ID_POWERCOM, PRODUCT_ID_UPS) },
{ } /* Terminating entry */
};
-static struct usb_device_id id_table_nokiaca42v2 [] = {
+static const struct usb_device_id id_table_nokiaca42v2[] = {
{ USB_DEVICE(VENDOR_ID_DAZZLE, PRODUCT_ID_CA42) },
{ } /* Terminating entry */
};
-static struct usb_device_id id_table_combined [] = {
+static const struct usb_device_id id_table_combined[] = {
{ USB_DEVICE(VENDOR_ID_DELORME, PRODUCT_ID_EARTHMATEUSB) },
{ USB_DEVICE(VENDOR_ID_DELORME, PRODUCT_ID_EARTHMATEUSB_LT20) },
{ USB_DEVICE(VENDOR_ID_CYPRESS, PRODUCT_ID_CYPHIDCOM) },
@@ -295,6 +293,9 @@
struct cypress_private *priv;
priv = usb_get_serial_port_data(port);
+ if (unstable_bauds)
+ return new_rate;
+
/*
* The general purpose firmware for the Cypress M8 allows for
* a maximum speed of 57600bps (I have no idea whether DeLorme
@@ -344,7 +345,8 @@
{
int new_baudrate = 0, retval = 0, tries = 0;
struct cypress_private *priv;
- __u8 feature_buffer[5];
+ u8 *feature_buffer;
+ const unsigned int feature_len = 5;
unsigned long flags;
dbg("%s", __func__);
@@ -354,17 +356,18 @@
if (!priv->comm_is_ok)
return -ENODEV;
+ feature_buffer = kcalloc(feature_len, sizeof(u8), GFP_KERNEL);
+ if (!feature_buffer)
+ return -ENOMEM;
+
switch (cypress_request_type) {
case CYPRESS_SET_CONFIG:
- new_baudrate = priv->baud_rate;
/* 0 means 'Hang up' so doesn't change the true bit rate */
- if (baud_rate == 0)
- new_baudrate = priv->baud_rate;
- /* Change of speed ? */
- else if (baud_rate != priv->baud_rate) {
+ new_baudrate = priv->baud_rate;
+ if (baud_rate && baud_rate != priv->baud_rate) {
dbg("%s - baud rate is changing", __func__);
retval = analyze_baud_rate(port, baud_rate);
- if (retval >= 0) {
+ if (retval >= 0) {
new_baudrate = retval;
dbg("%s - New baud rate set to %d",
__func__, new_baudrate);
@@ -373,9 +376,8 @@
dbg("%s - baud rate is being sent as %d",
__func__, new_baudrate);
- memset(feature_buffer, 0, sizeof(feature_buffer));
/* fill the feature_buffer with new configuration */
- *((u_int32_t *)feature_buffer) = new_baudrate;
+ put_unaligned_le32(new_baudrate, feature_buffer);
feature_buffer[4] |= data_bits; /* assign data bits in 2 bit space ( max 3 ) */
/* 1 bit gap */
feature_buffer[4] |= (stop_bits << 3); /* assign stop bits in 1 bit space */
@@ -397,15 +399,15 @@
HID_REQ_SET_REPORT,
USB_DIR_OUT | USB_RECIP_INTERFACE | USB_TYPE_CLASS,
0x0300, 0, feature_buffer,
- sizeof(feature_buffer), 500);
+ feature_len, 500);
if (tries++ >= 3)
break;
- } while (retval != sizeof(feature_buffer) &&
+ } while (retval != feature_len &&
retval != -ENODEV);
- if (retval != sizeof(feature_buffer)) {
+ if (retval != feature_len) {
dev_err(&port->dev, "%s - failed sending serial "
"line settings - %d\n", __func__, retval);
cypress_set_dead(port);
@@ -425,43 +427,42 @@
/* Not implemented for this device,
and if we try to do it we're likely
to crash the hardware. */
- return -ENOTTY;
+ retval = -ENOTTY;
+ goto out;
}
dbg("%s - retreiving serial line settings", __func__);
- /* set initial values in feature buffer */
- memset(feature_buffer, 0, sizeof(feature_buffer));
-
do {
retval = usb_control_msg(port->serial->dev,
usb_rcvctrlpipe(port->serial->dev, 0),
HID_REQ_GET_REPORT,
USB_DIR_IN | USB_RECIP_INTERFACE | USB_TYPE_CLASS,
0x0300, 0, feature_buffer,
- sizeof(feature_buffer), 500);
+ feature_len, 500);
if (tries++ >= 3)
break;
- } while (retval != sizeof(feature_buffer)
+ } while (retval != feature_len
&& retval != -ENODEV);
- if (retval != sizeof(feature_buffer)) {
+ if (retval != feature_len) {
dev_err(&port->dev, "%s - failed to retrieve serial "
"line settings - %d\n", __func__, retval);
cypress_set_dead(port);
- return retval;
+ goto out;
} else {
spin_lock_irqsave(&priv->lock, flags);
/* store the config in one byte, and later
use bit masks to check values */
priv->current_config = feature_buffer[4];
- priv->baud_rate = *((u_int32_t *)feature_buffer);
+ priv->baud_rate = get_unaligned_le32(feature_buffer);
spin_unlock_irqrestore(&priv->lock, flags);
}
}
spin_lock_irqsave(&priv->lock, flags);
++priv->cmd_count;
spin_unlock_irqrestore(&priv->lock, flags);
-
+out:
+ kfree(feature_buffer);
return retval;
} /* cypress_serial_control */
@@ -690,7 +691,6 @@
{
struct cypress_private *priv = usb_get_serial_port_data(port);
/* drop dtr and rts */
- priv = usb_get_serial_port_data(port);
spin_lock_irq(&priv->lock);
if (on == 0)
priv->line_control = 0;
@@ -1307,13 +1307,9 @@
spin_unlock_irqrestore(&priv->lock, flags);
/* process read if there is data other than line status */
- if (tty && (bytes > i)) {
- bytes = tty_buffer_request_room(tty, bytes);
- for (; i < bytes ; ++i) {
- dbg("pushing byte number %d - %d - %c", i, data[i],
- data[i]);
- tty_insert_flip_char(tty, data[i], tty_flag);
- }
+ if (tty && bytes > i) {
+ tty_insert_flip_string_fixed_flag(tty, data + i,
+ bytes - i, tty_flag);
tty_flip_buffer_push(tty);
}
@@ -1325,9 +1321,9 @@
continue_read:
tty_kref_put(tty);
- /* Continue trying to always read... unless the port has closed. */
+ /* Continue trying to always read */
- if (port->port.count > 0 && priv->comm_is_ok) {
+ if (priv->comm_is_ok) {
usb_fill_int_urb(port->interrupt_in_urb, port->serial->dev,
usb_rcvintpipe(port->serial->dev,
port->interrupt_in_endpointAddress),
@@ -1336,7 +1332,7 @@
cypress_read_int_callback, port,
priv->read_urb_interval);
result = usb_submit_urb(port->interrupt_in_urb, GFP_ATOMIC);
- if (result) {
+ if (result && result != -EPERM) {
dev_err(&urb->dev->dev, "%s - failed resubmitting "
"read urb, error %d\n", __func__,
result);
@@ -1650,3 +1646,5 @@
MODULE_PARM_DESC(stats, "Enable statistics or not");
module_param(interval, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(interval, "Overrides interrupt interval");
+module_param(unstable_bauds, bool, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(unstable_bauds, "Allow unstable baud rates");
diff --git a/drivers/usb/serial/digi_acceleport.c b/drivers/usb/serial/digi_acceleport.c
index 68e80be..68b0aa5 100644
--- a/drivers/usb/serial/digi_acceleport.c
+++ b/drivers/usb/serial/digi_acceleport.c
@@ -470,18 +470,18 @@
static int debug;
-static struct usb_device_id id_table_combined [] = {
+static const struct usb_device_id id_table_combined[] = {
{ USB_DEVICE(DIGI_VENDOR_ID, DIGI_2_ID) },
{ USB_DEVICE(DIGI_VENDOR_ID, DIGI_4_ID) },
{ } /* Terminating entry */
};
-static struct usb_device_id id_table_2 [] = {
+static const struct usb_device_id id_table_2[] = {
{ USB_DEVICE(DIGI_VENDOR_ID, DIGI_2_ID) },
{ } /* Terminating entry */
};
-static struct usb_device_id id_table_4 [] = {
+static const struct usb_device_id id_table_4[] = {
{ USB_DEVICE(DIGI_VENDOR_ID, DIGI_4_ID) },
{ } /* Terminating entry */
};
@@ -1262,10 +1262,10 @@
return;
}
- /* try to send any buffered data on this port, if it is open */
+ /* try to send any buffered data on this port */
spin_lock(&priv->dp_port_lock);
priv->dp_write_urb_in_use = 0;
- if (port->port.count && priv->dp_out_buf_len > 0) {
+ if (priv->dp_out_buf_len > 0) {
*((unsigned char *)(port->write_urb->transfer_buffer))
= (unsigned char)DIGI_CMD_SEND_DATA;
*((unsigned char *)(port->write_urb->transfer_buffer) + 1)
@@ -1288,7 +1288,7 @@
schedule_work(&priv->dp_wakeup_work);
spin_unlock(&priv->dp_port_lock);
- if (ret)
+ if (ret && ret != -EPERM)
dev_err(&port->dev,
"%s: usb_submit_urb failed, ret=%d, port=%d\n",
__func__, ret, priv->dp_port_num);
@@ -1353,8 +1353,7 @@
struct digi_port *priv = usb_get_serial_port_data(port);
struct ktermios not_termios;
- dbg("digi_open: TOP: port=%d, open_count=%d",
- priv->dp_port_num, port->port.count);
+ dbg("digi_open: TOP: port=%d", priv->dp_port_num);
/* be sure the device is started up */
if (digi_startup_device(port->serial) != 0)
@@ -1393,8 +1392,7 @@
unsigned char buf[32];
struct digi_port *priv = usb_get_serial_port_data(port);
- dbg("digi_close: TOP: port=%d, open_count=%d",
- priv->dp_port_num, port->port.count);
+ dbg("digi_close: TOP: port=%d", priv->dp_port_num);
mutex_lock(&port->serial->disc_mutex);
/* if disconnected, just clear flags */
@@ -1629,7 +1627,7 @@
/* continue read */
urb->dev = port->serial->dev;
ret = usb_submit_urb(urb, GFP_ATOMIC);
- if (ret != 0) {
+ if (ret != 0 && ret != -EPERM) {
dev_err(&port->dev,
"%s: failed resubmitting urb, ret=%d, port=%d\n",
__func__, ret, priv->dp_port_num);
@@ -1658,12 +1656,11 @@
int port_status = ((unsigned char *)urb->transfer_buffer)[2];
unsigned char *data = ((unsigned char *)urb->transfer_buffer) + 3;
int flag, throttled;
- int i;
int status = urb->status;
/* do not process callbacks on closed ports */
/* but do continue the read chain */
- if (port->port.count == 0)
+ if (urb->status == -ENOENT)
return 0;
/* short/multiple packet check */
@@ -1705,17 +1702,9 @@
/* data length is len-1 (one byte of len is port_status) */
--len;
-
- len = tty_buffer_request_room(tty, len);
if (len > 0) {
- /* Hot path */
- if (flag == TTY_NORMAL)
- tty_insert_flip_string(tty, data, len);
- else {
- for (i = 0; i < len; i++)
- tty_insert_flip_char(tty,
- data[i], flag);
- }
+ tty_insert_flip_string_fixed_flag(tty, data, len,
+ flag);
tty_flip_buffer_push(tty);
}
}
@@ -1776,8 +1765,7 @@
tty = tty_port_tty_get(&port->port);
rts = 0;
- if (port->port.count)
- rts = tty->termios->c_cflag & CRTSCTS;
+ rts = tty->termios->c_cflag & CRTSCTS;
if (opcode == DIGI_CMD_READ_INPUT_SIGNALS) {
spin_lock(&priv->dp_port_lock);
diff --git a/drivers/usb/serial/empeg.c b/drivers/usb/serial/empeg.c
index 7dd0e3e..5f740a1 100644
--- a/drivers/usb/serial/empeg.c
+++ b/drivers/usb/serial/empeg.c
@@ -93,7 +93,7 @@
static void empeg_write_bulk_callback(struct urb *urb);
static void empeg_read_bulk_callback(struct urb *urb);
-static struct usb_device_id id_table [] = {
+static const struct usb_device_id id_table[] = {
{ USB_DEVICE(EMPEG_VENDOR_ID, EMPEG_PRODUCT_ID) },
{ } /* Terminating entry */
};
@@ -346,7 +346,6 @@
tty = tty_port_tty_get(&port->port);
if (urb->actual_length) {
- tty_buffer_request_room(tty, urb->actual_length);
tty_insert_flip_string(tty, data, urb->actual_length);
tty_flip_buffer_push(tty);
bytes_in += urb->actual_length;
diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c
index 7638828..6af0dfa 100644
--- a/drivers/usb/serial/ftdi_sio.c
+++ b/drivers/usb/serial/ftdi_sio.c
@@ -33,12 +33,12 @@
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/slab.h>
-#include <linux/smp_lock.h>
#include <linux/tty.h>
#include <linux/tty_driver.h>
#include <linux/tty_flip.h>
#include <linux/module.h>
#include <linux/spinlock.h>
+#include <linux/mutex.h>
#include <linux/uaccess.h>
#include <linux/usb.h>
#include <linux/serial.h>
@@ -88,10 +88,10 @@
unsigned int latency; /* latency setting in use */
spinlock_t tx_lock; /* spinlock for transmit state */
- unsigned long tx_bytes;
unsigned long tx_outstanding_bytes;
unsigned long tx_outstanding_urbs;
unsigned short max_packet_size;
+ struct mutex cfg_lock; /* Avoid mess by parallel calls of config ioctl() */
};
/* struct ftdi_sio_quirk is used by devices requiring special attention. */
@@ -614,6 +614,7 @@
{ USB_DEVICE(FTDI_VID, FTDI_OCEANIC_PID) },
{ USB_DEVICE(TTI_VID, TTI_QL355P_PID) },
{ USB_DEVICE(FTDI_VID, FTDI_RM_CANVIEW_PID) },
+ { USB_DEVICE(CONTEC_VID, CONTEC_COM1USBH_PID) },
{ USB_DEVICE(BANDB_VID, BANDB_USOTL4_PID) },
{ USB_DEVICE(BANDB_VID, BANDB_USTL4_PID) },
{ USB_DEVICE(BANDB_VID, BANDB_USO9ML2_PID) },
@@ -737,6 +738,10 @@
.driver_info = (kernel_ulong_t)&ftdi_jtag_quirk },
{ USB_DEVICE(FTDI_VID, HAMEG_HO820_PID) },
{ USB_DEVICE(FTDI_VID, HAMEG_HO870_PID) },
+ { USB_DEVICE(FTDI_VID, MJSG_GENERIC_PID) },
+ { USB_DEVICE(FTDI_VID, MJSG_SR_RADIO_PID) },
+ { USB_DEVICE(FTDI_VID, MJSG_HD_RADIO_PID) },
+ { USB_DEVICE(FTDI_VID, MJSG_XM_RADIO_PID) },
{ }, /* Optional parameter entry */
{ } /* Terminating entry */
};
@@ -812,7 +817,7 @@
.name = "ftdi_sio",
},
.description = "FTDI USB Serial Device",
- .usb_driver = &ftdi_driver ,
+ .usb_driver = &ftdi_driver,
.id_table = id_table_combined,
.num_ports = 1,
.probe = ftdi_sio_probe,
@@ -828,8 +833,8 @@
.chars_in_buffer = ftdi_chars_in_buffer,
.read_bulk_callback = ftdi_read_bulk_callback,
.write_bulk_callback = ftdi_write_bulk_callback,
- .tiocmget = ftdi_tiocmget,
- .tiocmset = ftdi_tiocmset,
+ .tiocmget = ftdi_tiocmget,
+ .tiocmset = ftdi_tiocmset,
.ioctl = ftdi_ioctl,
.set_termios = ftdi_set_termios,
.break_ctl = ftdi_break_ctl,
@@ -935,7 +940,6 @@
unsigned int clear)
{
struct ftdi_private *priv = usb_get_serial_port_data(port);
- char *buf;
unsigned urb_value;
int rv;
@@ -944,10 +948,6 @@
return 0; /* no change */
}
- buf = kmalloc(1, GFP_NOIO);
- if (!buf)
- return -ENOMEM;
-
clear &= ~set; /* 'set' takes precedence over 'clear' */
urb_value = 0;
if (clear & TIOCM_DTR)
@@ -963,9 +963,7 @@
FTDI_SIO_SET_MODEM_CTRL_REQUEST,
FTDI_SIO_SET_MODEM_CTRL_REQUEST_TYPE,
urb_value, priv->interface,
- buf, 0, WDR_TIMEOUT);
-
- kfree(buf);
+ NULL, 0, WDR_TIMEOUT);
if (rv < 0) {
dbg("%s Error from MODEM_CTRL urb: DTR %s, RTS %s",
__func__,
@@ -1124,16 +1122,11 @@
static int change_speed(struct tty_struct *tty, struct usb_serial_port *port)
{
struct ftdi_private *priv = usb_get_serial_port_data(port);
- char *buf;
__u16 urb_value;
__u16 urb_index;
__u32 urb_index_value;
int rv;
- buf = kmalloc(1, GFP_NOIO);
- if (!buf)
- return -ENOMEM;
-
urb_index_value = get_ftdi_divisor(tty, port);
urb_value = (__u16)urb_index_value;
urb_index = (__u16)(urb_index_value >> 16);
@@ -1146,9 +1139,7 @@
FTDI_SIO_SET_BAUDRATE_REQUEST,
FTDI_SIO_SET_BAUDRATE_REQUEST_TYPE,
urb_value, urb_index,
- buf, 0, WDR_SHORT_TIMEOUT);
-
- kfree(buf);
+ NULL, 0, WDR_SHORT_TIMEOUT);
return rv;
}
@@ -1156,8 +1147,7 @@
{
struct ftdi_private *priv = usb_get_serial_port_data(port);
struct usb_device *udev = port->serial->dev;
- char buf[1];
- int rv = 0;
+ int rv;
int l = priv->latency;
if (priv->flags & ASYNC_LOW_LATENCY)
@@ -1170,8 +1160,7 @@
FTDI_SIO_SET_LATENCY_TIMER_REQUEST,
FTDI_SIO_SET_LATENCY_TIMER_REQUEST_TYPE,
l, priv->interface,
- buf, 0, WDR_TIMEOUT);
-
+ NULL, 0, WDR_TIMEOUT);
if (rv < 0)
dev_err(&port->dev, "Unable to write latency timer: %i\n", rv);
return rv;
@@ -1181,24 +1170,29 @@
{
struct ftdi_private *priv = usb_get_serial_port_data(port);
struct usb_device *udev = port->serial->dev;
- unsigned short latency = 0;
- int rv = 0;
-
+ unsigned char *buf;
+ int rv;
dbg("%s", __func__);
+ buf = kmalloc(1, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
rv = usb_control_msg(udev,
usb_rcvctrlpipe(udev, 0),
FTDI_SIO_GET_LATENCY_TIMER_REQUEST,
FTDI_SIO_GET_LATENCY_TIMER_REQUEST_TYPE,
0, priv->interface,
- (char *) &latency, 1, WDR_TIMEOUT);
-
- if (rv < 0) {
+ buf, 1, WDR_TIMEOUT);
+ if (rv < 0)
dev_err(&port->dev, "Unable to read latency timer: %i\n", rv);
- return -EIO;
- }
- return latency;
+ else
+ priv->latency = buf[0];
+
+ kfree(buf);
+
+ return rv;
}
static int get_serial_info(struct usb_serial_port *port,
@@ -1229,7 +1223,7 @@
if (copy_from_user(&new_serial, newinfo, sizeof(new_serial)))
return -EFAULT;
- lock_kernel();
+ mutex_lock(&priv->cfg_lock);
old_priv = *priv;
/* Do error checking and permission checking */
@@ -1237,7 +1231,7 @@
if (!capable(CAP_SYS_ADMIN)) {
if (((new_serial.flags & ~ASYNC_USR_MASK) !=
(priv->flags & ~ASYNC_USR_MASK))) {
- unlock_kernel();
+ mutex_unlock(&priv->cfg_lock);
return -EPERM;
}
priv->flags = ((priv->flags & ~ASYNC_USR_MASK) |
@@ -1248,7 +1242,7 @@
if ((new_serial.baud_base != priv->baud_base) &&
(new_serial.baud_base < 9600)) {
- unlock_kernel();
+ mutex_unlock(&priv->cfg_lock);
return -EINVAL;
}
@@ -1278,11 +1272,11 @@
(priv->flags & ASYNC_SPD_MASK)) ||
(((priv->flags & ASYNC_SPD_MASK) == ASYNC_SPD_CUST) &&
(old_priv.custom_divisor != priv->custom_divisor))) {
- unlock_kernel();
+ mutex_unlock(&priv->cfg_lock);
change_speed(tty, port);
}
else
- unlock_kernel();
+ mutex_unlock(&priv->cfg_lock);
return 0;
} /* set_serial_info */
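[Editor's note: the lock_kernel()/unlock_kernel() pairs above are replaced by a driver-private mutex (cfg_lock, initialized in the port-probe hunk further down). A minimal sketch of the pattern; every name other than cfg_lock is a placeholder, not taken from the patch.]

	#include <linux/mutex.h>
	#include <linux/serial.h>

	struct example_port_priv {
		struct mutex	cfg_lock;	/* serializes TIOCSSERIAL updates */
		unsigned long	flags;
		int		custom_divisor;
	};

	static int example_set_serial_info(struct example_port_priv *priv,
					   const struct serial_struct *new_serial)
	{
		mutex_lock(&priv->cfg_lock);	/* replaces the old BKL section */
		priv->flags = new_serial->flags;
		priv->custom_divisor = new_serial->custom_divisor;
		mutex_unlock(&priv->cfg_lock);
		return 0;
	}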
@@ -1338,20 +1332,20 @@
__func__);
}
} else if (version < 0x200) {
- /* Old device. Assume its the original SIO. */
+ /* Old device. Assume it's the original SIO. */
priv->chip_type = SIO;
priv->baud_base = 12000000 / 16;
priv->write_offset = 1;
} else if (version < 0x400) {
- /* Assume its an FT8U232AM (or FT8U245AM) */
+ /* Assume it's an FT8U232AM (or FT8U245AM) */
/* (It might be a BM because of the iSerialNumber bug,
* but it will still work as an AM device.) */
priv->chip_type = FT8U232AM;
} else if (version < 0x600) {
- /* Assume its an FT232BM (or FT245BM) */
+ /* Assume it's an FT232BM (or FT245BM) */
priv->chip_type = FT232BM;
} else {
- /* Assume its an FT232R */
+ /* Assume it's an FT232R */
priv->chip_type = FT232RL;
}
dev_info(&udev->dev, "Detected %s\n", ftdi_chip_name[priv->chip_type]);
@@ -1371,7 +1365,7 @@
struct usb_endpoint_descriptor *ep_desc = &interface->cur_altsetting->endpoint[1].desc;
unsigned num_endpoints;
- int i = 0;
+ int i;
num_endpoints = interface->cur_altsetting->desc.bNumEndpoints;
dev_info(&udev->dev, "Number of endpoints %d\n", num_endpoints);
@@ -1423,7 +1417,7 @@
struct usb_serial_port *port = to_usb_serial_port(dev);
struct ftdi_private *priv = usb_get_serial_port_data(port);
int v = simple_strtoul(valbuf, NULL, 10);
- int rv = 0;
+ int rv;
priv->latency = v;
rv = write_latency_timer(port);
@@ -1440,9 +1434,8 @@
struct usb_serial_port *port = to_usb_serial_port(dev);
struct ftdi_private *priv = usb_get_serial_port_data(port);
struct usb_device *udev = port->serial->dev;
- char buf[1];
int v = simple_strtoul(valbuf, NULL, 10);
- int rv = 0;
+ int rv;
dbg("%s: setting event char = %i", __func__, v);
@@ -1451,8 +1444,7 @@
FTDI_SIO_SET_EVENT_CHAR_REQUEST,
FTDI_SIO_SET_EVENT_CHAR_REQUEST_TYPE,
v, priv->interface,
- buf, 0, WDR_TIMEOUT);
-
+ NULL, 0, WDR_TIMEOUT);
if (rv < 0) {
dbg("Unable to write event character: %i", rv);
return -EIO;
@@ -1551,9 +1543,9 @@
kref_init(&priv->kref);
spin_lock_init(&priv->tx_lock);
+ mutex_init(&priv->cfg_lock);
init_waitqueue_head(&priv->delta_msr_wait);
- /* This will push the characters through immediately rather
- than queue a task to deliver them */
+
priv->flags = ASYNC_LOW_LATENCY;
if (quirk && quirk->port_probe)
@@ -1585,7 +1577,8 @@
ftdi_determine_type(port);
ftdi_set_max_packet_size(port);
- read_latency_timer(port);
+ if (read_latency_timer(port) < 0)
+ priv->latency = 16;
create_sysfs_attrs(port);
return 0;
}
@@ -1630,8 +1623,6 @@
{
struct usb_device *udev = serial->dev;
int latency = ndi_latency_timer;
- int rv = 0;
- char buf[1];
if (latency == 0)
latency = 1;
@@ -1641,10 +1632,11 @@
dbg("%s setting NDI device latency to %d", __func__, latency);
dev_info(&udev->dev, "NDI device with a latency value of %d", latency);
- rv = usb_control_msg(udev, usb_sndctrlpipe(udev, 0),
+ /* FIXME: errors are not returned */
+ usb_control_msg(udev, usb_sndctrlpipe(udev, 0),
FTDI_SIO_SET_LATENCY_TIMER_REQUEST,
FTDI_SIO_SET_LATENCY_TIMER_REQUEST_TYPE,
- latency, 0, buf, 0, WDR_TIMEOUT);
+ latency, 0, NULL, 0, WDR_TIMEOUT);
return 0;
}
@@ -1720,7 +1712,7 @@
urb->transfer_buffer_length,
ftdi_read_bulk_callback, port);
result = usb_submit_urb(urb, mem_flags);
- if (result)
+ if (result && result != -EPERM)
dev_err(&port->dev,
"%s - failed submitting read urb, error %d\n",
__func__, result);
@@ -1732,16 +1724,10 @@
struct usb_device *dev = port->serial->dev;
struct ftdi_private *priv = usb_get_serial_port_data(port);
unsigned long flags;
-
- int result = 0;
- char buf[1]; /* Needed for the usb_control_msg I think */
+ int result;
dbg("%s", __func__);
- spin_lock_irqsave(&priv->tx_lock, flags);
- priv->tx_bytes = 0;
- spin_unlock_irqrestore(&priv->tx_lock, flags);
-
write_latency_timer(port);
/* No error checking for this (will get errors later anyway) */
@@ -1749,7 +1735,7 @@
usb_control_msg(dev, usb_sndctrlpipe(dev, 0),
FTDI_SIO_RESET_REQUEST, FTDI_SIO_RESET_REQUEST_TYPE,
FTDI_SIO_RESET_SIO,
- priv->interface, buf, 0, WDR_TIMEOUT);
+ priv->interface, NULL, 0, WDR_TIMEOUT);
/* Termios defaults are set by usb_serial_init. We don't change
port->tty->termios - this would lose speed settings, etc.
@@ -1777,7 +1763,6 @@
static void ftdi_dtr_rts(struct usb_serial_port *port, int on)
{
struct ftdi_private *priv = usb_get_serial_port_data(port);
- char buf[1];
mutex_lock(&port->serial->disc_mutex);
if (!port->serial->disconnected) {
@@ -1786,7 +1771,7 @@
usb_sndctrlpipe(port->serial->dev, 0),
FTDI_SIO_SET_FLOW_CTRL_REQUEST,
FTDI_SIO_SET_FLOW_CTRL_REQUEST_TYPE,
- 0, priv->interface, buf, 0,
+ 0, priv->interface, NULL, 0,
WDR_TIMEOUT) < 0) {
dev_err(&port->dev, "error from flowcontrol urb\n");
}
@@ -1847,7 +1832,7 @@
spin_lock_irqsave(&priv->tx_lock, flags);
if (priv->tx_outstanding_urbs > URB_UPPER_LIMIT) {
spin_unlock_irqrestore(&priv->tx_lock, flags);
- dbg("%s - write limit hit\n", __func__);
+ dbg("%s - write limit hit", __func__);
return 0;
}
priv->tx_outstanding_urbs++;
@@ -1927,7 +1912,6 @@
} else {
spin_lock_irqsave(&priv->tx_lock, flags);
priv->tx_outstanding_bytes += count;
- priv->tx_bytes += count;
spin_unlock_irqrestore(&priv->tx_lock, flags);
}
@@ -2154,8 +2138,7 @@
{
struct usb_serial_port *port = tty->driver_data;
struct ftdi_private *priv = usb_get_serial_port_data(port);
- __u16 urb_value = 0;
- char buf[1];
+ __u16 urb_value;
/* break_state = -1 to turn on break, and 0 to turn off break */
/* see drivers/char/tty_io.c to see it used */
@@ -2171,7 +2154,7 @@
FTDI_SIO_SET_DATA_REQUEST,
FTDI_SIO_SET_DATA_REQUEST_TYPE,
urb_value , priv->interface,
- buf, 0, WDR_TIMEOUT) < 0) {
+ NULL, 0, WDR_TIMEOUT) < 0) {
dev_err(&port->dev, "%s FAILED to enable/disable break state "
"(state was %d)\n", __func__, break_state);
}
@@ -2195,7 +2178,6 @@
struct ktermios *termios = tty->termios;
unsigned int cflag = termios->c_cflag;
__u16 urb_value; /* will hold the new flags */
- char buf[1]; /* Perhaps I should dynamically alloc this? */
/* Added for xon/xoff support */
unsigned int iflag = termios->c_iflag;
@@ -2246,12 +2228,10 @@
}
if (cflag & CSIZE) {
switch (cflag & CSIZE) {
- case CS5: urb_value |= 5; dbg("Setting CS5"); break;
- case CS6: urb_value |= 6; dbg("Setting CS6"); break;
case CS7: urb_value |= 7; dbg("Setting CS7"); break;
case CS8: urb_value |= 8; dbg("Setting CS8"); break;
default:
- dev_err(&port->dev, "CSIZE was set but not CS5-CS8\n");
+ dev_err(&port->dev, "CSIZE was set but not CS7-CS8\n");
}
}
@@ -2263,7 +2243,7 @@
FTDI_SIO_SET_DATA_REQUEST,
FTDI_SIO_SET_DATA_REQUEST_TYPE,
urb_value , priv->interface,
- buf, 0, WDR_SHORT_TIMEOUT) < 0) {
+ NULL, 0, WDR_SHORT_TIMEOUT) < 0) {
dev_err(&port->dev, "%s FAILED to set "
"databits/stopbits/parity\n", __func__);
}
@@ -2275,7 +2255,7 @@
FTDI_SIO_SET_FLOW_CTRL_REQUEST,
FTDI_SIO_SET_FLOW_CTRL_REQUEST_TYPE,
0, priv->interface,
- buf, 0, WDR_TIMEOUT) < 0) {
+ NULL, 0, WDR_TIMEOUT) < 0) {
dev_err(&port->dev,
"%s error from disable flowcontrol urb\n",
__func__);
@@ -2301,7 +2281,7 @@
FTDI_SIO_SET_FLOW_CTRL_REQUEST,
FTDI_SIO_SET_FLOW_CTRL_REQUEST_TYPE,
0 , (FTDI_SIO_RTS_CTS_HS | priv->interface),
- buf, 0, WDR_TIMEOUT) < 0) {
+ NULL, 0, WDR_TIMEOUT) < 0) {
dev_err(&port->dev,
"urb failed to set to rts/cts flow control\n");
}
@@ -2333,7 +2313,7 @@
FTDI_SIO_SET_FLOW_CTRL_REQUEST_TYPE,
urb_value , (FTDI_SIO_XON_XOFF_HS
| priv->interface),
- buf, 0, WDR_TIMEOUT) < 0) {
+ NULL, 0, WDR_TIMEOUT) < 0) {
dev_err(&port->dev, "urb failed to set to "
"xon/xoff flow control\n");
}
@@ -2347,7 +2327,7 @@
FTDI_SIO_SET_FLOW_CTRL_REQUEST,
FTDI_SIO_SET_FLOW_CTRL_REQUEST_TYPE,
0, priv->interface,
- buf, 0, WDR_TIMEOUT) < 0) {
+ NULL, 0, WDR_TIMEOUT) < 0) {
dev_err(&port->dev,
"urb failed to clear flow control\n");
}
@@ -2361,21 +2341,22 @@
{
struct usb_serial_port *port = tty->driver_data;
struct ftdi_private *priv = usb_get_serial_port_data(port);
- unsigned char buf[2];
+ unsigned char *buf;
+ int len;
int ret;
dbg("%s TIOCMGET", __func__);
+
+ buf = kmalloc(2, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+ /*
+ * The 8U232AM returns a two byte value (the SIO a 1 byte value) in
+ * the same format as the data returned from the IN endpoint.
+ */
switch (priv->chip_type) {
case SIO:
- /* Request the status from the device */
- ret = usb_control_msg(port->serial->dev,
- usb_rcvctrlpipe(port->serial->dev, 0),
- FTDI_SIO_GET_MODEM_STATUS_REQUEST,
- FTDI_SIO_GET_MODEM_STATUS_REQUEST_TYPE,
- 0, 0,
- buf, 1, WDR_TIMEOUT);
- if (ret < 0)
- return ret;
+ len = 1;
break;
case FT8U232AM:
case FT232BM:
@@ -2383,27 +2364,30 @@
case FT232RL:
case FT2232H:
case FT4232H:
- /* the 8U232AM returns a two byte value (the sio is a 1 byte
- value) - in the same format as the data returned from the in
- point */
- ret = usb_control_msg(port->serial->dev,
- usb_rcvctrlpipe(port->serial->dev, 0),
- FTDI_SIO_GET_MODEM_STATUS_REQUEST,
- FTDI_SIO_GET_MODEM_STATUS_REQUEST_TYPE,
- 0, priv->interface,
- buf, 2, WDR_TIMEOUT);
- if (ret < 0)
- return ret;
+ len = 2;
break;
default:
- return -EFAULT;
+ ret = -EFAULT;
+ goto out;
}
- return (buf[0] & FTDI_SIO_DSR_MASK ? TIOCM_DSR : 0) |
+ ret = usb_control_msg(port->serial->dev,
+ usb_rcvctrlpipe(port->serial->dev, 0),
+ FTDI_SIO_GET_MODEM_STATUS_REQUEST,
+ FTDI_SIO_GET_MODEM_STATUS_REQUEST_TYPE,
+ 0, priv->interface,
+ buf, len, WDR_TIMEOUT);
+ if (ret < 0)
+ goto out;
+
+ ret = (buf[0] & FTDI_SIO_DSR_MASK ? TIOCM_DSR : 0) |
(buf[0] & FTDI_SIO_CTS_MASK ? TIOCM_CTS : 0) |
(buf[0] & FTDI_SIO_RI_MASK ? TIOCM_RI : 0) |
(buf[0] & FTDI_SIO_RLSD_MASK ? TIOCM_CD : 0) |
priv->last_dtr_rts;
+out:
+ kfree(buf);
+ return ret;
}
static int ftdi_tiocmset(struct tty_struct *tty, struct file *file,
@@ -2508,8 +2492,7 @@
port->throttled = port->throttle_req = 0;
spin_unlock_irqrestore(&port->lock, flags);
- /* Resubmit urb if throttled and open. */
- if (was_throttled && test_bit(ASYNCB_INITIALIZED, &port->port.flags))
+ if (was_throttled)
ftdi_submit_read_urb(port, GFP_KERNEL);
}
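[Editor's note: most of the ftdi_sio.c changes above follow one pattern: buffers handed to usb_control_msg() must be DMA-able, so the on-stack char buf[1] arrays are dropped in favour of NULL for zero-length requests and kmalloc()ed memory where data is actually transferred. A minimal sketch of the read case; the request type, value/index and timeout below are placeholders.]

	#include <linux/slab.h>
	#include <linux/usb.h>

	static int example_read_one_byte(struct usb_device *udev, u8 request,
					 u16 value, u16 index, u8 *result)
	{
		u8 *buf;
		int rv;

		buf = kmalloc(1, GFP_KERNEL);	/* heap memory is safe for DMA */
		if (!buf)
			return -ENOMEM;

		rv = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0), request,
				     USB_TYPE_VENDOR | USB_RECIP_DEVICE | USB_DIR_IN,
				     value, index, buf, 1, 5000);
		if (rv == 1)
			*result = buf[0];

		kfree(buf);
		return rv < 0 ? rv : 0;
	}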
diff --git a/drivers/usb/serial/ftdi_sio.h b/drivers/usb/serial/ftdi_sio.h
index b0e0d64..ff9bf80 100644
--- a/drivers/usb/serial/ftdi_sio.h
+++ b/drivers/usb/serial/ftdi_sio.h
@@ -28,13 +28,13 @@
#define FTDI_SIO_SET_FLOW_CTRL 2 /* Set flow control register */
#define FTDI_SIO_SET_BAUD_RATE 3 /* Set baud rate */
#define FTDI_SIO_SET_DATA 4 /* Set the data characteristics of the port */
-#define FTDI_SIO_GET_MODEM_STATUS 5 /* Retrieve current value of modern status register */
+#define FTDI_SIO_GET_MODEM_STATUS 5 /* Retrieve current value of modem status register */
#define FTDI_SIO_SET_EVENT_CHAR 6 /* Set the event character */
#define FTDI_SIO_SET_ERROR_CHAR 7 /* Set the error character */
#define FTDI_SIO_SET_LATENCY_TIMER 9 /* Set the latency timer */
#define FTDI_SIO_GET_LATENCY_TIMER 10 /* Get the latency timer */
-/* Interface indicies for FT2232, FT2232H and FT4232H devices*/
+/* Interface indices for FT2232, FT2232H and FT4232H devices */
#define INTERFACE_A 1
#define INTERFACE_B 2
#define INTERFACE_C 3
@@ -270,7 +270,7 @@
* BmRequestType: 0100 0000b
* bRequest: FTDI_SIO_SET_FLOW_CTRL
* wValue: Xoff/Xon
- * wIndex: Protocol/Port - hIndex is protocl / lIndex is port
+ * wIndex: Protocol/Port - hIndex is protocol / lIndex is port
* wLength: 0
* Data: None
*
diff --git a/drivers/usb/serial/ftdi_sio_ids.h b/drivers/usb/serial/ftdi_sio_ids.h
index c8951ae..0727e19 100644
--- a/drivers/usb/serial/ftdi_sio_ids.h
+++ b/drivers/usb/serial/ftdi_sio_ids.h
@@ -22,7 +22,7 @@
#define FTDI_8U232AM_ALT_PID 0x6006 /* FTDI's alternate PID for above */
#define FTDI_8U2232C_PID 0x6010 /* Dual channel device */
#define FTDI_4232H_PID 0x6011 /* Quad channel hi-speed device */
-#define FTDI_SIO_PID 0x8372 /* Product Id SIO application of 8U100AX */
+#define FTDI_SIO_PID 0x8372 /* Product Id SIO application of 8U100AX */
#define FTDI_232RL_PID 0xFBFA /* Product ID for FT232RL */
@@ -49,7 +49,7 @@
#define LMI_LM3S_DEVEL_BOARD_PID 0xbcd8
#define LMI_LM3S_EVAL_BOARD_PID 0xbcd9
-#define FTDI_TURTELIZER_PID 0xBDC8 /* JTAG/RS-232 adapter by egnite GmBH */
+#define FTDI_TURTELIZER_PID 0xBDC8 /* JTAG/RS-232 adapter by egnite GmbH */
/* OpenDCC (www.opendcc.de) product id */
#define FTDI_OPENDCC_PID 0xBFD8
@@ -185,7 +185,7 @@
#define FTDI_ELV_TFD128_PID 0xE0EC /* ELV Temperatur-Feuchte-Datenlogger TFD 128 */
#define FTDI_ELV_FM3RX_PID 0xE0ED /* ELV Messwertuebertragung FM3 RX */
#define FTDI_ELV_WS777_PID 0xE0EE /* Conrad WS 777 */
-#define FTDI_ELV_EM1010PC_PID 0xE0EF /* Engery monitor EM 1010 PC */
+#define FTDI_ELV_EM1010PC_PID 0xE0EF /* Energy monitor EM 1010 PC */
#define FTDI_ELV_CSI8_PID 0xE0F0 /* Computer-Schalt-Interface (CSI 8) */
#define FTDI_ELV_EM1000DL_PID 0xE0F1 /* PC-Datenlogger fuer Energiemonitor (EM 1000 DL) */
#define FTDI_ELV_PCK100_PID 0xE0F2 /* PC-Kabeltester (PCK 100) */
@@ -212,8 +212,8 @@
* drivers, or possibly the Comedi drivers in some cases. */
#define FTDI_ELV_CLI7000_PID 0xFB59 /* Computer-Light-Interface (CLI 7000) */
#define FTDI_ELV_PPS7330_PID 0xFB5C /* Processor-Power-Supply (PPS 7330) */
-#define FTDI_ELV_TFM100_PID 0xFB5D /* Temperartur-Feuchte Messgeraet (TFM 100) */
-#define FTDI_ELV_UDF77_PID 0xFB5E /* USB DCF Funkurh (UDF 77) */
+#define FTDI_ELV_TFM100_PID 0xFB5D /* Temperatur-Feuchte-Messgeraet (TFM 100) */
+#define FTDI_ELV_UDF77_PID 0xFB5E /* USB DCF Funkuhr (UDF 77) */
#define FTDI_ELV_UIO88_PID 0xFB5F /* USB-I/O Interface (UIO 88) */
/*
@@ -320,7 +320,7 @@
/*
* 4N-GALAXY.DE PIDs for CAN-USB, USB-RS232, USB-RS422, USB-RS485,
- * USB-TTY activ, USB-TTY passiv. Some PIDs are used by several devices
+ * USB-TTY aktiv, USB-TTY passiv. Some PIDs are used by several devices
* and I'm not entirely sure which are used by which.
*/
#define FTDI_4N_GALAXY_DE_1_PID 0xF3C0
@@ -330,10 +330,10 @@
* Linx Technologies product ids
*/
#define LINX_SDMUSBQSS_PID 0xF448 /* Linx SDM-USB-QS-S */
-#define LINX_MASTERDEVEL2_PID 0xF449 /* Linx Master Development 2.0 */
-#define LINX_FUTURE_0_PID 0xF44A /* Linx future device */
-#define LINX_FUTURE_1_PID 0xF44B /* Linx future device */
-#define LINX_FUTURE_2_PID 0xF44C /* Linx future device */
+#define LINX_MASTERDEVEL2_PID 0xF449 /* Linx Master Development 2.0 */
+#define LINX_FUTURE_0_PID 0xF44A /* Linx future device */
+#define LINX_FUTURE_1_PID 0xF44B /* Linx future device */
+#define LINX_FUTURE_2_PID 0xF44C /* Linx future device */
/*
* Oceanic product ids
@@ -494,6 +494,13 @@
#define RATOC_PRODUCT_ID_USB60F 0xb020
/*
+ * Contec products (http://www.contec.com)
+ * Submitted by Daniel Sangorrin
+ */
+#define CONTEC_VID 0x06CE /* Vendor ID */
+#define CONTEC_COM1USBH_PID 0x8311 /* COM-1(USB)H */
+
+/*
* Definitions for B&B Electronics products.
*/
#define BANDB_VID 0x0856 /* B&B Electronics Vendor ID */
@@ -642,7 +649,7 @@
#define FALCOM_TWIST_PID 0x0001 /* Falcom Twist USB GPRS modem */
#define FALCOM_SAMBA_PID 0x0005 /* Falcom Samba USB GPRS modem */
-/* Larsen and Brusgaard AltiTrack/USBtrack */
+/* Larsen and Brusgaard AltiTrack/USBtrack */
#define LARSENBRUSGAARD_VID 0x0FD8
#define LB_ALTITRACK_PID 0x0001
@@ -971,7 +978,7 @@
#define ALTI2_N3_PID 0x6001 /* Neptune 3 */
/*
- * Dresden Elektronic Sensor Terminal Board
+ * Dresden Elektronik Sensor Terminal Board
*/
#define DE_VID 0x1cf1 /* Vendor ID */
#define STB_PID 0x0001 /* Sensor Terminal Board */
@@ -1002,3 +1009,11 @@
#define EVO_8U232AM_PID 0x02FF /* Evolution robotics RCM2 (FT232AM)*/
#define EVO_HYBRID_PID 0x0302 /* Evolution robotics RCM4 PID (FT232BM)*/
#define EVO_RCM4_PID 0x0303 /* Evolution robotics RCM4 PID */
+
+/*
+ * MJS Gadgets HD Radio / XM Radio / Sirius Radio interfaces (using VID 0x0403)
+ */
+#define MJSG_GENERIC_PID 0x9378
+#define MJSG_SR_RADIO_PID 0x9379
+#define MJSG_XM_RADIO_PID 0x937A
+#define MJSG_HD_RADIO_PID 0x937C
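[Editor's note: the new CONTEC and MJS Gadgets defines above only take effect once matching entries exist in the driver's usb_device_id table; that ftdi_sio.c hunk is not shown in this excerpt. The expected form of such an entry is:]

	{ USB_DEVICE(CONTEC_VID, CONTEC_COM1USBH_PID) },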
diff --git a/drivers/usb/serial/funsoft.c b/drivers/usb/serial/funsoft.c
index d30f736..e21ce9d 100644
--- a/drivers/usb/serial/funsoft.c
+++ b/drivers/usb/serial/funsoft.c
@@ -18,7 +18,7 @@
static int debug;
-static struct usb_device_id id_table [] = {
+static const struct usb_device_id id_table[] = {
{ USB_DEVICE(0x1404, 0xcddc) },
{ },
};
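[Editor's note: funsoft.c is the first of many drivers in this series whose ID table gains a const qualifier. Constifying the table lets it live in read-only data without changing how it is used; a self-contained sketch with hypothetical IDs.]

	#include <linux/module.h>
	#include <linux/usb.h>

	static const struct usb_device_id example_id_table[] = {
		{ USB_DEVICE(0x1234, 0x5678) },	/* hypothetical vendor/product */
		{ }				/* terminating entry */
	};
	MODULE_DEVICE_TABLE(usb, example_id_table);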
diff --git a/drivers/usb/serial/garmin_gps.c b/drivers/usb/serial/garmin_gps.c
index 5ac900e..a42b29a 100644
--- a/drivers/usb/serial/garmin_gps.c
+++ b/drivers/usb/serial/garmin_gps.c
@@ -210,7 +210,7 @@
-static struct usb_device_id id_table [] = {
+static const struct usb_device_id id_table[] = {
/* the same device id seems to be used by all
usb enabled GPS devices */
{ USB_DEVICE(GARMIN_VENDOR_ID, 3) },
@@ -271,7 +271,6 @@
usb_serial_debug_data(debug, &port->dev,
__func__, actual_length, data);
- tty_buffer_request_room(tty, actual_length);
tty_insert_flip_string(tty, data, actual_length);
tty_flip_buffer_push(tty);
}
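[Editor's note: the tty_buffer_request_room() call removed here (and in several drivers below) is redundant because tty_insert_flip_string() already allocates the flip-buffer space it needs and reports how much it actually queued. A sketch of the resulting receive path, assuming the 2.6.33-era tty API used throughout this patch; the function and parameter names are placeholders.]

	#include <linux/device.h>
	#include <linux/tty.h>
	#include <linux/tty_flip.h>

	static void example_push_to_tty(struct tty_struct *tty, struct device *dev,
					const unsigned char *data, int len)
	{
		int queued;

		queued = tty_insert_flip_string(tty, data, len);
		if (queued < len)
			dev_err(dev, "dropping data, %d bytes lost\n", len - queued);
		tty_flip_buffer_push(tty);
	}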
diff --git a/drivers/usb/serial/generic.c b/drivers/usb/serial/generic.c
index 83443d6..89fac36 100644
--- a/drivers/usb/serial/generic.c
+++ b/drivers/usb/serial/generic.c
@@ -20,6 +20,7 @@
#include <linux/usb/serial.h>
#include <linux/uaccess.h>
#include <linux/kfifo.h>
+#include <linux/serial.h>
static int debug;
@@ -41,7 +42,7 @@
/* we want to look at all devices, as the vendor/product id can change
* depending on the command line argument */
-static struct usb_device_id generic_serial_ids[] = {
+static const struct usb_device_id generic_serial_ids[] = {
{.driver_info = 42},
{}
};
@@ -194,7 +195,7 @@
if (port->urbs_in_flight >
port->serial->type->max_in_flight_urbs) {
spin_unlock_irqrestore(&port->lock, flags);
- dbg("%s - write limit hit\n", __func__);
+ dbg("%s - write limit hit", __func__);
return bwrite;
}
port->tx_bytes_flight += towrite;
@@ -585,7 +586,7 @@
for (i = 0; i < serial->num_ports; i++) {
port = serial->port[i];
- if (!port->port.count)
+ if (!test_bit(ASYNCB_INITIALIZED, &port->port.flags))
continue;
if (port->read_urb) {
diff --git a/drivers/usb/serial/hp4x.c b/drivers/usb/serial/hp4x.c
index 4313292..8093791 100644
--- a/drivers/usb/serial/hp4x.c
+++ b/drivers/usb/serial/hp4x.c
@@ -29,7 +29,7 @@
#define HP_VENDOR_ID 0x03f0
#define HP49GP_PRODUCT_ID 0x0121
-static struct usb_device_id id_table [] = {
+static const struct usb_device_id id_table[] = {
{ USB_DEVICE(HP_VENDOR_ID, HP49GP_PRODUCT_ID) },
{ } /* Terminating entry */
};
diff --git a/drivers/usb/serial/io_edgeport.c b/drivers/usb/serial/io_edgeport.c
index b97960a..3ef8df0 100644
--- a/drivers/usb/serial/io_edgeport.c
+++ b/drivers/usb/serial/io_edgeport.c
@@ -364,42 +364,6 @@
release_firmware(fw);
}
-
-/************************************************************************
- * *
- * Get string descriptor from device *
- * *
- ************************************************************************/
-static int get_string(struct usb_device *dev, int Id, char *string, int buflen)
-{
- struct usb_string_descriptor StringDesc;
- struct usb_string_descriptor *pStringDesc;
-
- dbg("%s - USB String ID = %d", __func__, Id);
-
- if (!usb_get_descriptor(dev, USB_DT_STRING, Id,
- &StringDesc, sizeof(StringDesc)))
- return 0;
-
- pStringDesc = kmalloc(StringDesc.bLength, GFP_KERNEL);
- if (!pStringDesc)
- return 0;
-
- if (!usb_get_descriptor(dev, USB_DT_STRING, Id,
- pStringDesc, StringDesc.bLength)) {
- kfree(pStringDesc);
- return 0;
- }
-
- unicode_to_ascii(string, buflen,
- pStringDesc->wData, pStringDesc->bLength/2);
-
- kfree(pStringDesc);
- dbg("%s - USB String %s", __func__, string);
- return strlen(string);
-}
-
-
#if 0
/************************************************************************
*
@@ -2007,7 +1971,7 @@
return;
case IOSP_EXT_STATUS_RX_CHECK_RSP:
- dbg("%s ========== Port %u CHECK_RSP Sequence = %02x =============\n", __func__, edge_serial->rxPort, byte3);
+ dbg("%s ========== Port %u CHECK_RSP Sequence = %02x =============", __func__, edge_serial->rxPort, byte3);
/* Port->RxCheckRsp = true; */
return;
}
@@ -2075,7 +2039,7 @@
break;
default:
- dbg("%s - Unrecognized IOSP status code %u\n", __func__, code);
+ dbg("%s - Unrecognized IOSP status code %u", __func__, code);
break;
}
return;
@@ -2091,18 +2055,13 @@
{
int cnt;
- do {
- cnt = tty_buffer_request_room(tty, length);
- if (cnt < length) {
- dev_err(dev, "%s - dropping data, %d bytes lost\n",
- __func__, length - cnt);
- if (cnt == 0)
- break;
- }
- tty_insert_flip_string(tty, data, cnt);
- data += cnt;
- length -= cnt;
- } while (length > 0);
+ cnt = tty_insert_flip_string(tty, data, length);
+ if (cnt < length) {
+ dev_err(dev, "%s - dropping data, %d bytes lost\n",
+ __func__, length - cnt);
+ }
+ data += cnt;
+ length -= cnt;
tty_flip_buffer_push(tty);
}
@@ -2530,7 +2489,7 @@
*divisor = custom;
- dbg("%s - Baud %d = %d\n", __func__, baudrate, custom);
+ dbg("%s - Baud %d = %d", __func__, baudrate, custom);
return 0;
}
@@ -2915,7 +2874,7 @@
break;
case EDGE_DOWNLOAD_FILE_NONE:
- dbg ("No download file specified, skipping download\n");
+ dbg("No download file specified, skipping download");
return;
default:
@@ -2997,10 +2956,12 @@
usb_set_serial_data(serial, edge_serial);
/* get the name for the device from the device */
- i = get_string(dev, dev->descriptor.iManufacturer,
+ i = usb_string(dev, dev->descriptor.iManufacturer,
&edge_serial->name[0], MAX_NAME_LEN+1);
+ if (i < 0)
+ i = 0;
edge_serial->name[i++] = ' ';
- get_string(dev, dev->descriptor.iProduct,
+ usb_string(dev, dev->descriptor.iProduct,
&edge_serial->name[i], MAX_NAME_LEN+2 - i);
dev_info(&serial->dev->dev, "%s detected\n", edge_serial->name);
diff --git a/drivers/usb/serial/io_tables.h b/drivers/usb/serial/io_tables.h
index 9241d31..feb56a4 100644
--- a/drivers/usb/serial/io_tables.h
+++ b/drivers/usb/serial/io_tables.h
@@ -14,7 +14,7 @@
#ifndef IO_TABLES_H
#define IO_TABLES_H
-static struct usb_device_id edgeport_2port_id_table [] = {
+static const struct usb_device_id edgeport_2port_id_table[] = {
{ USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_EDGEPORT_2) },
{ USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_EDGEPORT_2I) },
{ USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_EDGEPORT_421) },
@@ -23,7 +23,7 @@
{ }
};
-static struct usb_device_id edgeport_4port_id_table [] = {
+static const struct usb_device_id edgeport_4port_id_table[] = {
{ USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_EDGEPORT_4) },
{ USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_RAPIDPORT_4) },
{ USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_EDGEPORT_4T) },
@@ -37,7 +37,7 @@
{ }
};
-static struct usb_device_id edgeport_8port_id_table [] = {
+static const struct usb_device_id edgeport_8port_id_table[] = {
{ USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_EDGEPORT_8) },
{ USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_EDGEPORT_16_DUAL_CPU) },
{ USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_EDGEPORT_8I) },
@@ -47,7 +47,7 @@
{ }
};
-static struct usb_device_id Epic_port_id_table [] = {
+static const struct usb_device_id Epic_port_id_table[] = {
{ USB_DEVICE(USB_VENDOR_ID_NCR, NCR_DEVICE_ID_EPIC_0202) },
{ USB_DEVICE(USB_VENDOR_ID_NCR, NCR_DEVICE_ID_EPIC_0203) },
{ USB_DEVICE(USB_VENDOR_ID_NCR, NCR_DEVICE_ID_EPIC_0310) },
@@ -60,7 +60,7 @@
};
/* Devices that this driver supports */
-static struct usb_device_id id_table_combined [] = {
+static const struct usb_device_id id_table_combined[] = {
{ USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_EDGEPORT_4) },
{ USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_RAPIDPORT_4) },
{ USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_EDGEPORT_4T) },
diff --git a/drivers/usb/serial/io_ti.c b/drivers/usb/serial/io_ti.c
index d4cc0f7..aa876f7 100644
--- a/drivers/usb/serial/io_ti.c
+++ b/drivers/usb/serial/io_ti.c
@@ -134,7 +134,7 @@
/* Devices that this driver supports */
-static struct usb_device_id edgeport_1port_id_table [] = {
+static const struct usb_device_id edgeport_1port_id_table[] = {
{ USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_TI_EDGEPORT_1) },
{ USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_TI_TI3410_EDGEPORT_1) },
{ USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_TI_TI3410_EDGEPORT_1I) },
@@ -154,7 +154,7 @@
{ }
};
-static struct usb_device_id edgeport_2port_id_table [] = {
+static const struct usb_device_id edgeport_2port_id_table[] = {
{ USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_TI_EDGEPORT_2) },
{ USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_TI_EDGEPORT_2C) },
{ USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_TI_EDGEPORT_2I) },
@@ -177,7 +177,7 @@
};
/* Devices that this driver supports */
-static struct usb_device_id id_table_combined [] = {
+static const struct usb_device_id id_table_combined[] = {
{ USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_TI_EDGEPORT_1) },
{ USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_TI_TI3410_EDGEPORT_1) },
{ USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_TI_TI3410_EDGEPORT_1I) },
@@ -413,11 +413,18 @@
{
int status = 0;
int i;
- __u8 temp;
+ u8 *temp;
/* Must do a read before write */
if (!serial->TiReadI2C) {
- status = read_boot_mem(serial, 0, 1, &temp);
+ temp = kmalloc(1, GFP_KERNEL);
+ if (!temp) {
+ dev_err(&serial->serial->dev->dev,
+ "%s - out of memory\n", __func__);
+ return -ENOMEM;
+ }
+ status = read_boot_mem(serial, 0, 1, temp);
+ kfree(temp);
if (status)
return status;
}
@@ -935,37 +942,47 @@
static int i2c_type_bootmode(struct edgeport_serial *serial)
{
int status;
- __u8 data;
+ u8 *data;
+
+ data = kmalloc(1, GFP_KERNEL);
+ if (!data) {
+ dev_err(&serial->serial->dev->dev,
+ "%s - out of memory\n", __func__);
+ return -ENOMEM;
+ }
/* Try to read type 2 */
status = ti_vread_sync(serial->serial->dev, UMPC_MEMORY_READ,
- DTK_ADDR_SPACE_I2C_TYPE_II, 0, &data, 0x01);
+ DTK_ADDR_SPACE_I2C_TYPE_II, 0, data, 0x01);
if (status)
dbg("%s - read 2 status error = %d", __func__, status);
else
- dbg("%s - read 2 data = 0x%x", __func__, data);
- if ((!status) && (data == UMP5152 || data == UMP3410)) {
+ dbg("%s - read 2 data = 0x%x", __func__, *data);
+ if ((!status) && (*data == UMP5152 || *data == UMP3410)) {
dbg("%s - ROM_TYPE_II", __func__);
serial->TI_I2C_Type = DTK_ADDR_SPACE_I2C_TYPE_II;
- return 0;
+ goto out;
}
/* Try to read type 3 */
status = ti_vread_sync(serial->serial->dev, UMPC_MEMORY_READ,
- DTK_ADDR_SPACE_I2C_TYPE_III, 0, &data, 0x01);
+ DTK_ADDR_SPACE_I2C_TYPE_III, 0, data, 0x01);
if (status)
dbg("%s - read 3 status error = %d", __func__, status);
else
- dbg("%s - read 2 data = 0x%x", __func__, data);
- if ((!status) && (data == UMP5152 || data == UMP3410)) {
+ dbg("%s - read 2 data = 0x%x", __func__, *data);
+ if ((!status) && (*data == UMP5152 || *data == UMP3410)) {
dbg("%s - ROM_TYPE_III", __func__);
serial->TI_I2C_Type = DTK_ADDR_SPACE_I2C_TYPE_III;
- return 0;
+ goto out;
}
dbg("%s - Unknown", __func__);
serial->TI_I2C_Type = DTK_ADDR_SPACE_I2C_TYPE_II;
- return -ENODEV;
+ status = -ENODEV;
+out:
+ kfree(data);
+ return status;
}
static int bulk_xfer(struct usb_serial *serial, void *buffer,
@@ -1113,7 +1130,7 @@
I2C_DESC_TYPE_FIRMWARE_BASIC, rom_desc);
if (start_address != 0) {
struct ti_i2c_firmware_rec *firmware_version;
- __u8 record;
+ u8 *record;
dbg("%s - Found Type FIRMWARE (Type 2) record",
__func__);
@@ -1165,6 +1182,15 @@
OperationalMajorVersion,
OperationalMinorVersion);
+ record = kmalloc(1, GFP_KERNEL);
+ if (!record) {
+ dev_err(dev, "%s - out of memory.\n",
+ __func__);
+ kfree(firmware_version);
+ kfree(rom_desc);
+ kfree(ti_manuf_desc);
+ return -ENOMEM;
+ }
/* In order to update the I2C firmware we must
* change the type 2 record to type 0xF2. This
* will force the UMP to come up in Boot Mode.
@@ -1177,13 +1203,14 @@
* firmware will update the record type from
* 0xf2 to 0x02.
*/
- record = I2C_DESC_TYPE_FIRMWARE_BLANK;
+ *record = I2C_DESC_TYPE_FIRMWARE_BLANK;
/* Change the I2C Firmware record type to
0xf2 to trigger an update */
status = write_rom(serial, start_address,
- sizeof(record), &record);
+ sizeof(*record), record);
if (status) {
+ kfree(record);
kfree(firmware_version);
kfree(rom_desc);
kfree(ti_manuf_desc);
@@ -1196,19 +1223,21 @@
*/
status = read_rom(serial,
start_address,
- sizeof(record),
- &record);
+ sizeof(*record),
+ record);
if (status) {
+ kfree(record);
kfree(firmware_version);
kfree(rom_desc);
kfree(ti_manuf_desc);
return status;
}
- if (record != I2C_DESC_TYPE_FIRMWARE_BLANK) {
+ if (*record != I2C_DESC_TYPE_FIRMWARE_BLANK) {
dev_err(dev,
"%s - error resetting device\n",
__func__);
+ kfree(record);
kfree(firmware_version);
kfree(rom_desc);
kfree(ti_manuf_desc);
@@ -1226,6 +1255,7 @@
__func__, status);
/* return an error on purpose. */
+ kfree(record);
kfree(firmware_version);
kfree(rom_desc);
kfree(ti_manuf_desc);
@@ -1686,7 +1716,7 @@
case TIUMP_INTERRUPT_CODE_MSR: /* MSR */
/* Copy MSR from UMP */
msr = data[1];
- dbg("%s - ===== Port %u MSR Status = %02x ======\n",
+ dbg("%s - ===== Port %u MSR Status = %02x ======",
__func__, port_number, msr);
handle_new_msr(edge_port, msr);
break;
@@ -1790,7 +1820,6 @@
{
int queued;
- tty_buffer_request_room(tty, length);
queued = tty_insert_flip_string(tty, data, length);
if (queued < length)
dev_err(dev, "%s - dropping data, %d bytes lost\n",
diff --git a/drivers/usb/serial/ipaq.c b/drivers/usb/serial/ipaq.c
index d6231c3..3fea929 100644
--- a/drivers/usb/serial/ipaq.c
+++ b/drivers/usb/serial/ipaq.c
@@ -747,7 +747,6 @@
tty = tty_port_tty_get(&port->port);
if (tty && urb->actual_length) {
- tty_buffer_request_room(tty, urb->actual_length);
tty_insert_flip_string(tty, data, urb->actual_length);
tty_flip_buffer_push(tty);
bytes_in += urb->actual_length;
diff --git a/drivers/usb/serial/ipw.c b/drivers/usb/serial/ipw.c
index 727d323..e1d0784 100644
--- a/drivers/usb/serial/ipw.c
+++ b/drivers/usb/serial/ipw.c
@@ -134,7 +134,7 @@
#define IPW_WANTS_TO_SEND 0x30
-static struct usb_device_id usb_ipw_ids[] = {
+static const struct usb_device_id usb_ipw_ids[] = {
{ USB_DEVICE(IPW_VID, IPW_PID) },
{ },
};
@@ -172,7 +172,6 @@
tty = tty_port_tty_get(&port->port);
if (tty && urb->actual_length) {
- tty_buffer_request_room(tty, urb->actual_length);
tty_insert_flip_string(tty, data, urb->actual_length);
tty_flip_buffer_push(tty);
}
diff --git a/drivers/usb/serial/ir-usb.c b/drivers/usb/serial/ir-usb.c
index 95d8d26..4a0f519 100644
--- a/drivers/usb/serial/ir-usb.c
+++ b/drivers/usb/serial/ir-usb.c
@@ -100,7 +100,7 @@
static u8 ir_xbof;
static u8 ir_add_bof;
-static struct usb_device_id ir_id_table[] = {
+static const struct usb_device_id ir_id_table[] = {
{ USB_DEVICE(0x050f, 0x0180) }, /* KC Technology, KC-180 */
{ USB_DEVICE(0x08e9, 0x0100) }, /* XTNDAccess */
{ USB_DEVICE(0x09c4, 0x0011) }, /* ACTiSys ACT-IR2000U */
@@ -445,11 +445,6 @@
dbg("%s - port %d", __func__, port->number);
- if (!port->port.count) {
- dbg("%s - port closed.", __func__);
- return;
- }
-
switch (status) {
case 0: /* Successful */
/*
@@ -462,10 +457,8 @@
usb_serial_debug_data(debug, &port->dev, __func__,
urb->actual_length, data);
tty = tty_port_tty_get(&port->port);
- if (tty_buffer_request_room(tty, urb->actual_length - 1)) {
- tty_insert_flip_string(tty, data+1, urb->actual_length - 1);
- tty_flip_buffer_push(tty);
- }
+ tty_insert_flip_string(tty, data+1, urb->actual_length - 1);
+ tty_flip_buffer_push(tty);
tty_kref_put(tty);
/*
diff --git a/drivers/usb/serial/iuu_phoenix.c b/drivers/usb/serial/iuu_phoenix.c
index e6e02b1..43f13cf 100644
--- a/drivers/usb/serial/iuu_phoenix.c
+++ b/drivers/usb/serial/iuu_phoenix.c
@@ -43,7 +43,7 @@
#define DRIVER_VERSION "v0.11"
#define DRIVER_DESC "Infinity USB Unlimited Phoenix driver"
-static struct usb_device_id id_table[] = {
+static const struct usb_device_id id_table[] = {
{USB_DEVICE(IUU_USB_VENDOR_ID, IUU_USB_PRODUCT_ID)},
{} /* Terminating entry */
};
diff --git a/drivers/usb/serial/keyspan.c b/drivers/usb/serial/keyspan.c
index f8c4b07..297163c 100644
--- a/drivers/usb/serial/keyspan.c
+++ b/drivers/usb/serial/keyspan.c
@@ -464,13 +464,9 @@
/* Resubmit urb so we continue receiving */
urb->dev = port->serial->dev;
- if (port->port.count) {
- err = usb_submit_urb(urb, GFP_ATOMIC);
- if (err != 0)
- dbg("%s - resubmit read urb failed. (%d)",
- __func__, err);
- }
- return;
+ err = usb_submit_urb(urb, GFP_ATOMIC);
+ if (err != 0)
+ dbg("%s - resubmit read urb failed. (%d)", __func__, err);
}
/* Outdat handling is common for all devices */
@@ -483,8 +479,7 @@
p_priv = usb_get_serial_port_data(port);
dbg("%s - urb %d", __func__, urb == p_priv->out_urbs[1]);
- if (port->port.count)
- usb_serial_port_softint(port);
+ usb_serial_port_softint(port);
}
static void usa26_inack_callback(struct urb *urb)
@@ -615,12 +610,10 @@
/* Resubmit urb so we continue receiving */
urb->dev = port->serial->dev;
- if (port->port.count) {
- err = usb_submit_urb(urb, GFP_ATOMIC);
- if (err != 0)
- dbg("%s - resubmit read urb failed. (%d)",
- __func__, err);
- }
+ err = usb_submit_urb(urb, GFP_ATOMIC);
+ if (err != 0)
+ dbg("%s - resubmit read urb failed. (%d)",
+ __func__, err);
p_priv->in_flip ^= 1;
urb = p_priv->in_urbs[p_priv->in_flip];
@@ -856,12 +849,9 @@
/* Resubmit urb so we continue receiving */
urb->dev = port->serial->dev;
- if (port->port.count) {
- err = usb_submit_urb(urb, GFP_ATOMIC);
- if (err != 0)
- dbg("%s - resubmit read urb failed. (%d)",
- __func__, err);
- }
+ err = usb_submit_urb(urb, GFP_ATOMIC);
+ if (err != 0)
+ dbg("%s - resubmit read urb failed. (%d)", __func__, err);
}
static void usa49wg_indat_callback(struct urb *urb)
@@ -904,11 +894,7 @@
/* no error on any byte */
i++;
for (x = 1; x < len ; ++x)
- if (port->port.count)
- tty_insert_flip_char(tty,
- data[i++], 0);
- else
- i++;
+ tty_insert_flip_char(tty, data[i++], 0);
} else {
/*
* some bytes had errors, every byte has status
@@ -922,14 +908,12 @@
if (stat & RXERROR_PARITY)
flag |= TTY_PARITY;
/* XXX should handle break (0x10) */
- if (port->port.count)
- tty_insert_flip_char(tty,
+ tty_insert_flip_char(tty,
data[i+1], flag);
i += 2;
}
}
- if (port->port.count)
- tty_flip_buffer_push(tty);
+ tty_flip_buffer_push(tty);
tty_kref_put(tty);
}
}
@@ -1013,13 +997,9 @@
/* Resubmit urb so we continue receiving */
urb->dev = port->serial->dev;
- if (port->port.count) {
- err = usb_submit_urb(urb, GFP_ATOMIC);
- if (err != 0)
- dbg("%s - resubmit read urb failed. (%d)",
- __func__, err);
- }
- return;
+ err = usb_submit_urb(urb, GFP_ATOMIC);
+ if (err != 0)
+ dbg("%s - resubmit read urb failed. (%d)", __func__, err);
}
@@ -2418,8 +2398,7 @@
msg.portEnabled = 0;
/* Sending intermediate configs */
else {
- if (port->port.count)
- msg.portEnabled = 1;
+ msg.portEnabled = 1;
msg.txBreak = (p_priv->break_on);
}
diff --git a/drivers/usb/serial/keyspan.h b/drivers/usb/serial/keyspan.h
index 30771e5..bf3297d 100644
--- a/drivers/usb/serial/keyspan.h
+++ b/drivers/usb/serial/keyspan.h
@@ -456,7 +456,7 @@
NULL,
};
-static struct usb_device_id keyspan_ids_combined[] = {
+static const struct usb_device_id keyspan_ids_combined[] = {
{ USB_DEVICE(KEYSPAN_VENDOR_ID, keyspan_usa18x_pre_product_id) },
{ USB_DEVICE(KEYSPAN_VENDOR_ID, keyspan_usa19_pre_product_id) },
{ USB_DEVICE(KEYSPAN_VENDOR_ID, keyspan_usa19w_pre_product_id) },
@@ -497,7 +497,7 @@
};
/* usb_device_id table for the pre-firmware download keyspan devices */
-static struct usb_device_id keyspan_pre_ids[] = {
+static const struct usb_device_id keyspan_pre_ids[] = {
{ USB_DEVICE(KEYSPAN_VENDOR_ID, keyspan_usa18x_pre_product_id) },
{ USB_DEVICE(KEYSPAN_VENDOR_ID, keyspan_usa19_pre_product_id) },
{ USB_DEVICE(KEYSPAN_VENDOR_ID, keyspan_usa19qi_pre_product_id) },
@@ -513,7 +513,7 @@
{ } /* Terminating entry */
};
-static struct usb_device_id keyspan_1port_ids[] = {
+static const struct usb_device_id keyspan_1port_ids[] = {
{ USB_DEVICE(KEYSPAN_VENDOR_ID, keyspan_usa18x_product_id) },
{ USB_DEVICE(KEYSPAN_VENDOR_ID, keyspan_usa19_product_id) },
{ USB_DEVICE(KEYSPAN_VENDOR_ID, keyspan_usa19qi_product_id) },
@@ -524,7 +524,7 @@
{ } /* Terminating entry */
};
-static struct usb_device_id keyspan_2port_ids[] = {
+static const struct usb_device_id keyspan_2port_ids[] = {
{ USB_DEVICE(KEYSPAN_VENDOR_ID, keyspan_usa28_product_id) },
{ USB_DEVICE(KEYSPAN_VENDOR_ID, keyspan_usa28x_product_id) },
{ USB_DEVICE(KEYSPAN_VENDOR_ID, keyspan_usa28xa_product_id) },
@@ -532,7 +532,7 @@
{ } /* Terminating entry */
};
-static struct usb_device_id keyspan_4port_ids[] = {
+static const struct usb_device_id keyspan_4port_ids[] = {
{ USB_DEVICE(KEYSPAN_VENDOR_ID, keyspan_usa49w_product_id) },
{ USB_DEVICE(KEYSPAN_VENDOR_ID, keyspan_usa49wlc_product_id)},
{ USB_DEVICE(KEYSPAN_VENDOR_ID, keyspan_usa49wg_product_id)},
diff --git a/drivers/usb/serial/keyspan_pda.c b/drivers/usb/serial/keyspan_pda.c
index 1296a09..185fe9a 100644
--- a/drivers/usb/serial/keyspan_pda.c
+++ b/drivers/usb/serial/keyspan_pda.c
@@ -125,7 +125,7 @@
#define ENTREGRA_VENDOR_ID 0x1645
#define ENTREGRA_FAKE_ID 0x8093
-static struct usb_device_id id_table_combined [] = {
+static const struct usb_device_id id_table_combined[] = {
#ifdef KEYSPAN
{ USB_DEVICE(KEYSPAN_VENDOR_ID, KEYSPAN_PDA_FAKE_ID) },
#endif
@@ -147,20 +147,20 @@
.no_dynamic_id = 1,
};
-static struct usb_device_id id_table_std [] = {
+static const struct usb_device_id id_table_std[] = {
{ USB_DEVICE(KEYSPAN_VENDOR_ID, KEYSPAN_PDA_ID) },
{ } /* Terminating entry */
};
#ifdef KEYSPAN
-static struct usb_device_id id_table_fake [] = {
+static const struct usb_device_id id_table_fake[] = {
{ USB_DEVICE(KEYSPAN_VENDOR_ID, KEYSPAN_PDA_FAKE_ID) },
{ } /* Terminating entry */
};
#endif
#ifdef XIRCOM
-static struct usb_device_id id_table_fake_xircom [] = {
+static const struct usb_device_id id_table_fake_xircom[] = {
{ USB_DEVICE(XIRCOM_VENDOR_ID, XIRCOM_FAKE_ID) },
{ USB_DEVICE(ENTREGRA_VENDOR_ID, ENTREGRA_FAKE_ID) },
{ }
@@ -429,13 +429,20 @@
unsigned char *value)
{
int rc;
- unsigned char data;
+ u8 *data;
+
+ data = kmalloc(1, GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
rc = usb_control_msg(serial->dev, usb_rcvctrlpipe(serial->dev, 0),
3, /* get pins */
USB_TYPE_VENDOR|USB_RECIP_INTERFACE|USB_DIR_IN,
- 0, 0, &data, 1, 2000);
+ 0, 0, data, 1, 2000);
if (rc >= 0)
- *value = data;
+ *value = *data;
+
+ kfree(data);
return rc;
}
@@ -543,7 +550,14 @@
device how much room it really has. This is done only on
scheduler time, since usb_control_msg() sleeps. */
if (count > priv->tx_room && !in_interrupt()) {
- unsigned char room;
+ u8 *room;
+
+ room = kmalloc(1, GFP_KERNEL);
+ if (!room) {
+ rc = -ENOMEM;
+ goto exit;
+ }
+
rc = usb_control_msg(serial->dev,
usb_rcvctrlpipe(serial->dev, 0),
6, /* write_room */
@@ -551,9 +565,14 @@
| USB_DIR_IN,
0, /* value: 0 means "remaining room" */
0, /* index */
- &room,
+ room,
1,
2000);
+ if (rc > 0) {
+ dbg(" roomquery says %d", *room);
+ priv->tx_room = *room;
+ }
+ kfree(room);
if (rc < 0) {
dbg(" roomquery failed");
goto exit;
@@ -563,8 +582,6 @@
rc = -EIO; /* device didn't return any data */
goto exit;
}
- dbg(" roomquery says %d", room);
- priv->tx_room = room;
}
if (count > priv->tx_room) {
/* we're about to completely fill the Tx buffer, so
@@ -684,18 +701,22 @@
struct usb_serial_port *port)
{
struct usb_serial *serial = port->serial;
- unsigned char room;
+ u8 *room;
int rc = 0;
struct keyspan_pda_private *priv;
/* find out how much room is in the Tx ring */
+ room = kmalloc(1, GFP_KERNEL);
+ if (!room)
+ return -ENOMEM;
+
rc = usb_control_msg(serial->dev, usb_rcvctrlpipe(serial->dev, 0),
6, /* write_room */
USB_TYPE_VENDOR | USB_RECIP_INTERFACE
| USB_DIR_IN,
0, /* value */
0, /* index */
- &room,
+ room,
1,
2000);
if (rc < 0) {
@@ -708,8 +729,8 @@
goto error;
}
priv = usb_get_serial_port_data(port);
- priv->tx_room = room;
- priv->tx_throttled = room ? 0 : 1;
+ priv->tx_room = *room;
+ priv->tx_throttled = *room ? 0 : 1;
/*Start reading from the device*/
port->interrupt_in_urb->dev = serial->dev;
@@ -718,8 +739,8 @@
dbg("%s - usb_submit_urb(read int) failed", __func__);
goto error;
}
-
error:
+ kfree(room);
return rc;
}
static void keyspan_pda_close(struct usb_serial_port *port)
@@ -789,6 +810,13 @@
return 1;
}
+#ifdef KEYSPAN
+MODULE_FIRMWARE("keyspan_pda/keyspan_pda.fw");
+#endif
+#ifdef XIRCOM
+MODULE_FIRMWARE("keyspan_pda/xircom_pgs.fw");
+#endif
+
static int keyspan_pda_startup(struct usb_serial *serial)
{
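[Editor's note: the MODULE_FIRMWARE() lines added above only record the firmware file names in the module's metadata (visible via modinfo) so packaging tools can bundle the blobs; loading still happens at run time through request_firmware(). A minimal sketch with a placeholder file name.]

	#include <linux/device.h>
	#include <linux/firmware.h>
	#include <linux/module.h>

	MODULE_FIRMWARE("example/blob.fw");

	static int example_load_blob(struct device *dev)
	{
		const struct firmware *fw;
		int err;

		err = request_firmware(&fw, "example/blob.fw", dev);
		if (err)
			return err;
		/* ...parse fw->data, fw->size... */
		release_firmware(fw);
		return 0;
	}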
diff --git a/drivers/usb/serial/kl5kusb105.c b/drivers/usb/serial/kl5kusb105.c
index 3a78738..8eef91b 100644
--- a/drivers/usb/serial/kl5kusb105.c
+++ b/drivers/usb/serial/kl5kusb105.c
@@ -94,7 +94,7 @@
/*
* All of the device info needed for the KLSI converters.
*/
-static struct usb_device_id id_table [] = {
+static const struct usb_device_id id_table[] = {
{ USB_DEVICE(PALMCONNECT_VID, PALMCONNECT_PID) },
{ USB_DEVICE(KLSI_VID, KLSI_KL5KUSB105D_PID) },
{ } /* Terminating entry */
@@ -212,10 +212,19 @@
unsigned long *line_state_p)
{
int rc;
- __u8 status_buf[KLSI_STATUSBUF_LEN] = { -1, -1};
+ u8 *status_buf;
__u16 status;
dev_info(&port->serial->dev->dev, "sending SIO Poll request\n");
+
+ status_buf = kmalloc(KLSI_STATUSBUF_LEN, GFP_KERNEL);
+ if (!status_buf) {
+ dev_err(&port->dev, "%s - out of memory for status buffer.\n",
+ __func__);
+ return -ENOMEM;
+ }
+ status_buf[0] = 0xff;
+ status_buf[1] = 0xff;
rc = usb_control_msg(port->serial->dev,
usb_rcvctrlpipe(port->serial->dev, 0),
KL5KUSB105A_SIO_POLL,
@@ -236,6 +245,8 @@
*line_state_p = klsi_105_status2linestate(status);
}
+
+ kfree(status_buf);
return rc;
}
@@ -364,7 +375,7 @@
int rc;
int i;
unsigned long line_state;
- struct klsi_105_port_settings cfg;
+ struct klsi_105_port_settings *cfg;
unsigned long flags;
dbg("%s port %d", __func__, port->number);
@@ -376,12 +387,18 @@
* Then read the modem line control and store values in
* priv->line_state.
*/
- cfg.pktlen = 5;
- cfg.baudrate = kl5kusb105a_sio_b9600;
- cfg.databits = kl5kusb105a_dtb_8;
- cfg.unknown1 = 0;
- cfg.unknown2 = 1;
- klsi_105_chg_port_settings(port, &cfg);
+ cfg = kmalloc(sizeof(*cfg), GFP_KERNEL);
+ if (!cfg) {
+ dev_err(&port->dev, "%s - out of memory for config buffer.\n",
+ __func__);
+ return -ENOMEM;
+ }
+ cfg->pktlen = 5;
+ cfg->baudrate = kl5kusb105a_sio_b9600;
+ cfg->databits = kl5kusb105a_dtb_8;
+ cfg->unknown1 = 0;
+ cfg->unknown2 = 1;
+ klsi_105_chg_port_settings(port, cfg);
/* set up termios structure */
spin_lock_irqsave(&priv->lock, flags);
@@ -391,11 +408,11 @@
priv->termios.c_lflag = tty->termios->c_lflag;
for (i = 0; i < NCCS; i++)
priv->termios.c_cc[i] = tty->termios->c_cc[i];
- priv->cfg.pktlen = cfg.pktlen;
- priv->cfg.baudrate = cfg.baudrate;
- priv->cfg.databits = cfg.databits;
- priv->cfg.unknown1 = cfg.unknown1;
- priv->cfg.unknown2 = cfg.unknown2;
+ priv->cfg.pktlen = cfg->pktlen;
+ priv->cfg.baudrate = cfg->baudrate;
+ priv->cfg.databits = cfg->databits;
+ priv->cfg.unknown1 = cfg->unknown1;
+ priv->cfg.unknown2 = cfg->unknown2;
spin_unlock_irqrestore(&priv->lock, flags);
/* READ_ON and urb submission */
@@ -441,6 +458,7 @@
retval = rc;
exit:
+ kfree(cfg);
return retval;
} /* klsi_105_open */
@@ -681,7 +699,6 @@
bytes_sent = urb->actual_length - 2;
}
- tty_buffer_request_room(tty, bytes_sent);
tty_insert_flip_string(tty, data + 2, bytes_sent);
tty_flip_buffer_push(tty);
tty_kref_put(tty);
@@ -714,10 +731,17 @@
unsigned int old_iflag = old_termios->c_iflag;
unsigned int cflag = tty->termios->c_cflag;
unsigned int old_cflag = old_termios->c_cflag;
- struct klsi_105_port_settings cfg;
+ struct klsi_105_port_settings *cfg;
unsigned long flags;
speed_t baud;
+ cfg = kmalloc(sizeof(*cfg), GFP_KERNEL);
+ if (!cfg) {
+ dev_err(&port->dev, "%s - out of memory for config buffer.\n",
+ __func__);
+ return;
+ }
+
/* lock while we are modifying the settings */
spin_lock_irqsave(&priv->lock, flags);
@@ -793,11 +817,11 @@
case CS5:
dbg("%s - 5 bits/byte not supported", __func__);
spin_unlock_irqrestore(&priv->lock, flags);
- return ;
+ goto err;
case CS6:
dbg("%s - 6 bits/byte not supported", __func__);
spin_unlock_irqrestore(&priv->lock, flags);
- return ;
+ goto err;
case CS7:
priv->cfg.databits = kl5kusb105a_dtb_7;
break;
@@ -856,11 +880,13 @@
#endif
;
}
- memcpy(&cfg, &priv->cfg, sizeof(cfg));
+ memcpy(cfg, &priv->cfg, sizeof(*cfg));
spin_unlock_irqrestore(&priv->lock, flags);
/* now commit changes to device */
- klsi_105_chg_port_settings(port, &cfg);
+ klsi_105_chg_port_settings(port, cfg);
+err:
+ kfree(cfg);
} /* klsi_105_set_termios */
diff --git a/drivers/usb/serial/kobil_sct.c b/drivers/usb/serial/kobil_sct.c
index 45ea694..c113a2a 100644
--- a/drivers/usb/serial/kobil_sct.c
+++ b/drivers/usb/serial/kobil_sct.c
@@ -86,7 +86,7 @@
struct usb_serial_port *port, struct ktermios *old);
static void kobil_init_termios(struct tty_struct *tty);
-static struct usb_device_id id_table [] = {
+static const struct usb_device_id id_table[] = {
{ USB_DEVICE(KOBIL_VENDOR_ID, KOBIL_ADAPTER_B_PRODUCT_ID) },
{ USB_DEVICE(KOBIL_VENDOR_ID, KOBIL_ADAPTER_K_PRODUCT_ID) },
{ USB_DEVICE(KOBIL_VENDOR_ID, KOBIL_USBTWIN_PRODUCT_ID) },
@@ -388,7 +388,6 @@
*/
/* END DEBUG */
- tty_buffer_request_room(tty, urb->actual_length);
tty_insert_flip_string(tty, data, urb->actual_length);
tty_flip_buffer_push(tty);
}
@@ -624,7 +623,6 @@
unsigned short urb_val = 0;
int c_cflag = tty->termios->c_cflag;
speed_t speed;
- void *settings;
priv = usb_get_serial_port_data(port);
if (priv->device_type == KOBIL_USBTWIN_PRODUCT_ID ||
@@ -647,25 +645,13 @@
}
urb_val |= (c_cflag & CSTOPB) ? SUSBCR_SPASB_2StopBits :
SUSBCR_SPASB_1StopBit;
-
- settings = kzalloc(50, GFP_KERNEL);
- if (!settings)
- return;
-
- sprintf(settings, "%d ", speed);
-
if (c_cflag & PARENB) {
- if (c_cflag & PARODD) {
+ if (c_cflag & PARODD)
urb_val |= SUSBCR_SPASB_OddParity;
- strcat(settings, "Odd Parity");
- } else {
+ else
urb_val |= SUSBCR_SPASB_EvenParity;
- strcat(settings, "Even Parity");
- }
- } else {
+ } else
urb_val |= SUSBCR_SPASB_NoParity;
- strcat(settings, "No Parity");
- }
tty->termios->c_cflag &= ~CMSPAR;
tty_encode_baud_rate(tty, speed, speed);
@@ -675,11 +661,10 @@
USB_TYPE_VENDOR | USB_RECIP_ENDPOINT | USB_DIR_OUT,
urb_val,
0,
- settings,
+ NULL,
0,
KOBIL_TIMEOUT
);
- kfree(settings);
}
static int kobil_ioctl(struct tty_struct *tty, struct file *file,
diff --git a/drivers/usb/serial/mct_u232.c b/drivers/usb/serial/mct_u232.c
index cd009cb..2849f8c 100644
--- a/drivers/usb/serial/mct_u232.c
+++ b/drivers/usb/serial/mct_u232.c
@@ -75,6 +75,7 @@
#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/uaccess.h>
+#include <asm/unaligned.h>
#include <linux/usb.h>
#include <linux/usb/serial.h>
#include "mct_u232.h"
@@ -110,7 +111,7 @@
/*
* All of the device info needed for the MCT USB-RS232 converter.
*/
-static struct usb_device_id id_table_combined [] = {
+static const struct usb_device_id id_table_combined[] = {
{ USB_DEVICE(MCT_U232_VID, MCT_U232_PID) },
{ USB_DEVICE(MCT_U232_VID, MCT_U232_SITECOM_PID) },
{ USB_DEVICE(MCT_U232_VID, MCT_U232_DU_H3SP_PID) },
@@ -231,19 +232,22 @@
static int mct_u232_set_baud_rate(struct tty_struct *tty,
struct usb_serial *serial, struct usb_serial_port *port, speed_t value)
{
- __le32 divisor;
+ unsigned int divisor;
int rc;
- unsigned char zero_byte = 0;
+ unsigned char *buf;
unsigned char cts_enable_byte = 0;
speed_t speed;
- divisor = cpu_to_le32(mct_u232_calculate_baud_rate(serial, value,
- &speed));
+ buf = kmalloc(MCT_U232_MAX_SIZE, GFP_KERNEL);
+ if (buf == NULL)
+ return -ENOMEM;
+ divisor = mct_u232_calculate_baud_rate(serial, value, &speed);
+ put_unaligned_le32(divisor, buf);
rc = usb_control_msg(serial->dev, usb_sndctrlpipe(serial->dev, 0),
MCT_U232_SET_BAUD_RATE_REQUEST,
MCT_U232_SET_REQUEST_TYPE,
- 0, 0, &divisor, MCT_U232_SET_BAUD_RATE_SIZE,
+ 0, 0, buf, MCT_U232_SET_BAUD_RATE_SIZE,
WDR_TIMEOUT);
if (rc < 0) /*FIXME: What value speed results */
dev_err(&port->dev, "Set BAUD RATE %d failed (error = %d)\n",
@@ -269,10 +273,11 @@
a device which is not asserting 'CTS'.
*/
+ buf[0] = 0;
rc = usb_control_msg(serial->dev, usb_sndctrlpipe(serial->dev, 0),
MCT_U232_SET_UNKNOWN1_REQUEST,
MCT_U232_SET_REQUEST_TYPE,
- 0, 0, &zero_byte, MCT_U232_SET_UNKNOWN1_SIZE,
+ 0, 0, buf, MCT_U232_SET_UNKNOWN1_SIZE,
WDR_TIMEOUT);
if (rc < 0)
dev_err(&port->dev, "Sending USB device request code %d "
@@ -284,30 +289,40 @@
dbg("set_baud_rate: send second control message, data = %02X",
cts_enable_byte);
+ buf[0] = cts_enable_byte;
rc = usb_control_msg(serial->dev, usb_sndctrlpipe(serial->dev, 0),
MCT_U232_SET_CTS_REQUEST,
MCT_U232_SET_REQUEST_TYPE,
- 0, 0, &cts_enable_byte, MCT_U232_SET_CTS_SIZE,
+ 0, 0, buf, MCT_U232_SET_CTS_SIZE,
WDR_TIMEOUT);
if (rc < 0)
dev_err(&port->dev, "Sending USB device request code %d "
"failed (error = %d)\n", MCT_U232_SET_CTS_REQUEST, rc);
+ kfree(buf);
return rc;
} /* mct_u232_set_baud_rate */
static int mct_u232_set_line_ctrl(struct usb_serial *serial, unsigned char lcr)
{
int rc;
+ unsigned char *buf;
+
+ buf = kmalloc(MCT_U232_MAX_SIZE, GFP_KERNEL);
+ if (buf == NULL)
+ return -ENOMEM;
+
+ buf[0] = lcr;
rc = usb_control_msg(serial->dev, usb_sndctrlpipe(serial->dev, 0),
MCT_U232_SET_LINE_CTRL_REQUEST,
MCT_U232_SET_REQUEST_TYPE,
- 0, 0, &lcr, MCT_U232_SET_LINE_CTRL_SIZE,
+ 0, 0, buf, MCT_U232_SET_LINE_CTRL_SIZE,
WDR_TIMEOUT);
if (rc < 0)
dev_err(&serial->dev->dev,
"Set LINE CTRL 0x%x failed (error = %d)\n", lcr, rc);
dbg("set_line_ctrl: 0x%x", lcr);
+ kfree(buf);
return rc;
} /* mct_u232_set_line_ctrl */
@@ -315,23 +330,31 @@
unsigned int control_state)
{
int rc;
- unsigned char mcr = MCT_U232_MCR_NONE;
+ unsigned char mcr;
+ unsigned char *buf;
+ buf = kmalloc(MCT_U232_MAX_SIZE, GFP_KERNEL);
+ if (buf == NULL)
+ return -ENOMEM;
+
+ mcr = MCT_U232_MCR_NONE;
if (control_state & TIOCM_DTR)
mcr |= MCT_U232_MCR_DTR;
if (control_state & TIOCM_RTS)
mcr |= MCT_U232_MCR_RTS;
+ buf[0] = mcr;
rc = usb_control_msg(serial->dev, usb_sndctrlpipe(serial->dev, 0),
MCT_U232_SET_MODEM_CTRL_REQUEST,
MCT_U232_SET_REQUEST_TYPE,
- 0, 0, &mcr, MCT_U232_SET_MODEM_CTRL_SIZE,
+ 0, 0, buf, MCT_U232_SET_MODEM_CTRL_SIZE,
WDR_TIMEOUT);
if (rc < 0)
dev_err(&serial->dev->dev,
"Set MODEM CTRL 0x%x failed (error = %d)\n", mcr, rc);
dbg("set_modem_ctrl: state=0x%x ==> mcr=0x%x", control_state, mcr);
+ kfree(buf);
return rc;
} /* mct_u232_set_modem_ctrl */
@@ -339,17 +362,27 @@
unsigned char *msr)
{
int rc;
+ unsigned char *buf;
+
+ buf = kmalloc(MCT_U232_MAX_SIZE, GFP_KERNEL);
+ if (buf == NULL) {
+ *msr = 0;
+ return -ENOMEM;
+ }
rc = usb_control_msg(serial->dev, usb_rcvctrlpipe(serial->dev, 0),
MCT_U232_GET_MODEM_STAT_REQUEST,
MCT_U232_GET_REQUEST_TYPE,
- 0, 0, msr, MCT_U232_GET_MODEM_STAT_SIZE,
+ 0, 0, buf, MCT_U232_GET_MODEM_STAT_SIZE,
WDR_TIMEOUT);
if (rc < 0) {
dev_err(&serial->dev->dev,
"Get MODEM STATus failed (error = %d)\n", rc);
*msr = 0;
+ } else {
+ *msr = buf[0];
}
dbg("get_modem_stat: 0x%x", *msr);
+ kfree(buf);
return rc;
} /* mct_u232_get_modem_stat */
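[Editor's note: mct_u232_set_baud_rate() above now writes its 32-bit divisor into the kmalloc()ed message buffer with put_unaligned_le32(), which takes a CPU-order value and stores it little-endian at any alignment, so no separate cpu_to_le32() step is needed. A minimal illustration; the function name is a placeholder.]

	#include <asm/unaligned.h>

	static void example_store_divisor(unsigned int divisor, unsigned char *buf)
	{
		/* buf[0..3] receive the divisor in little-endian byte order */
		put_unaligned_le32(divisor, buf);
	}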
diff --git a/drivers/usb/serial/mct_u232.h b/drivers/usb/serial/mct_u232.h
index 07b6bec..7417d5c 100644
--- a/drivers/usb/serial/mct_u232.h
+++ b/drivers/usb/serial/mct_u232.h
@@ -73,6 +73,8 @@
#define MCT_U232_SET_CTS_REQUEST 12
#define MCT_U232_SET_CTS_SIZE 1
+#define MCT_U232_MAX_SIZE 4 /* max of the MCT_XXX_SIZE values */
+
/*
* Baud rate (divisor)
* Actually, there are two of them, MCT website calls them "Philips solution"
diff --git a/drivers/usb/serial/mos7720.c b/drivers/usb/serial/mos7720.c
index 763e32a..0d47f2c 100644
--- a/drivers/usb/serial/mos7720.c
+++ b/drivers/usb/serial/mos7720.c
@@ -81,12 +81,15 @@
static int debug;
+static struct usb_serial_driver moschip7720_2port_driver;
+
#define USB_VENDOR_ID_MOSCHIP 0x9710
#define MOSCHIP_DEVICE_ID_7720 0x7720
#define MOSCHIP_DEVICE_ID_7715 0x7715
-static struct usb_device_id moschip_port_id_table[] = {
+static const struct usb_device_id moschip_port_id_table[] = {
{ USB_DEVICE(USB_VENDOR_ID_MOSCHIP, MOSCHIP_DEVICE_ID_7720) },
+ { USB_DEVICE(USB_VENDOR_ID_MOSCHIP, MOSCHIP_DEVICE_ID_7715) },
{ } /* terminating entry */
};
MODULE_DEVICE_TABLE(usb, moschip_port_id_table);
@@ -106,7 +109,7 @@
__u8 sp1;
__u8 sp2;
- dbg("%s", " : Entering\n");
+ dbg(" : Entering");
switch (status) {
case 0:
@@ -186,6 +189,75 @@
}
/*
+ * mos7715_interrupt_callback
+ * this is the 7715's callback function for when we have received data on
+ * the interrupt endpoint.
+ */
+static void mos7715_interrupt_callback(struct urb *urb)
+{
+ int result;
+ int length;
+ int status = urb->status;
+ __u8 *data;
+ __u8 iir;
+
+ switch (status) {
+ case 0:
+ /* success */
+ break;
+ case -ECONNRESET:
+ case -ENOENT:
+ case -ESHUTDOWN:
+ /* this urb is terminated, clean up */
+ dbg("%s - urb shutting down with status: %d", __func__,
+ status);
+ return;
+ default:
+ dbg("%s - nonzero urb status received: %d", __func__,
+ status);
+ goto exit;
+ }
+
+ length = urb->actual_length;
+ data = urb->transfer_buffer;
+
+ /* Structure of data from 7715 device:
+ * Byte 1: IIR serial Port
+ * Byte 2: unused
+ * Byte 3: DSR parallel port
+ * Byte 4: FIFO status for both */
+
+ if (unlikely(length != 4)) {
+ dbg("Wrong data !!!");
+ return;
+ }
+
+ iir = data[0];
+ if (!(iir & 0x01)) { /* serial port interrupt pending */
+ switch (iir & 0x0f) {
+ case SERIAL_IIR_RLS:
+ dbg("Serial Port: Receiver status error or address "
+ "bit detected in 9-bit mode\n");
+ break;
+ case SERIAL_IIR_CTI:
+ dbg("Serial Port: Receiver time out");
+ break;
+ case SERIAL_IIR_MS:
+ dbg("Serial Port: Modem status change");
+ break;
+ }
+ }
+
+exit:
+ result = usb_submit_urb(urb, GFP_ATOMIC);
+ if (result)
+ dev_err(&urb->dev->dev,
+ "%s - Error %d submitting control urb\n",
+ __func__, result);
+ return;
+}
+
+/*
* mos7720_bulk_in_callback
* this is the callback function for when we have received data on the
* bulk in endpoint.
@@ -206,7 +278,7 @@
mos7720_port = urb->context;
if (!mos7720_port) {
- dbg("%s", "NULL mos7720_port pointer \n");
+ dbg("NULL mos7720_port pointer");
return ;
}
@@ -218,7 +290,6 @@
tty = tty_port_tty_get(&port->port);
if (tty && urb->actual_length) {
- tty_buffer_request_room(tty, urb->actual_length);
tty_insert_flip_string(tty, data, urb->actual_length);
tty_flip_buffer_push(tty);
}
@@ -275,17 +346,15 @@
* this function will be used for sending command to device
*/
static int send_mos_cmd(struct usb_serial *serial, __u8 request, __u16 value,
- __u16 index, void *data)
+ __u16 index, u8 *data)
{
int status;
- unsigned int pipe;
+ u8 *buf;
u16 product = le16_to_cpu(serial->dev->descriptor.idProduct);
- __u8 requesttype;
- __u16 size = 0x0000;
if (value < MOS_MAX_PORT) {
if (product == MOSCHIP_DEVICE_ID_7715)
- value = value*0x100+0x100;
+ value = 0x0200; /* identifies the 7715's serial port */
else
value = value*0x100+0x200;
} else {
@@ -298,27 +367,58 @@
}
if (request == MOS_WRITE) {
- request = (__u8)MOS_WRITE;
- requesttype = (__u8)0x40;
- value = value + (__u16)*((unsigned char *)data);
- data = NULL;
- pipe = usb_sndctrlpipe(serial->dev, 0);
+ value = value + *data;
+ status = usb_control_msg(serial->dev,
+ usb_sndctrlpipe(serial->dev, 0), MOS_WRITE,
+ 0x40, value, index, NULL, 0, MOS_WDR_TIMEOUT);
} else {
- request = (__u8)MOS_READ;
- requesttype = (__u8)0xC0;
- size = 0x01;
- pipe = usb_rcvctrlpipe(serial->dev, 0);
+ buf = kmalloc(1, GFP_KERNEL);
+ if (!buf) {
+ status = -ENOMEM;
+ goto out;
+ }
+ status = usb_control_msg(serial->dev,
+ usb_rcvctrlpipe(serial->dev, 0), MOS_READ,
+ 0xc0, value, index, buf, 1, MOS_WDR_TIMEOUT);
+ *data = *buf;
+ kfree(buf);
}
-
- status = usb_control_msg(serial->dev, pipe, request, requesttype,
- value, index, data, size, MOS_WDR_TIMEOUT);
-
+out:
if (status < 0)
- dbg("Command Write failed Value %x index %x\n", value, index);
+ dbg("Command Write failed Value %x index %x", value, index);
return status;
}
+
+/*
+ * mos77xx_probe
+ * this function installs the appropriate read interrupt endpoint callback
+ * depending on whether the device is a 7720 or 7715, thus avoiding costly
+ * run-time checks in the high-frequency callback routine itself.
+ */
+static int mos77xx_probe(struct usb_serial *serial,
+ const struct usb_device_id *id)
+{
+ if (id->idProduct == MOSCHIP_DEVICE_ID_7715)
+ moschip7720_2port_driver.read_int_callback =
+ mos7715_interrupt_callback;
+ else
+ moschip7720_2port_driver.read_int_callback =
+ mos7720_interrupt_callback;
+
+ return 0;
+}
+
+static int mos77xx_calc_num_ports(struct usb_serial *serial)
+{
+ u16 product = le16_to_cpu(serial->dev->descriptor.idProduct);
+ if (product == MOSCHIP_DEVICE_ID_7715)
+ return 1;
+
+ return 2;
+}
+
static int mos7720_open(struct tty_struct *tty, struct usb_serial_port *port)
{
struct usb_serial *serial;
@@ -390,7 +490,7 @@
*/
port_number = port->number - port->serial->minor;
send_mos_cmd(port->serial, MOS_READ, port_number, UART_LSR, &data);
- dbg("SS::%p LSR:%x\n", mos7720_port, data);
+ dbg("SS::%p LSR:%x", mos7720_port, data);
dbg("Check:Sending Command ..........");
@@ -729,7 +829,7 @@
struct moschip_port *mos7720_port;
int status;
- dbg("%s- port %d\n", __func__, port->number);
+ dbg("%s- port %d", __func__, port->number);
mos7720_port = usb_get_serial_port_data(port);
@@ -1208,7 +1308,7 @@
return;
}
- dbg("%s\n", "setting termios - ASPIRE");
+ dbg("setting termios - ASPIRE");
cflag = tty->termios->c_cflag;
@@ -1226,7 +1326,7 @@
change_port_settings(tty, mos7720_port, old_termios);
if (!port->read_urb) {
- dbg("%s", "URB KILLED !!!!!\n");
+ dbg("URB KILLED !!!!!");
return;
}
@@ -1495,6 +1595,7 @@
struct usb_device *dev;
int i;
char data;
+ u16 product = le16_to_cpu(serial->dev->descriptor.idProduct);
dbg("%s: Entering ..........", __func__);
@@ -1514,6 +1615,29 @@
usb_set_serial_data(serial, mos7720_serial);
+ /*
+ * The 7715 uses the first bulk in/out endpoint pair for the parallel
+ * port, and the second for the serial port. Because the usbserial core
+ * assumes both pairs are serial ports, we must engage in a bit of
+ * subterfuge and swap the pointers for ports 0 and 1 in order to make
+ * port 0 point to the serial port. However, both moschip devices use a
+ * single interrupt-in endpoint for both ports (as mentioned a little
+ * further down), and this endpoint was assigned to port 0. So after
+ * the swap, we must copy the interrupt endpoint elements from port 1
+ * (as newly assigned) to port 0, and null out port 1 pointers.
+ */
+ if (product == MOSCHIP_DEVICE_ID_7715) {
+ struct usb_serial_port *tmp = serial->port[0];
+ serial->port[0] = serial->port[1];
+ serial->port[1] = tmp;
+ serial->port[0]->interrupt_in_urb = tmp->interrupt_in_urb;
+ serial->port[0]->interrupt_in_buffer = tmp->interrupt_in_buffer;
+ serial->port[0]->interrupt_in_endpointAddress =
+ tmp->interrupt_in_endpointAddress;
+ serial->port[1]->interrupt_in_urb = NULL;
+ serial->port[1]->interrupt_in_buffer = NULL;
+ }
+
/* we set up the pointers to the endpoints in the mos7720_open *
* function, as the structures aren't created yet. */
@@ -1529,7 +1653,7 @@
/* Initialize all port interrupt end point to port 0 int
* endpoint. Our device has only one interrupt endpoint
- * comman to all ports */
+ * common to all ports */
serial->port[i]->interrupt_in_endpointAddress =
serial->port[0]->interrupt_in_endpointAddress;
@@ -1584,11 +1708,12 @@
.description = "Moschip 2 port adapter",
.usb_driver = &usb_driver,
.id_table = moschip_port_id_table,
- .num_ports = 2,
+ .calc_num_ports = mos77xx_calc_num_ports,
.open = mos7720_open,
.close = mos7720_close,
.throttle = mos7720_throttle,
.unthrottle = mos7720_unthrottle,
+ .probe = mos77xx_probe,
.attach = mos7720_startup,
.release = mos7720_release,
.ioctl = mos7720_ioctl,
@@ -1600,7 +1725,7 @@
.chars_in_buffer = mos7720_chars_in_buffer,
.break_ctl = mos7720_break,
.read_bulk_callback = mos7720_bulk_in_callback,
- .read_int_callback = mos7720_interrupt_callback,
+ .read_int_callback = NULL /* dynamically assigned in probe() */
};
static int __init moschip7720_init(void)
diff --git a/drivers/usb/serial/mos7840.c b/drivers/usb/serial/mos7840.c
index 2cfe245..2fda1c0 100644
--- a/drivers/usb/serial/mos7840.c
+++ b/drivers/usb/serial/mos7840.c
@@ -181,7 +181,7 @@
#define URB_TRANSFER_BUFFER_SIZE 32 /* URB Size */
-static struct usb_device_id moschip_port_id_table[] = {
+static const struct usb_device_id moschip_port_id_table[] = {
{USB_DEVICE(USB_VENDOR_ID_MOSCHIP, MOSCHIP_DEVICE_ID_7840)},
{USB_DEVICE(USB_VENDOR_ID_MOSCHIP, MOSCHIP_DEVICE_ID_7820)},
{USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USO9ML2_2)},
@@ -198,7 +198,7 @@
{} /* terminating entry */
};
-static __devinitdata struct usb_device_id moschip_id_table_combined[] = {
+static const struct usb_device_id moschip_id_table_combined[] __devinitconst = {
{USB_DEVICE(USB_VENDOR_ID_MOSCHIP, MOSCHIP_DEVICE_ID_7840)},
{USB_DEVICE(USB_VENDOR_ID_MOSCHIP, MOSCHIP_DEVICE_ID_7820)},
{USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USO9ML2_2)},
@@ -283,12 +283,19 @@
{
struct usb_device *dev = port->serial->dev;
int ret = 0;
+ u8 *buf;
+
+ buf = kmalloc(VENDOR_READ_LENGTH, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
ret = usb_control_msg(dev, usb_rcvctrlpipe(dev, 0), MCS_RDREQ,
- MCS_RD_RTYPE, 0, reg, val, VENDOR_READ_LENGTH,
+ MCS_RD_RTYPE, 0, reg, buf, VENDOR_READ_LENGTH,
MOS_WDR_TIMEOUT);
+ *val = buf[0];
dbg("mos7840_get_reg_sync offset is %x, return val %x", reg, *val);
- *val = (*val) & 0x00ff;
+
+ kfree(buf);
return ret;
}
@@ -341,6 +348,11 @@
struct usb_device *dev = port->serial->dev;
int ret = 0;
__u16 Wval;
+ u8 *buf;
+
+ buf = kmalloc(VENDOR_READ_LENGTH, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
/* dbg("application number is %4x",
(((__u16)port->number - (__u16)(port->serial->minor))+1)<<8); */
@@ -364,9 +376,11 @@
}
}
ret = usb_control_msg(dev, usb_rcvctrlpipe(dev, 0), MCS_RDREQ,
- MCS_RD_RTYPE, Wval, reg, val, VENDOR_READ_LENGTH,
+ MCS_RD_RTYPE, Wval, reg, buf, VENDOR_READ_LENGTH,
MOS_WDR_TIMEOUT);
- *val = (*val) & 0x00ff;
+ *val = buf[0];
+
+ kfree(buf);
return ret;
}
@@ -750,7 +764,6 @@
if (urb->actual_length) {
tty = tty_port_tty_get(&mos7840_port->port->port);
if (tty) {
- tty_buffer_request_room(tty, urb->actual_length);
tty_insert_flip_string(tty, data, urb->actual_length);
dbg(" %s ", data);
tty_flip_buffer_push(tty);
diff --git a/drivers/usb/serial/moto_modem.c b/drivers/usb/serial/moto_modem.c
index 99bd00f5..cf17183 100644
--- a/drivers/usb/serial/moto_modem.c
+++ b/drivers/usb/serial/moto_modem.c
@@ -21,7 +21,7 @@
#include <linux/usb.h>
#include <linux/usb/serial.h>
-static struct usb_device_id id_table [] = {
+static const struct usb_device_id id_table[] = {
{ USB_DEVICE(0x05c6, 0x3197) }, /* unknown Motorola phone */
{ USB_DEVICE(0x0c44, 0x0022) }, /* unknown Motorola phone */
{ USB_DEVICE(0x22b8, 0x2a64) }, /* Motorola KRZR K1m */
diff --git a/drivers/usb/serial/navman.c b/drivers/usb/serial/navman.c
index 5ceaa4c..04a6cbb 100644
--- a/drivers/usb/serial/navman.c
+++ b/drivers/usb/serial/navman.c
@@ -22,7 +22,7 @@
static int debug;
-static struct usb_device_id id_table [] = {
+static const struct usb_device_id id_table[] = {
{ USB_DEVICE(0x0a99, 0x0001) }, /* Talon Technology device */
{ },
};
@@ -66,7 +66,6 @@
tty = tty_port_tty_get(&port->port);
if (tty && urb->actual_length) {
- tty_buffer_request_room(tty, urb->actual_length);
tty_insert_flip_string(tty, data, urb->actual_length);
tty_flip_buffer_push(tty);
}
diff --git a/drivers/usb/serial/omninet.c b/drivers/usb/serial/omninet.c
index 0622650..89c724c 100644
--- a/drivers/usb/serial/omninet.c
+++ b/drivers/usb/serial/omninet.c
@@ -75,7 +75,7 @@
static void omninet_release(struct usb_serial *serial);
static int omninet_attach(struct usb_serial *serial);
-static struct usb_device_id id_table[] = {
+static const struct usb_device_id id_table[] = {
{ USB_DEVICE(ZYXEL_VENDOR_ID, ZYXEL_OMNINET_ID) },
{ USB_DEVICE(ZYXEL_VENDOR_ID, BT_IGNITIONPRO_ID) },
{ } /* Terminating entry */
@@ -218,8 +218,8 @@
if (debug && header->oh_xxx != 0x30) {
if (urb->actual_length) {
- printk(KERN_DEBUG __FILE__
- ": omninet_read %d: ", header->oh_len);
+ printk(KERN_DEBUG "%s: omninet_read %d: ",
+ __FILE__, header->oh_len);
for (i = 0; i < (header->oh_len +
OMNINET_HEADERLEN); i++)
printk("%.2x ", data[i]);
@@ -332,7 +332,7 @@
struct usb_serial_port *port = urb->context;
int status = urb->status;
- dbg("%s - port %0x\n", __func__, port->number);
+ dbg("%s - port %0x", __func__, port->number);
port->write_urb_busy = 0;
if (status) {
diff --git a/drivers/usb/serial/opticon.c b/drivers/usb/serial/opticon.c
index 4cdb975..f37476e 100644
--- a/drivers/usb/serial/opticon.c
+++ b/drivers/usb/serial/opticon.c
@@ -22,7 +22,7 @@
static int debug;
-static struct usb_device_id id_table[] = {
+static const struct usb_device_id id_table[] = {
{ USB_DEVICE(0x065a, 0x0009) },
{ },
};
@@ -55,7 +55,6 @@
int status = urb->status;
struct tty_struct *tty;
int result;
- int available_room = 0;
int data_length;
dbg("%s - port %d", __func__, port->number);
@@ -96,13 +95,9 @@
/* real data, send it to the tty layer */
tty = tty_port_tty_get(&port->port);
if (tty) {
- available_room = tty_buffer_request_room(tty,
- data_length);
- if (available_room) {
- tty_insert_flip_string(tty, data,
- available_room);
- tty_flip_buffer_push(tty);
- }
+ tty_insert_flip_string(tty, data,
+ data_length);
+ tty_flip_buffer_push(tty);
tty_kref_put(tty);
}
} else {
@@ -217,7 +212,7 @@
spin_lock_irqsave(&priv->lock, flags);
if (priv->outstanding_urbs > URB_UPPER_LIMIT) {
spin_unlock_irqrestore(&priv->lock, flags);
- dbg("%s - write limit hit\n", __func__);
+ dbg("%s - write limit hit", __func__);
return 0;
}
priv->outstanding_urbs++;
@@ -288,7 +283,7 @@
spin_lock_irqsave(&priv->lock, flags);
if (priv->outstanding_urbs > URB_UPPER_LIMIT * 2 / 3) {
spin_unlock_irqrestore(&priv->lock, flags);
- dbg("%s - write limit hit\n", __func__);
+ dbg("%s - write limit hit", __func__);
return 0;
}
spin_unlock_irqrestore(&priv->lock, flags);
diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
index 6e94a67..847b805 100644
--- a/drivers/usb/serial/option.c
+++ b/drivers/usb/serial/option.c
@@ -336,15 +336,42 @@
#define AIRPLUS_VENDOR_ID 0x1011
#define AIRPLUS_PRODUCT_MCD650 0x3198
+/* Longcheer/Longsung vendor ID; makes whitelabel devices that
+ * many other vendors like 4G Systems, Alcatel, ChinaBird,
+ * Mobidata, etc sell under their own brand names.
+ */
+#define LONGCHEER_VENDOR_ID 0x1c9e
+
/* 4G Systems products */
-#define FOUR_G_SYSTEMS_VENDOR_ID 0x1c9e
+/* This is the 4G XS Stick W14 a.k.a. Mobilcom Debitel Surf-Stick *
+ * It seems to contain a Qualcomm QSC6240/6290 chipset */
#define FOUR_G_SYSTEMS_PRODUCT_W14 0x9603
/* Haier products */
#define HAIER_VENDOR_ID 0x201e
#define HAIER_PRODUCT_CE100 0x2009
-static struct usb_device_id option_ids[] = {
+/* some device interfaces need special handling for a number of reasons */
+enum option_blacklist_reason {
+ OPTION_BLACKLIST_NONE = 0,
+ OPTION_BLACKLIST_SENDSETUP = 1,
+ OPTION_BLACKLIST_RESERVED_IF = 2
+};
+
+struct option_blacklist_info {
+ const u32 infolen; /* number of interface numbers on blacklist */
+ const u8 *ifaceinfo; /* pointer to the array holding the numbers */
+ enum option_blacklist_reason reason;
+};
+
+static const u8 four_g_w14_no_sendsetup[] = { 0, 1 };
+static const struct option_blacklist_info four_g_w14_blacklist = {
+ .infolen = ARRAY_SIZE(four_g_w14_no_sendsetup),
+ .ifaceinfo = four_g_w14_no_sendsetup,
+ .reason = OPTION_BLACKLIST_SENDSETUP
+};
+
+static const struct usb_device_id option_ids[] = {
{ USB_DEVICE(OPTION_VENDOR_ID, OPTION_PRODUCT_COLT) },
{ USB_DEVICE(OPTION_VENDOR_ID, OPTION_PRODUCT_RICOLA) },
{ USB_DEVICE(OPTION_VENDOR_ID, OPTION_PRODUCT_RICOLA_LIGHT) },
@@ -644,7 +671,9 @@
{ USB_DEVICE(ALCATEL_VENDOR_ID, ALCATEL_PRODUCT_X060S) },
{ USB_DEVICE(AIRPLUS_VENDOR_ID, AIRPLUS_PRODUCT_MCD650) },
{ USB_DEVICE(TLAYTECH_VENDOR_ID, TLAYTECH_PRODUCT_TEU800) },
- { USB_DEVICE(FOUR_G_SYSTEMS_VENDOR_ID, FOUR_G_SYSTEMS_PRODUCT_W14) },
+ { USB_DEVICE(LONGCHEER_VENDOR_ID, FOUR_G_SYSTEMS_PRODUCT_W14),
+ .driver_info = (kernel_ulong_t)&four_g_w14_blacklist
+ },
{ USB_DEVICE(HAIER_VENDOR_ID, HAIER_PRODUCT_CE100) },
{ } /* Terminating entry */
};
@@ -709,6 +738,7 @@
spinlock_t susp_lock;
unsigned int suspended:1;
int in_flight;
+ struct option_blacklist_info *blacklist_info;
};
struct option_port_private {
@@ -778,9 +808,27 @@
if (!data)
return -ENOMEM;
spin_lock_init(&data->susp_lock);
+ data->blacklist_info = (struct option_blacklist_info *) id->driver_info;
return 0;
}
+static enum option_blacklist_reason is_blacklisted(const u8 ifnum,
+ const struct option_blacklist_info *blacklist)
+{
+ const u8 *info;
+ int i;
+
+ if (blacklist) {
+ info = blacklist->ifaceinfo;
+
+ for (i = 0; i < blacklist->infolen; i++) {
+ if (info[i] == ifnum)
+ return blacklist->reason;
+ }
+ }
+ return OPTION_BLACKLIST_NONE;
+}
+
static void option_set_termios(struct tty_struct *tty,
struct usb_serial_port *port, struct ktermios *old_termios)
{
@@ -921,7 +969,6 @@
} else {
tty = tty_port_tty_get(&port->port);
if (urb->actual_length) {
- tty_buffer_request_room(tty, urb->actual_length);
tty_insert_flip_string(tty, data, urb->actual_length);
tty_flip_buffer_push(tty);
} else
@@ -929,9 +976,9 @@
tty_kref_put(tty);
/* Resubmit urb so we continue receiving */
- if (port->port.count && status != -ESHUTDOWN) {
+ if (status != -ESHUTDOWN) {
err = usb_submit_urb(urb, GFP_ATOMIC);
- if (err)
+ if (err && err != -EPERM)
printk(KERN_ERR "%s: resubmit read urb failed. "
"(%d)", __func__, err);
else
@@ -985,7 +1032,7 @@
(struct usb_ctrlrequest *)urb->transfer_buffer;
if (!req_pkt) {
- dbg("%s: NULL req_pkt\n", __func__);
+ dbg("%s: NULL req_pkt", __func__);
return;
}
if ((req_pkt->bRequestType == 0xA1) &&
@@ -1211,11 +1258,19 @@
static int option_send_setup(struct usb_serial_port *port)
{
struct usb_serial *serial = port->serial;
+ struct option_intf_private *intfdata =
+ (struct option_intf_private *) serial->private;
struct option_port_private *portdata;
int ifNum = serial->interface->cur_altsetting->desc.bInterfaceNumber;
int val = 0;
dbg("%s", __func__);
+ if (is_blacklisted(ifNum, intfdata->blacklist_info) ==
+ OPTION_BLACKLIST_SENDSETUP) {
+ dbg("No send_setup on blacklisted interface #%d\n", ifNum);
+ return -EIO;
+ }
+
portdata = usb_get_serial_port_data(port);
if (portdata->dtr_state)
@@ -1401,7 +1456,7 @@
for (i = 0; i < serial->num_ports; i++) {
port = serial->port[i];
if (!port->interrupt_in_urb) {
- dbg("%s: No interrupt URB for port %d\n", __func__, i);
+ dbg("%s: No interrupt URB for port %d", __func__, i);
continue;
}
err = usb_submit_urb(port->interrupt_in_urb, GFP_NOIO);
diff --git a/drivers/usb/serial/oti6858.c b/drivers/usb/serial/oti6858.c
index c644e26..deeacde 100644
--- a/drivers/usb/serial/oti6858.c
+++ b/drivers/usb/serial/oti6858.c
@@ -58,7 +58,7 @@
#define OTI6858_AUTHOR "Tomasz Michal Lukaszewski <FIXME@FIXME>"
#define OTI6858_VERSION "0.1"
-static struct usb_device_id id_table [] = {
+static const struct usb_device_id id_table[] = {
{ USB_DEVICE(OTI6858_VENDOR_ID, OTI6858_PRODUCT_ID) },
{ }
};
@@ -302,7 +302,7 @@
struct usb_serial_port *port = priv->port;
int count = 0, result;
unsigned long flags;
- unsigned char allow;
+ u8 *allow;
dbg("%s(port = %d)", __func__, port->number);
@@ -321,13 +321,20 @@
count = port->bulk_out_size;
if (count != 0) {
+ allow = kmalloc(1, GFP_KERNEL);
+ if (!allow) {
+ dev_err(&port->dev, "%s(): kmalloc failed\n",
+ __func__);
+ return;
+ }
result = usb_control_msg(port->serial->dev,
usb_rcvctrlpipe(port->serial->dev, 0),
OTI6858_REQ_T_CHECK_TXBUFF,
OTI6858_REQ_CHECK_TXBUFF,
- count, 0, &allow, 1, 100);
- if (result != 1 || allow != 0)
+ count, 0, allow, 1, 100);
+ if (result != 1 || *allow != 0)
count = 0;
+ kfree(allow);
}
if (count == 0) {
@@ -578,9 +585,6 @@
usb_clear_halt(serial->dev, port->write_urb->pipe);
usb_clear_halt(serial->dev, port->read_urb->pipe);
- if (port->port.count != 1)
- return 0;
-
buf = kmalloc(OTI6858_CTRL_PKT_SIZE, GFP_KERNEL);
if (buf == NULL) {
dev_err(&port->dev, "%s(): out of memory!\n", __func__);
@@ -927,10 +931,6 @@
spin_unlock_irqrestore(&priv->lock, flags);
if (status != 0) {
- if (!port->port.count) {
- dbg("%s(): port is closed, exiting", __func__);
- return;
- }
/*
if (status == -EPROTO) {
* PL2303 mysteriously fails with -EPROTO reschedule
@@ -954,14 +954,12 @@
}
tty_kref_put(tty);
- /* schedule the interrupt urb if we are still open */
- if (port->port.count != 0) {
- port->interrupt_in_urb->dev = port->serial->dev;
- result = usb_submit_urb(port->interrupt_in_urb, GFP_ATOMIC);
- if (result != 0) {
- dev_err(&port->dev, "%s(): usb_submit_urb() failed,"
- " error %d\n", __func__, result);
- }
+ /* schedule the interrupt urb */
+ port->interrupt_in_urb->dev = port->serial->dev;
+ result = usb_submit_urb(port->interrupt_in_urb, GFP_ATOMIC);
+ if (result != 0 && result != -EPERM) {
+ dev_err(&port->dev, "%s(): usb_submit_urb() failed,"
+ " error %d\n", __func__, result);
}
}
diff --git a/drivers/usb/serial/pl2303.c b/drivers/usb/serial/pl2303.c
index 9ec1a49..73d5f34 100644
--- a/drivers/usb/serial/pl2303.c
+++ b/drivers/usb/serial/pl2303.c
@@ -50,7 +50,7 @@
char *buf_put;
};
-static struct usb_device_id id_table [] = {
+static const struct usb_device_id id_table[] = {
{ USB_DEVICE(PL2303_VENDOR_ID, PL2303_PRODUCT_ID) },
{ USB_DEVICE(PL2303_VENDOR_ID, PL2303_PRODUCT_ID_RSAQ2) },
{ USB_DEVICE(PL2303_VENDOR_ID, PL2303_PRODUCT_ID_DCU11) },
@@ -451,7 +451,6 @@
port->write_urb->transfer_buffer);
port->write_urb->transfer_buffer_length = count;
- port->write_urb->dev = port->serial->dev;
result = usb_submit_urb(port->write_urb, GFP_ATOMIC);
if (result) {
dev_err(&port->dev, "%s - failed submitting write urb,"
@@ -769,7 +768,6 @@
pl2303_set_termios(tty, port, &tmp_termios);
dbg("%s - submitting read urb", __func__);
- port->read_urb->dev = serial->dev;
result = usb_submit_urb(port->read_urb, GFP_KERNEL);
if (result) {
dev_err(&port->dev, "%s - failed submitting read urb,"
@@ -779,7 +777,6 @@
}
dbg("%s - submitting interrupt urb", __func__);
- port->interrupt_in_urb->dev = serial->dev;
result = usb_submit_urb(port->interrupt_in_urb, GFP_KERNEL);
if (result) {
dev_err(&port->dev, "%s - failed submitting interrupt urb,"
@@ -895,10 +892,23 @@
static int pl2303_ioctl(struct tty_struct *tty, struct file *file,
unsigned int cmd, unsigned long arg)
{
+ struct serial_struct ser;
struct usb_serial_port *port = tty->driver_data;
dbg("%s (%d) cmd = 0x%04x", __func__, port->number, cmd);
switch (cmd) {
+ case TIOCGSERIAL:
+ memset(&ser, 0, sizeof ser);
+ ser.type = PORT_16654;
+ ser.line = port->serial->minor;
+ ser.port = port->number;
+ ser.baud_base = 460800;
+
+ if (copy_to_user((void __user *)arg, &ser, sizeof ser))
+ return -EFAULT;
+
+ return 0;
+
case TIOCMIWAIT:
dbg("%s (%d) TIOCMIWAIT", __func__, port->number);
return wait_modem_info(port, arg);
@@ -1042,7 +1052,6 @@
tty_flag = TTY_FRAME;
dbg("%s - tty_flag = %d", __func__, tty_flag);
- tty_buffer_request_room(tty, urb->actual_length + 1);
/* overrun is special, not associated with a char */
if (line_status & UART_OVERRUN_ERROR)
tty_insert_flip_char(tty, 0, TTY_OVERRUN);
@@ -1072,16 +1081,11 @@
if (status) {
dbg("%s - urb status = %d", __func__, status);
- if (!port->port.count) {
- dbg("%s - port is closed, exiting.", __func__);
- return;
- }
if (status == -EPROTO) {
/* PL2303 mysteriously fails with -EPROTO reschedule
* the read */
dbg("%s - caught -EPROTO, resubmitting the urb",
__func__);
- urb->dev = port->serial->dev;
result = usb_submit_urb(urb, GFP_ATOMIC);
if (result)
dev_err(&urb->dev->dev, "%s - failed"
@@ -1108,15 +1112,10 @@
}
tty_kref_put(tty);
/* Schedule the next read _if_ we are still open */
- if (port->port.count) {
- urb->dev = port->serial->dev;
- result = usb_submit_urb(urb, GFP_ATOMIC);
- if (result)
- dev_err(&urb->dev->dev, "%s - failed resubmitting"
- " read urb, error %d\n", __func__, result);
- }
-
- return;
+ result = usb_submit_urb(urb, GFP_ATOMIC);
+ if (result && result != -EPERM)
+ dev_err(&urb->dev->dev, "%s - failed resubmitting"
+ " read urb, error %d\n", __func__, result);
}
static void pl2303_write_bulk_callback(struct urb *urb)
@@ -1146,7 +1145,6 @@
dbg("%s - nonzero write bulk status received: %d", __func__,
status);
port->write_urb->transfer_buffer_length = 1;
- port->write_urb->dev = port->serial->dev;
result = usb_submit_urb(port->write_urb, GFP_ATOMIC);
if (result)
dev_err(&urb->dev->dev, "%s - failed resubmitting write"
diff --git a/drivers/usb/serial/qcaux.c b/drivers/usb/serial/qcaux.c
new file mode 100644
index 0000000..0b93620
--- /dev/null
+++ b/drivers/usb/serial/qcaux.c
@@ -0,0 +1,96 @@
+/*
+ * Qualcomm USB Auxiliary Serial Port driver
+ *
+ * Copyright (C) 2008 Greg Kroah-Hartman <greg@kroah.com>
+ * Copyright (C) 2010 Dan Williams <dcbw@redhat.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Devices listed here usually provide a CDC ACM port on which normal modem
+ * AT commands and PPP can be used. But when that port is in use by PPP it
+ * cannot be used simultaneously for status or signal strength. Instead, the
+ * ports here can be queried for that information using the Qualcomm DM
+ * protocol.
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/tty.h>
+#include <linux/module.h>
+#include <linux/usb.h>
+#include <linux/usb/serial.h>
+
+/* NOTE: for now, only use this driver for devices that provide a CDC-ACM port
+ * for normal AT commands, but also provide secondary USB interfaces for the
+ * QCDM-capable ports. Devices that do not provide a CDC-ACM port should
+ * probably be driven by option.ko.
+ */
+
+/* UTStarcom/Pantech/Curitel devices */
+#define UTSTARCOM_VENDOR_ID 0x106c
+#define UTSTARCOM_PRODUCT_PC5740 0x3701
+#define UTSTARCOM_PRODUCT_PC5750 0x3702 /* aka Pantech PX-500 */
+#define UTSTARCOM_PRODUCT_UM150 0x3711
+#define UTSTARCOM_PRODUCT_UM175_V1 0x3712
+#define UTSTARCOM_PRODUCT_UM175_V2 0x3714
+#define UTSTARCOM_PRODUCT_UM175_ALLTEL 0x3715
+
+/* CMOTECH devices */
+#define CMOTECH_VENDOR_ID 0x16d8
+#define CMOTECH_PRODUCT_CDU550 0x5553
+#define CMOTECH_PRODUCT_CDX650 0x6512
+
+static struct usb_device_id id_table[] = {
+ { USB_DEVICE_AND_INTERFACE_INFO(UTSTARCOM_VENDOR_ID, UTSTARCOM_PRODUCT_PC5740, 0xff, 0x00, 0x00) },
+ { USB_DEVICE_AND_INTERFACE_INFO(UTSTARCOM_VENDOR_ID, UTSTARCOM_PRODUCT_PC5750, 0xff, 0x00, 0x00) },
+ { USB_DEVICE_AND_INTERFACE_INFO(UTSTARCOM_VENDOR_ID, UTSTARCOM_PRODUCT_UM150, 0xff, 0x00, 0x00) },
+ { USB_DEVICE_AND_INTERFACE_INFO(UTSTARCOM_VENDOR_ID, UTSTARCOM_PRODUCT_UM175_V1, 0xff, 0x00, 0x00) },
+ { USB_DEVICE_AND_INTERFACE_INFO(UTSTARCOM_VENDOR_ID, UTSTARCOM_PRODUCT_UM175_V2, 0xff, 0x00, 0x00) },
+ { USB_DEVICE_AND_INTERFACE_INFO(UTSTARCOM_VENDOR_ID, UTSTARCOM_PRODUCT_UM175_ALLTEL, 0xff, 0x00, 0x00) },
+ { USB_DEVICE_AND_INTERFACE_INFO(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_CDU550, 0xff, 0xff, 0x00) },
+ { USB_DEVICE_AND_INTERFACE_INFO(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_CDX650, 0xff, 0xff, 0x00) },
+ { },
+};
+MODULE_DEVICE_TABLE(usb, id_table);
+
+static struct usb_driver qcaux_driver = {
+ .name = "qcaux",
+ .probe = usb_serial_probe,
+ .disconnect = usb_serial_disconnect,
+ .id_table = id_table,
+ .no_dynamic_id = 1,
+};
+
+static struct usb_serial_driver qcaux_device = {
+ .driver = {
+ .owner = THIS_MODULE,
+ .name = "qcaux",
+ },
+ .id_table = id_table,
+ .num_ports = 1,
+};
+
+static int __init qcaux_init(void)
+{
+ int retval;
+
+ retval = usb_serial_register(&qcaux_device);
+ if (retval)
+ return retval;
+ retval = usb_register(&qcaux_driver);
+ if (retval)
+ usb_serial_deregister(&qcaux_device);
+ return retval;
+}
+
+static void __exit qcaux_exit(void)
+{
+ usb_deregister(&qcaux_driver);
+ usb_serial_deregister(&qcaux_device);
+}
+
+module_init(qcaux_init);
+module_exit(qcaux_exit);
+MODULE_LICENSE("GPL");
diff --git a/drivers/usb/serial/qcserial.c b/drivers/usb/serial/qcserial.c
index 7528b8d..310ff6e 100644
--- a/drivers/usb/serial/qcserial.c
+++ b/drivers/usb/serial/qcserial.c
@@ -21,7 +21,7 @@
static int debug;
-static struct usb_device_id id_table[] = {
+static const struct usb_device_id id_table[] = {
{USB_DEVICE(0x05c6, 0x9211)}, /* Acer Gobi QDL device */
{USB_DEVICE(0x05c6, 0x9212)}, /* Acer Gobi Modem Device */
{USB_DEVICE(0x03f0, 0x1f1d)}, /* HP un2400 Gobi Modem Device */
diff --git a/drivers/usb/serial/siemens_mpi.c b/drivers/usb/serial/siemens_mpi.c
index 951ea0c..cb8195c 100644
--- a/drivers/usb/serial/siemens_mpi.c
+++ b/drivers/usb/serial/siemens_mpi.c
@@ -22,7 +22,7 @@
#define DRIVER_DESC "Driver for Siemens USB/MPI adapter"
-static struct usb_device_id id_table[] = {
+static const struct usb_device_id id_table[] = {
/* Vendor and product id for 6ES7-972-0CB20-0XA0 */
{ USB_DEVICE(0x908, 0x0004) },
{ },
diff --git a/drivers/usb/serial/sierra.c b/drivers/usb/serial/sierra.c
index 3eb6143..34e6f89 100644
--- a/drivers/usb/serial/sierra.c
+++ b/drivers/usb/serial/sierra.c
@@ -226,7 +226,7 @@
.ifaceinfo = direct_ip_non_serial_ifaces,
};
-static struct usb_device_id id_table [] = {
+static const struct usb_device_id id_table[] = {
{ USB_DEVICE(0x0F3D, 0x0112) }, /* Airprime/Sierra PC 5220 */
{ USB_DEVICE(0x03F0, 0x1B1D) }, /* HP ev2200 a.k.a MC5720 */
{ USB_DEVICE(0x03F0, 0x1E1D) }, /* HP hs2300 a.k.a MC8775 */
@@ -304,16 +304,6 @@
};
MODULE_DEVICE_TABLE(usb, id_table);
-static struct usb_driver sierra_driver = {
- .name = "sierra",
- .probe = usb_serial_probe,
- .disconnect = usb_serial_disconnect,
- .suspend = usb_serial_suspend,
- .resume = usb_serial_resume,
- .id_table = id_table,
- .no_dynamic_id = 1,
- .supports_autosuspend = 1,
-};
struct sierra_port_private {
spinlock_t lock; /* lock the structure */
@@ -477,7 +467,7 @@
static int sierra_write(struct tty_struct *tty, struct usb_serial_port *port,
const unsigned char *buf, int count)
{
- struct sierra_port_private *portdata = usb_get_serial_port_data(port);
+ struct sierra_port_private *portdata;
struct sierra_intf_private *intfdata;
struct usb_serial *serial = port->serial;
unsigned long flags;
@@ -604,14 +594,15 @@
} else {
if (urb->actual_length) {
tty = tty_port_tty_get(&port->port);
+ if (tty) {
+ tty_insert_flip_string(tty, data,
+ urb->actual_length);
+ tty_flip_buffer_push(tty);
- tty_buffer_request_room(tty, urb->actual_length);
- tty_insert_flip_string(tty, data, urb->actual_length);
- tty_flip_buffer_push(tty);
-
- tty_kref_put(tty);
- usb_serial_debug_data(debug, &port->dev, __func__,
- urb->actual_length, data);
+ tty_kref_put(tty);
+ usb_serial_debug_data(debug, &port->dev,
+ __func__, urb->actual_length, data);
+ }
} else {
dev_dbg(&port->dev, "%s: empty read urb"
" received\n", __func__);
@@ -619,10 +610,10 @@
}
/* Resubmit urb so we continue receiving */
- if (port->port.count && status != -ESHUTDOWN && status != -EPERM) {
+ if (status != -ESHUTDOWN && status != -EPERM) {
usb_mark_last_busy(port->serial->dev);
err = usb_submit_urb(urb, GFP_ATOMIC);
- if (err)
+ if (err && err != -EPERM)
dev_err(&port->dev, "resubmit read urb failed."
"(%d)\n", err);
}
@@ -681,11 +672,11 @@
dev_dbg(&port->dev, "%s: error %d\n", __func__, status);
/* Resubmit urb so we continue receiving IRQ data */
- if (port->port.count && status != -ESHUTDOWN && status != -ENOENT) {
+ if (status != -ESHUTDOWN && status != -ENOENT) {
usb_mark_last_busy(serial->dev);
urb->dev = serial->dev;
err = usb_submit_urb(urb, GFP_ATOMIC);
- if (err)
+ if (err && err != -EPERM)
dev_err(&port->dev, "%s: resubmit intr urb "
"failed. (%d)\n", __func__, err);
}
@@ -1061,11 +1052,31 @@
return ec ? -EIO : 0;
}
+
+static int sierra_reset_resume(struct usb_interface *intf)
+{
+ struct usb_serial *serial = usb_get_intfdata(intf);
+ dev_err(&serial->dev->dev, "%s\n", __func__);
+ return usb_serial_resume(intf);
+}
#else
#define sierra_suspend NULL
#define sierra_resume NULL
+#define sierra_reset_resume NULL
#endif
+static struct usb_driver sierra_driver = {
+ .name = "sierra",
+ .probe = usb_serial_probe,
+ .disconnect = usb_serial_disconnect,
+ .suspend = usb_serial_suspend,
+ .resume = usb_serial_resume,
+ .reset_resume = sierra_reset_resume,
+ .id_table = id_table,
+ .no_dynamic_id = 1,
+ .supports_autosuspend = 1,
+};
+
static struct usb_serial_driver sierra_device = {
.driver = {
.owner = THIS_MODULE,
diff --git a/drivers/usb/serial/spcp8x5.c b/drivers/usb/serial/spcp8x5.c
index 1e58220..5d39191 100644
--- a/drivers/usb/serial/spcp8x5.c
+++ b/drivers/usb/serial/spcp8x5.c
@@ -45,7 +45,7 @@
#define SPCP8x5_835_VID 0x04fc
#define SPCP8x5_835_PID 0x0231
-static struct usb_device_id id_table [] = {
+static const struct usb_device_id id_table[] = {
{ USB_DEVICE(SPCP8x5_PHILIPS_VID , SPCP8x5_PHILIPS_PID)},
{ USB_DEVICE(SPCP8x5_INTERMATIC_VID, SPCP8x5_INTERMATIC_PID)},
{ USB_DEVICE(SPCP8x5_835_VID, SPCP8x5_835_PID)},
@@ -609,7 +609,7 @@
if (i < 0)
dev_err(&port->dev, "Set UART format %#x failed (error = %d)\n",
uartdata, i);
- dbg("0x21:0x40:0:0 %d\n", i);
+ dbg("0x21:0x40:0:0 %d", i);
if (cflag & CRTSCTS) {
/* enable hardware flow control */
@@ -677,7 +677,6 @@
struct tty_struct *tty;
unsigned char *data = urb->transfer_buffer;
unsigned long flags;
- int i;
int result = urb->status;
u8 status;
char tty_flag;
@@ -687,8 +686,6 @@
/* check the urb status */
if (result) {
- if (!port->port.count)
- return;
if (result == -EPROTO) {
/* spcp8x5 mysteriously fails with -EPROTO */
/* reschedule the read */
@@ -726,26 +723,20 @@
tty = tty_port_tty_get(&port->port);
if (tty && urb->actual_length) {
- tty_buffer_request_room(tty, urb->actual_length + 1);
/* overrun is special, not associated with a char */
if (status & UART_OVERRUN_ERROR)
tty_insert_flip_char(tty, 0, TTY_OVERRUN);
- for (i = 0; i < urb->actual_length; ++i)
- tty_insert_flip_char(tty, data[i], tty_flag);
+ tty_insert_flip_string_fixed_flag(tty, data,
+ urb->actual_length, tty_flag);
tty_flip_buffer_push(tty);
}
tty_kref_put(tty);
- /* Schedule the next read _if_ we are still open */
- if (port->port.count) {
- urb->dev = port->serial->dev;
- result = usb_submit_urb(urb , GFP_ATOMIC);
- if (result)
- dev_dbg(&port->dev, "failed submitting read urb %d\n",
- result);
- }
-
- return;
+ /* Schedule the next read */
+ urb->dev = port->serial->dev;
+ result = usb_submit_urb(urb , GFP_ATOMIC);
+ if (result)
+ dev_dbg(&port->dev, "failed submitting read urb %d\n", result);
}
/* get data from ring buffer and then write to usb bus */
diff --git a/drivers/usb/serial/symbolserial.c b/drivers/usb/serial/symbolserial.c
index b282c0f..7239888 100644
--- a/drivers/usb/serial/symbolserial.c
+++ b/drivers/usb/serial/symbolserial.c
@@ -21,7 +21,7 @@
static int debug;
-static struct usb_device_id id_table[] = {
+static const struct usb_device_id id_table[] = {
{ USB_DEVICE(0x05e0, 0x0600) },
{ },
};
@@ -51,7 +51,6 @@
int status = urb->status;
struct tty_struct *tty;
int result;
- int available_room = 0;
int data_length;
dbg("%s - port %d", __func__, port->number);
@@ -89,13 +88,8 @@
*/
tty = tty_port_tty_get(&port->port);
if (tty) {
- available_room = tty_buffer_request_room(tty,
- data_length);
- if (available_room) {
- tty_insert_flip_string(tty, &data[1],
- available_room);
- tty_flip_buffer_push(tty);
- }
+ tty_insert_flip_string(tty, &data[1], data_length);
+ tty_flip_buffer_push(tty);
tty_kref_put(tty);
}
} else {
diff --git a/drivers/usb/serial/ti_usb_3410_5052.c b/drivers/usb/serial/ti_usb_3410_5052.c
index 1e9dc88..0afe5c7 100644
--- a/drivers/usb/serial/ti_usb_3410_5052.c
+++ b/drivers/usb/serial/ti_usb_3410_5052.c
@@ -1271,14 +1271,13 @@
int cnt;
do {
- cnt = tty_buffer_request_room(tty, length);
+ cnt = tty_insert_flip_string(tty, data, length);
if (cnt < length) {
dev_err(dev, "%s - dropping data, %d bytes lost\n",
__func__, length - cnt);
if (cnt == 0)
break;
}
- tty_insert_flip_string(tty, data, cnt);
tty_flip_buffer_push(tty);
data += cnt;
length -= cnt;
diff --git a/drivers/usb/serial/usb-serial.c b/drivers/usb/serial/usb-serial.c
index 33c85f7..3873660 100644
--- a/drivers/usb/serial/usb-serial.c
+++ b/drivers/usb/serial/usb-serial.c
@@ -358,10 +358,6 @@
dbg("%s - port %d, %d byte(s)", __func__, port->number, count);
- /* count is managed under the mutex lock for the tty so cannot
- drop to zero until after the last close completes */
- WARN_ON(!port->port.count);
-
/* pass on to the driver specific version of this function */
retval = port->serial->type->write(tty, port, buf, count);
@@ -373,7 +369,6 @@
{
struct usb_serial_port *port = tty->driver_data;
dbg("%s - port %d", __func__, port->number);
- WARN_ON(!port->port.count);
/* pass on to the driver specific version of this function */
return port->serial->type->write_room(tty);
}
@@ -381,7 +376,7 @@
static int serial_chars_in_buffer(struct tty_struct *tty)
{
struct usb_serial_port *port = tty->driver_data;
- dbg("%s = port %d", __func__, port->number);
+ dbg("%s - port %d", __func__, port->number);
/* if the device was unplugged then any remaining characters
fell out of the connector ;) */
@@ -396,7 +391,6 @@
struct usb_serial_port *port = tty->driver_data;
dbg("%s - port %d", __func__, port->number);
- WARN_ON(!port->port.count);
/* pass on to the driver specific version of this function */
if (port->serial->type->throttle)
port->serial->type->throttle(tty);
@@ -407,7 +401,6 @@
struct usb_serial_port *port = tty->driver_data;
dbg("%s - port %d", __func__, port->number);
- WARN_ON(!port->port.count);
/* pass on to the driver specific version of this function */
if (port->serial->type->unthrottle)
port->serial->type->unthrottle(tty);
@@ -421,8 +414,6 @@
dbg("%s - port %d, cmd 0x%.4x", __func__, port->number, cmd);
- WARN_ON(!port->port.count);
-
/* pass on to the driver specific version of this function
if it is available */
if (port->serial->type->ioctl) {
@@ -437,7 +428,6 @@
struct usb_serial_port *port = tty->driver_data;
dbg("%s - port %d", __func__, port->number);
- WARN_ON(!port->port.count);
/* pass on to the driver specific version of this function
if it is available */
if (port->serial->type->set_termios)
@@ -452,7 +442,6 @@
dbg("%s - port %d", __func__, port->number);
- WARN_ON(!port->port.count);
/* pass on to the driver specific version of this function
if it is available */
if (port->serial->type->break_ctl)
@@ -513,7 +502,6 @@
dbg("%s - port %d", __func__, port->number);
- WARN_ON(!port->port.count);
if (port->serial->type->tiocmget)
return port->serial->type->tiocmget(tty, file);
return -EINVAL;
@@ -526,7 +514,6 @@
dbg("%s - port %d", __func__, port->number);
- WARN_ON(!port->port.count);
if (port->serial->type->tiocmset)
return port->serial->type->tiocmset(tty, file, set, clear);
return -EINVAL;
diff --git a/drivers/usb/serial/usb_debug.c b/drivers/usb/serial/usb_debug.c
index 7b5bfc4..252cc2d 100644
--- a/drivers/usb/serial/usb_debug.c
+++ b/drivers/usb/serial/usb_debug.c
@@ -29,7 +29,7 @@
0xff,
};
-static struct usb_device_id id_table [] = {
+static const struct usb_device_id id_table[] = {
{ USB_DEVICE(0x0525, 0x127a) },
{ },
};
diff --git a/drivers/usb/serial/visor.c b/drivers/usb/serial/visor.c
index ad1f923..0949427 100644
--- a/drivers/usb/serial/visor.c
+++ b/drivers/usb/serial/visor.c
@@ -368,7 +368,7 @@
spin_lock_irqsave(&priv->lock, flags);
if (priv->outstanding_urbs > URB_UPPER_LIMIT) {
spin_unlock_irqrestore(&priv->lock, flags);
- dbg("%s - write limit hit\n", __func__);
+ dbg("%s - write limit hit", __func__);
return 0;
}
priv->outstanding_urbs++;
@@ -446,7 +446,7 @@
spin_lock_irqsave(&priv->lock, flags);
if (priv->outstanding_urbs > URB_UPPER_LIMIT * 2 / 3) {
spin_unlock_irqrestore(&priv->lock, flags);
- dbg("%s - write limit hit\n", __func__);
+ dbg("%s - write limit hit", __func__);
return 0;
}
spin_unlock_irqrestore(&priv->lock, flags);
@@ -503,13 +503,9 @@
if (urb->actual_length) {
tty = tty_port_tty_get(&port->port);
if (tty) {
- available_room = tty_buffer_request_room(tty,
- urb->actual_length);
- if (available_room) {
- tty_insert_flip_string(tty, data,
- available_room);
- tty_flip_buffer_push(tty);
- }
+ tty_insert_flip_string(tty, data,
+ urb->actual_length);
+ tty_flip_buffer_push(tty);
tty_kref_put(tty);
}
spin_lock(&priv->lock);
@@ -807,10 +803,14 @@
{
struct device *dev = &serial->dev->dev;
int result;
- u8 data;
+ u8 *data;
dbg("%s", __func__);
+ data = kmalloc(1, GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
/*
* Note that PEG-300 series devices expect the following two calls.
*/
@@ -818,36 +818,42 @@
/* get the config number */
result = usb_control_msg(serial->dev, usb_rcvctrlpipe(serial->dev, 0),
USB_REQ_GET_CONFIGURATION, USB_DIR_IN,
- 0, 0, &data, 1, 3000);
+ 0, 0, data, 1, 3000);
if (result < 0) {
dev_err(dev, "%s: get config number failed: %d\n",
__func__, result);
- return result;
+ goto out;
}
if (result != 1) {
dev_err(dev, "%s: get config number bad return length: %d\n",
__func__, result);
- return -EIO;
+ result = -EIO;
+ goto out;
}
/* get the interface number */
result = usb_control_msg(serial->dev, usb_rcvctrlpipe(serial->dev, 0),
USB_REQ_GET_INTERFACE,
USB_DIR_IN | USB_RECIP_INTERFACE,
- 0, 0, &data, 1, 3000);
+ 0, 0, data, 1, 3000);
if (result < 0) {
dev_err(dev, "%s: get interface number failed: %d\n",
__func__, result);
- return result;
+ goto out;
}
if (result != 1) {
dev_err(dev,
"%s: get interface number bad return length: %d\n",
__func__, result);
- return -EIO;
+ result = -EIO;
+ goto out;
}
- return generic_startup(serial);
+ result = generic_startup(serial);
+out:
+ kfree(data);
+
+ return result;
}
static int treo_attach(struct usb_serial *serial)
diff --git a/drivers/usb/serial/vivopay-serial.c b/drivers/usb/serial/vivopay-serial.c
new file mode 100644
index 0000000..f719d00
--- /dev/null
+++ b/drivers/usb/serial/vivopay-serial.c
@@ -0,0 +1,76 @@
+/*
+ * Copyright (C) 2001-2005 Greg Kroah-Hartman (greg@kroah.com)
+ * Copyright (C) 2009 Outpost Embedded, LLC
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/tty.h>
+#include <linux/module.h>
+#include <linux/usb.h>
+#include <linux/usb/serial.h>
+
+
+#define DRIVER_VERSION "v1.0"
+#define DRIVER_DESC "ViVOpay USB Serial Driver"
+
+#define VIVOPAY_VENDOR_ID 0x1d5f
+
+
+static struct usb_device_id id_table [] = {
+ /* ViVOpay 8800 */
+ { USB_DEVICE(VIVOPAY_VENDOR_ID, 0x1004) },
+ { },
+};
+
+MODULE_DEVICE_TABLE(usb, id_table);
+
+static struct usb_driver vivopay_serial_driver = {
+ .name = "vivopay-serial",
+ .probe = usb_serial_probe,
+ .disconnect = usb_serial_disconnect,
+ .id_table = id_table,
+ .no_dynamic_id = 1,
+};
+
+static struct usb_serial_driver vivopay_serial_device = {
+ .driver = {
+ .owner = THIS_MODULE,
+ .name = "vivopay-serial",
+ },
+ .id_table = id_table,
+ .usb_driver = &vivopay_serial_driver,
+ .num_ports = 1,
+};
+
+static int __init vivopay_serial_init(void)
+{
+ int retval;
+ retval = usb_serial_register(&vivopay_serial_device);
+ if (retval)
+ goto failed_usb_serial_register;
+ retval = usb_register(&vivopay_serial_driver);
+ if (retval)
+ goto failed_usb_register;
+ printk(KERN_INFO KBUILD_MODNAME ": " DRIVER_VERSION ":"
+ DRIVER_DESC "\n");
+ return 0;
+failed_usb_register:
+ usb_serial_deregister(&vivopay_serial_device);
+failed_usb_serial_register:
+ return retval;
+}
+
+static void __exit vivopay_serial_exit(void)
+{
+ usb_deregister(&vivopay_serial_driver);
+ usb_serial_deregister(&vivopay_serial_device);
+}
+
+module_init(vivopay_serial_init);
+module_exit(vivopay_serial_exit);
+
+MODULE_AUTHOR("Forest Bond <forest.bond@outpostembedded.com>");
+MODULE_DESCRIPTION(DRIVER_DESC);
+MODULE_VERSION(DRIVER_VERSION);
+MODULE_LICENSE("GPL");
diff --git a/drivers/usb/serial/whiteheat.c b/drivers/usb/serial/whiteheat.c
index 1093d2e..12ed820 100644
--- a/drivers/usb/serial/whiteheat.c
+++ b/drivers/usb/serial/whiteheat.c
@@ -111,17 +111,17 @@
separate ID tables, and then a third table that combines them
just for the purpose of exporting the autoloading information.
*/
-static struct usb_device_id id_table_std [] = {
+static const struct usb_device_id id_table_std[] = {
{ USB_DEVICE(CONNECT_TECH_VENDOR_ID, CONNECT_TECH_WHITE_HEAT_ID) },
{ } /* Terminating entry */
};
-static struct usb_device_id id_table_prerenumeration [] = {
+static const struct usb_device_id id_table_prerenumeration[] = {
{ USB_DEVICE(CONNECT_TECH_VENDOR_ID, CONNECT_TECH_FAKE_WHITE_HEAT_ID) },
{ } /* Terminating entry */
};
-static struct usb_device_id id_table_combined [] = {
+static const struct usb_device_id id_table_combined[] = {
{ USB_DEVICE(CONNECT_TECH_VENDOR_ID, CONNECT_TECH_WHITE_HEAT_ID) },
{ USB_DEVICE(CONNECT_TECH_VENDOR_ID, CONNECT_TECH_FAKE_WHITE_HEAT_ID) },
{ } /* Terminating entry */
@@ -1492,21 +1492,9 @@
wrap = list_entry(tmp, struct whiteheat_urb_wrap, list);
urb = wrap->urb;
- if (tty && urb->actual_length) {
- int len = tty_buffer_request_room(tty,
- urb->actual_length);
- /* This stuff can go away now I suspect */
- if (unlikely(len < urb->actual_length)) {
- spin_lock_irqsave(&info->lock, flags);
- list_add(tmp, &info->rx_urb_q);
- spin_unlock_irqrestore(&info->lock, flags);
- tty_flip_buffer_push(tty);
- schedule_work(&info->rx_work);
- goto out;
- }
- tty_insert_flip_string(tty, urb->transfer_buffer, len);
- sent += len;
- }
+ if (tty && urb->actual_length)
+ sent += tty_insert_flip_string(tty,
+ urb->transfer_buffer, urb->actual_length);
urb->dev = port->serial->dev;
result = usb_submit_urb(urb, GFP_ATOMIC);
diff --git a/drivers/usb/storage/onetouch.c b/drivers/usb/storage/onetouch.c
index 80e65f2..198bb3e 100644
--- a/drivers/usb/storage/onetouch.c
+++ b/drivers/usb/storage/onetouch.c
@@ -202,7 +202,7 @@
goto fail1;
onetouch->data = usb_buffer_alloc(udev, ONETOUCH_PKT_LEN,
- GFP_ATOMIC, &onetouch->data_dma);
+ GFP_KERNEL, &onetouch->data_dma);
if (!onetouch->data)
goto fail1;
diff --git a/drivers/usb/storage/scsiglue.c b/drivers/usb/storage/scsiglue.c
index aadc16b..4cc0355 100644
--- a/drivers/usb/storage/scsiglue.c
+++ b/drivers/usb/storage/scsiglue.c
@@ -133,7 +133,7 @@
if (us->fflags & US_FL_MAX_SECTORS_MIN)
max_sectors = PAGE_CACHE_SIZE >> 9;
- if (queue_max_sectors(sdev->request_queue) > max_sectors)
+ if (queue_max_hw_sectors(sdev->request_queue) > max_sectors)
blk_queue_max_hw_sectors(sdev->request_queue,
max_sectors);
} else if (sdev->type == TYPE_TAPE) {
@@ -484,7 +484,7 @@
{
struct scsi_device *sdev = to_scsi_device(dev);
- return sprintf(buf, "%u\n", queue_max_sectors(sdev->request_queue));
+ return sprintf(buf, "%u\n", queue_max_hw_sectors(sdev->request_queue));
}
/* Input routine for the sysfs max_sectors file */
@@ -494,9 +494,9 @@
struct scsi_device *sdev = to_scsi_device(dev);
unsigned short ms;
- if (sscanf(buf, "%hu", &ms) > 0 && ms <= SCSI_DEFAULT_MAX_SECTORS) {
+ if (sscanf(buf, "%hu", &ms) > 0) {
blk_queue_max_hw_sectors(sdev->request_queue, ms);
- return strlen(buf);
+ return count;
}
return -EINVAL;
}
@@ -539,7 +539,7 @@
.slave_configure = slave_configure,
/* lots of sg segments can be handled */
- .sg_tablesize = SG_ALL,
+ .sg_tablesize = SCSI_MAX_SG_CHAIN_SEGMENTS,
/* limit the total size of a transfer to 120 KB */
.max_sectors = 240,
diff --git a/drivers/usb/storage/shuttle_usbat.c b/drivers/usb/storage/shuttle_usbat.c
index b62a288..bd3f415 100644
--- a/drivers/usb/storage/shuttle_usbat.c
+++ b/drivers/usb/storage/shuttle_usbat.c
@@ -1628,10 +1628,10 @@
return USB_STOR_TRANSPORT_ERROR;
}
- if ( (result = usbat_multiple_write(us,
- registers, data, 7)) != USB_STOR_TRANSPORT_GOOD) {
+ result = usbat_multiple_write(us, registers, data, 7);
+
+ if (result != USB_STOR_TRANSPORT_GOOD)
return result;
- }
/*
* Write the 12-byte command header.
@@ -1643,12 +1643,11 @@
* AT SPEED 4 IS UNRELIABLE!!!
*/
- if ((result = usbat_write_block(us,
- USBAT_ATA, srb->cmnd, 12,
- (srb->cmnd[0]==GPCMD_BLANK ? 75 : 10), 0) !=
- USB_STOR_TRANSPORT_GOOD)) {
+ result = usbat_write_block(us, USBAT_ATA, srb->cmnd, 12,
+ srb->cmnd[0] == GPCMD_BLANK ? 75 : 10, 0);
+
+ if (result != USB_STOR_TRANSPORT_GOOD)
return result;
- }
/* If there is response data to be read in then do it here. */
diff --git a/drivers/usb/storage/transport.c b/drivers/usb/storage/transport.c
index cc313d1..4680381 100644
--- a/drivers/usb/storage/transport.c
+++ b/drivers/usb/storage/transport.c
@@ -47,6 +47,8 @@
#include <linux/errno.h>
#include <linux/slab.h>
+#include <linux/usb/quirks.h>
+
#include <scsi/scsi.h>
#include <scsi/scsi_eh.h>
#include <scsi/scsi_device.h>
@@ -1297,6 +1299,10 @@
{
int result;
+ /* for these devices we must use the class-specific method */
+ if (us->pusb_dev->quirks & USB_QUIRK_RESET_MORPHS)
+ return -EPERM;
+
result = usb_lock_device_for_reset(us->pusb_dev, us->pusb_intf);
if (result < 0)
US_DEBUGP("unable to lock device for reset: %d\n", result);
diff --git a/drivers/usb/storage/unusual_devs.h b/drivers/usb/storage/unusual_devs.h
index 49575fb..98b549b 100644
--- a/drivers/usb/storage/unusual_devs.h
+++ b/drivers/usb/storage/unusual_devs.h
@@ -1147,8 +1147,8 @@
0 ),
/* Reported by Jan Dumon <j.dumon@option.com>
- * This device (wrongly) has a vendor-specific device descriptor.
- * The entry is needed so usb-storage can bind to it's mass-storage
+ * These devices (wrongly) have a vendor-specific device descriptor.
+ * These entries are needed so usb-storage can bind to their mass-storage
* interface as an interface driver */
UNUSUAL_DEV( 0x0af0, 0x7501, 0x0000, 0x0000,
"Option",
@@ -1156,6 +1156,90 @@
US_SC_DEVICE, US_PR_DEVICE, NULL,
0 ),
+UNUSUAL_DEV( 0x0af0, 0x7701, 0x0000, 0x0000,
+ "Option",
+ "GI 0451 SD-Card",
+ US_SC_DEVICE, US_PR_DEVICE, NULL,
+ 0 ),
+
+UNUSUAL_DEV( 0x0af0, 0x7706, 0x0000, 0x0000,
+ "Option",
+ "GI 0451 SD-Card",
+ US_SC_DEVICE, US_PR_DEVICE, NULL,
+ 0 ),
+
+UNUSUAL_DEV( 0x0af0, 0x7901, 0x0000, 0x0000,
+ "Option",
+ "GI 0452 SD-Card",
+ US_SC_DEVICE, US_PR_DEVICE, NULL,
+ 0 ),
+
+UNUSUAL_DEV( 0x0af0, 0x7A01, 0x0000, 0x0000,
+ "Option",
+ "GI 0461 SD-Card",
+ US_SC_DEVICE, US_PR_DEVICE, NULL,
+ 0 ),
+
+UNUSUAL_DEV( 0x0af0, 0x7A05, 0x0000, 0x0000,
+ "Option",
+ "GI 0461 SD-Card",
+ US_SC_DEVICE, US_PR_DEVICE, NULL,
+ 0 ),
+
+UNUSUAL_DEV( 0x0af0, 0x8300, 0x0000, 0x0000,
+ "Option",
+ "GI 033x SD-Card",
+ US_SC_DEVICE, US_PR_DEVICE, NULL,
+ 0 ),
+
+UNUSUAL_DEV( 0x0af0, 0x8302, 0x0000, 0x0000,
+ "Option",
+ "GI 033x SD-Card",
+ US_SC_DEVICE, US_PR_DEVICE, NULL,
+ 0 ),
+
+UNUSUAL_DEV( 0x0af0, 0x8304, 0x0000, 0x0000,
+ "Option",
+ "GI 033x SD-Card",
+ US_SC_DEVICE, US_PR_DEVICE, NULL,
+ 0 ),
+
+UNUSUAL_DEV( 0x0af0, 0xc100, 0x0000, 0x0000,
+ "Option",
+ "GI 070x SD-Card",
+ US_SC_DEVICE, US_PR_DEVICE, NULL,
+ 0 ),
+
+UNUSUAL_DEV( 0x0af0, 0xd057, 0x0000, 0x0000,
+ "Option",
+ "GI 1505 SD-Card",
+ US_SC_DEVICE, US_PR_DEVICE, NULL,
+ 0 ),
+
+UNUSUAL_DEV( 0x0af0, 0xd058, 0x0000, 0x0000,
+ "Option",
+ "GI 1509 SD-Card",
+ US_SC_DEVICE, US_PR_DEVICE, NULL,
+ 0 ),
+
+UNUSUAL_DEV( 0x0af0, 0xd157, 0x0000, 0x0000,
+ "Option",
+ "GI 1515 SD-Card",
+ US_SC_DEVICE, US_PR_DEVICE, NULL,
+ 0 ),
+
+UNUSUAL_DEV( 0x0af0, 0xd257, 0x0000, 0x0000,
+ "Option",
+ "GI 1215 SD-Card",
+ US_SC_DEVICE, US_PR_DEVICE, NULL,
+ 0 ),
+
+UNUSUAL_DEV( 0x0af0, 0xd357, 0x0000, 0x0000,
+ "Option",
+ "GI 1505 SD-Card",
+ US_SC_DEVICE, US_PR_DEVICE, NULL,
+ 0 ),
+
/* Reported by Ben Efros <ben@pc-doctor.com> */
UNUSUAL_DEV( 0x0bc2, 0x3010, 0x0000, 0x0000,
"Seagate",
diff --git a/drivers/usb/usb-skeleton.c b/drivers/usb/usb-skeleton.c
index b1e579c..6152278 100644
--- a/drivers/usb/usb-skeleton.c
+++ b/drivers/usb/usb-skeleton.c
@@ -28,7 +28,7 @@
#define USB_SKEL_PRODUCT_ID 0xfff0
/* table of devices that work with this driver */
-static struct usb_device_id skel_table[] = {
+static const struct usb_device_id skel_table[] = {
{ USB_DEVICE(USB_SKEL_VENDOR_ID, USB_SKEL_PRODUCT_ID) },
{ } /* Terminating entry */
};
diff --git a/drivers/usb/wusbcore/cbaf.c b/drivers/usb/wusbcore/cbaf.c
index 25eae40..51a8e0d 100644
--- a/drivers/usb/wusbcore/cbaf.c
+++ b/drivers/usb/wusbcore/cbaf.c
@@ -641,7 +641,7 @@
kzfree(cbaf);
}
-static struct usb_device_id cbaf_id_table[] = {
+static const struct usb_device_id cbaf_id_table[] = {
{ USB_INTERFACE_INFO(0xef, 0x03, 0x01), },
{ },
};
diff --git a/drivers/usb/wusbcore/devconnect.c b/drivers/usb/wusbcore/devconnect.c
index dced419..1c91828 100644
--- a/drivers/usb/wusbcore/devconnect.c
+++ b/drivers/usb/wusbcore/devconnect.c
@@ -868,7 +868,7 @@
* reference that we'll drop.
*
* First we need to determine if the device is a WUSB device (else we
- * ignore it). For that we use the speed setting (USB_SPEED_VARIABLE)
+ * ignore it). For that we use the speed setting (USB_SPEED_WIRELESS)
* [FIXME: maybe we'd need something more definitive]. If so, we track
* its usb_busd and from there, the WUSB HC.
*
diff --git a/drivers/usb/wusbcore/mmc.c b/drivers/usb/wusbcore/mmc.c
index 3b52161..2d82739 100644
--- a/drivers/usb/wusbcore/mmc.c
+++ b/drivers/usb/wusbcore/mmc.c
@@ -263,7 +263,7 @@
{
int result = 0;
- if (memcmp(chid, &wusb_ckhdid_zero, sizeof(chid)) == 0)
+ if (memcmp(chid, &wusb_ckhdid_zero, sizeof(*chid)) == 0)
chid = NULL;
mutex_lock(&wusbhc->mutex);
diff --git a/drivers/xen/events.c b/drivers/xen/events.c
index ce602dd..2f84137 100644
--- a/drivers/xen/events.c
+++ b/drivers/xen/events.c
@@ -649,9 +649,13 @@
int bit_idx = __ffs(pending_bits);
int port = (word_idx * BITS_PER_LONG) + bit_idx;
int irq = evtchn_to_irq[port];
+ struct irq_desc *desc;
- if (irq != -1)
- handle_irq(irq, regs);
+ if (irq != -1) {
+ desc = irq_to_desc(irq);
+ if (desc)
+ generic_handle_irq_desc(irq, desc);
+ }
}
}
diff --git a/fs/cifs/CHANGES b/fs/cifs/CHANGES
index 49503d2..bc0025c 100644
--- a/fs/cifs/CHANGES
+++ b/fs/cifs/CHANGES
@@ -1,6 +1,7 @@
Version 1.62
------------
-Add sockopt=TCP_NODELAY mount option.
+Add sockopt=TCP_NODELAY mount option. EA (xattr) routines hardened
+to more strictly handle corrupt frames.
Version 1.61
------------
diff --git a/fs/cifs/cifsglob.h b/fs/cifs/cifsglob.h
index ed751bb..a1c817e 100644
--- a/fs/cifs/cifsglob.h
+++ b/fs/cifs/cifsglob.h
@@ -205,7 +205,7 @@
struct cifsSesInfo {
struct list_head smb_ses_list;
struct list_head tcon_list;
- struct semaphore sesSem;
+ struct mutex session_mutex;
#if 0
struct cifsUidInfo *uidInfo; /* pointer to user info */
#endif
diff --git a/fs/cifs/cifspdu.h b/fs/cifs/cifspdu.h
index 3877737..14d036d 100644
--- a/fs/cifs/cifspdu.h
+++ b/fs/cifs/cifspdu.h
@@ -415,10 +415,10 @@
__u8 WordCount;
} __attribute__((packed));
/* given a pointer to an smb_hdr retrieve the value of byte count */
-#define BCC(smb_var) (*(__u16 *)((char *)smb_var + sizeof(struct smb_hdr) + (2 * smb_var->WordCount)))
-#define BCC_LE(smb_var) (*(__le16 *)((char *)smb_var + sizeof(struct smb_hdr) + (2 * smb_var->WordCount)))
+#define BCC(smb_var) (*(__u16 *)((char *)(smb_var) + sizeof(struct smb_hdr) + (2 * (smb_var)->WordCount)))
+#define BCC_LE(smb_var) (*(__le16 *)((char *)(smb_var) + sizeof(struct smb_hdr) + (2 * (smb_var)->WordCount)))
/* given a pointer to an smb_hdr retrieve the pointer to the byte area */
-#define pByteArea(smb_var) ((unsigned char *)smb_var + sizeof(struct smb_hdr) + (2 * smb_var->WordCount) + 2)
+#define pByteArea(smb_var) ((unsigned char *)(smb_var) + sizeof(struct smb_hdr) + (2 * (smb_var)->WordCount) + 2)
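
A worked expansion (illustration only, not part of the patch) shows why the macro arguments gain parentheses here: with the old definitions the cast and the "2 *" bind to only part of an expression argument. The example argument "psmb + 1" below is made up for the illustration.

/*
 *	BCC(psmb + 1)
 *
 * would previously have expanded to
 *
 *	(*(__u16 *)((char *)psmb + 1 + sizeof(struct smb_hdr)
 *			+ (2 * psmb + 1->WordCount)))
 *
 * where the cast and the multiplication apply to "psmb" alone -- not the
 * intended address arithmetic, and not even valid C.  With every use of the
 * parameter parenthesized, calls on expressions such as pByteArea(&pSMBr->hdr)
 * (used later in this patch) expand correctly.
 */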
/*
* Computer Name Length (since Netbios name was length 16 with last byte 0x20)
diff --git a/fs/cifs/cifsproto.h b/fs/cifs/cifsproto.h
index 5646727..88e2bc4 100644
--- a/fs/cifs/cifsproto.h
+++ b/fs/cifs/cifsproto.h
@@ -363,13 +363,10 @@
__u32 filter, struct file *file, int multishot,
const struct nls_table *nls_codepage);
extern ssize_t CIFSSMBQAllEAs(const int xid, struct cifsTconInfo *tcon,
- const unsigned char *searchName, char *EAData,
+ const unsigned char *searchName,
+ const unsigned char *ea_name, char *EAData,
size_t bufsize, const struct nls_table *nls_codepage,
int remap_special_chars);
-extern ssize_t CIFSSMBQueryEA(const int xid, struct cifsTconInfo *tcon,
- const unsigned char *searchName, const unsigned char *ea_name,
- unsigned char *ea_value, size_t buf_size,
- const struct nls_table *nls_codepage, int remap_special_chars);
extern int CIFSSMBSetEA(const int xid, struct cifsTconInfo *tcon,
const char *fileName, const char *ea_name,
const void *ea_value, const __u16 ea_value_len,
diff --git a/fs/cifs/cifssmb.c b/fs/cifs/cifssmb.c
index 941441d..9d17df3 100644
--- a/fs/cifs/cifssmb.c
+++ b/fs/cifs/cifssmb.c
@@ -170,19 +170,19 @@
* need to prevent multiple threads trying to simultaneously
* reconnect the same SMB session
*/
- down(&ses->sesSem);
+ mutex_lock(&ses->session_mutex);
if (ses->need_reconnect)
rc = cifs_setup_session(0, ses, nls_codepage);
/* do we need to reconnect tcon? */
if (rc || !tcon->need_reconnect) {
- up(&ses->sesSem);
+ mutex_unlock(&ses->session_mutex);
goto out;
}
mark_open_files_invalid(tcon);
rc = CIFSTCon(0, ses, tcon->treeName, tcon, nls_codepage);
- up(&ses->sesSem);
+ mutex_unlock(&ses->session_mutex);
cFYI(1, ("reconnect tcon rc = %d", rc));
if (rc)
@@ -700,13 +700,13 @@
if (!ses || !ses->server)
return -EIO;
- down(&ses->sesSem);
+ mutex_lock(&ses->session_mutex);
if (ses->need_reconnect)
goto session_already_dead; /* no need to send SMBlogoff if uid
already closed due to reconnect */
rc = small_smb_init(SMB_COM_LOGOFF_ANDX, 2, NULL, (void **)&pSMB);
if (rc) {
- up(&ses->sesSem);
+ mutex_unlock(&ses->session_mutex);
return rc;
}
@@ -721,7 +721,7 @@
pSMB->AndXCommand = 0xFF;
rc = SendReceiveNoRsp(xid, ses, (struct smb_hdr *) pSMB, 0);
session_already_dead:
- up(&ses->sesSem);
+ mutex_unlock(&ses->session_mutex);
/* if session dead then we do not need to do ulogoff,
since server closed smb session, no sense reporting
@@ -5269,22 +5269,34 @@
cifs_buf_release(pSMB);
return rc;
}
+
#ifdef CONFIG_CIFS_XATTR
+/*
+ * Do a path-based QUERY_ALL_EAS call and parse the result. This is a common
+ * function used by listxattr and getxattr type calls. When ea_name is set,
+ * it looks for that attribute name and stuffs that value into the EAData
+ * buffer. When ea_name is NULL, it stuffs a list of attribute names into the
+ * buffer. In both cases, the return value is either the length of the
+ * resulting data or a negative error code. If EAData is a NULL pointer then
+ * the data isn't copied to it, but the length is returned.
+ */
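
To make the two calling modes described above concrete, here is an illustrative pair of wrappers; the wrapper names are made up and do not come from fs/cifs/xattr.c, but the argument order follows the new prototype added to cifsproto.h in this patch.

static ssize_t example_get_ea(const int xid, struct cifsTconInfo *tcon,
			      const unsigned char *path,
			      const unsigned char *ea_name,
			      char *value, size_t size,
			      const struct nls_table *nls, int remap)
{
	/* ea_name != NULL: copy that attribute's value (or return -ENODATA) */
	return CIFSSMBQAllEAs(xid, tcon, path, ea_name, value, size,
			      nls, remap);
}

static ssize_t example_list_eas(const int xid, struct cifsTconInfo *tcon,
				const unsigned char *path,
				char *list, size_t size,
				const struct nls_table *nls, int remap)
{
	/*
	 * ea_name == NULL: fill "list" with "user."-prefixed, NUL-terminated
	 * attribute names; passing size == 0 only reports the length needed.
	 */
	return CIFSSMBQAllEAs(xid, tcon, path, NULL, list, size, nls, remap);
}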
ssize_t
CIFSSMBQAllEAs(const int xid, struct cifsTconInfo *tcon,
- const unsigned char *searchName,
- char *EAData, size_t buf_size,
- const struct nls_table *nls_codepage, int remap)
+ const unsigned char *searchName, const unsigned char *ea_name,
+ char *EAData, size_t buf_size,
+ const struct nls_table *nls_codepage, int remap)
{
/* BB assumes one setup word */
TRANSACTION2_QPI_REQ *pSMB = NULL;
TRANSACTION2_QPI_RSP *pSMBr = NULL;
int rc = 0;
int bytes_returned;
- int name_len;
+ int list_len;
+ struct fealist *ea_response_data;
struct fea *temp_fea;
char *temp_ptr;
- __u16 params, byte_count;
+ char *end_of_smb;
+ __u16 params, byte_count, data_offset;
cFYI(1, ("In Query All EAs path %s", searchName));
QAllEAsRetry:
@@ -5294,22 +5306,22 @@
return rc;
if (pSMB->hdr.Flags2 & SMBFLG2_UNICODE) {
- name_len =
+ list_len =
cifsConvertToUCS((__le16 *) pSMB->FileName, searchName,
PATH_MAX, nls_codepage, remap);
- name_len++; /* trailing null */
- name_len *= 2;
+ list_len++; /* trailing null */
+ list_len *= 2;
} else { /* BB improve the check for buffer overruns BB */
- name_len = strnlen(searchName, PATH_MAX);
- name_len++; /* trailing null */
- strncpy(pSMB->FileName, searchName, name_len);
+ list_len = strnlen(searchName, PATH_MAX);
+ list_len++; /* trailing null */
+ strncpy(pSMB->FileName, searchName, list_len);
}
- params = 2 /* level */ + 4 /* reserved */ + name_len /* includes NUL */;
+ params = 2 /* level */ + 4 /* reserved */ + list_len /* includes NUL */;
pSMB->TotalDataCount = 0;
pSMB->MaxParameterCount = cpu_to_le16(2);
/* BB find exact max SMB PDU from sess structure BB */
- pSMB->MaxDataCount = cpu_to_le16(4000);
+ pSMB->MaxDataCount = cpu_to_le16(CIFSMaxBufSize);
pSMB->MaxSetupCount = 0;
pSMB->Reserved = 0;
pSMB->Flags = 0;
@@ -5334,85 +5346,114 @@
(struct smb_hdr *) pSMBr, &bytes_returned, 0);
if (rc) {
cFYI(1, ("Send error in QueryAllEAs = %d", rc));
- } else { /* decode response */
- rc = validate_t2((struct smb_t2_rsp *)pSMBr);
+ goto QAllEAsOut;
+ }
- /* BB also check enough total bytes returned */
- /* BB we need to improve the validity checking
- of these trans2 responses */
- if (rc || (pSMBr->ByteCount < 4))
- rc = -EIO; /* bad smb */
- /* else if (pFindData){
- memcpy((char *) pFindData,
- (char *) &pSMBr->hdr.Protocol +
- data_offset, kl);
- }*/ else {
- /* check that length of list is not more than bcc */
- /* check that each entry does not go beyond length
- of list */
- /* check that each element of each entry does not
- go beyond end of list */
- __u16 data_offset = le16_to_cpu(pSMBr->t2.DataOffset);
- struct fealist *ea_response_data;
- rc = 0;
- /* validate_trans2_offsets() */
- /* BB check if start of smb + data_offset > &bcc+ bcc */
- ea_response_data = (struct fealist *)
- (((char *) &pSMBr->hdr.Protocol) +
- data_offset);
- name_len = le32_to_cpu(ea_response_data->list_len);
- cFYI(1, ("ea length %d", name_len));
- if (name_len <= 8) {
- /* returned EA size zeroed at top of function */
- cFYI(1, ("empty EA list returned from server"));
- } else {
- /* account for ea list len */
- name_len -= 4;
- temp_fea = ea_response_data->list;
- temp_ptr = (char *)temp_fea;
- while (name_len > 0) {
- __u16 value_len;
- name_len -= 4;
- temp_ptr += 4;
- rc += temp_fea->name_len;
- /* account for prefix user. and trailing null */
- rc = rc + 5 + 1;
- if (rc < (int)buf_size) {
- memcpy(EAData, "user.", 5);
- EAData += 5;
- memcpy(EAData, temp_ptr,
- temp_fea->name_len);
- EAData += temp_fea->name_len;
- /* null terminate name */
- *EAData = 0;
- EAData = EAData + 1;
- } else if (buf_size == 0) {
- /* skip copy - calc size only */
- } else {
- /* stop before overrun buffer */
- rc = -ERANGE;
- break;
- }
- name_len -= temp_fea->name_len;
- temp_ptr += temp_fea->name_len;
- /* account for trailing null */
- name_len--;
- temp_ptr++;
- value_len =
- le16_to_cpu(temp_fea->value_len);
- name_len -= value_len;
- temp_ptr += value_len;
- /* BB check that temp_ptr is still
- within the SMB BB*/
- /* no trailing null to account for
- in value len */
- /* go on to next EA */
- temp_fea = (struct fea *)temp_ptr;
+ /* BB also check enough total bytes returned */
+ /* BB we need to improve the validity checking
+ of these trans2 responses */
+
+ rc = validate_t2((struct smb_t2_rsp *)pSMBr);
+ if (rc || (pSMBr->ByteCount < 4)) {
+ rc = -EIO; /* bad smb */
+ goto QAllEAsOut;
+ }
+
+ /* check that length of list is not more than bcc */
+ /* check that each entry does not go beyond length
+ of list */
+ /* check that each element of each entry does not
+ go beyond end of list */
+ /* validate_trans2_offsets() */
+ /* BB check if start of smb + data_offset > &bcc+ bcc */
+
+ data_offset = le16_to_cpu(pSMBr->t2.DataOffset);
+ ea_response_data = (struct fealist *)
+ (((char *) &pSMBr->hdr.Protocol) + data_offset);
+
+ list_len = le32_to_cpu(ea_response_data->list_len);
+ cFYI(1, ("ea length %d", list_len));
+ if (list_len <= 8) {
+ cFYI(1, ("empty EA list returned from server"));
+ goto QAllEAsOut;
+ }
+
+ /* make sure list_len doesn't go past end of SMB */
+ end_of_smb = (char *)pByteArea(&pSMBr->hdr) + BCC(&pSMBr->hdr);
+ if ((char *)ea_response_data + list_len > end_of_smb) {
+ cFYI(1, ("EA list appears to go beyond SMB"));
+ rc = -EIO;
+ goto QAllEAsOut;
+ }
+
+ /* account for ea list len */
+ list_len -= 4;
+ temp_fea = ea_response_data->list;
+ temp_ptr = (char *)temp_fea;
+ while (list_len > 0) {
+ unsigned int name_len;
+ __u16 value_len;
+
+ list_len -= 4;
+ temp_ptr += 4;
+ /* make sure we can read name_len and value_len */
+ if (list_len < 0) {
+ cFYI(1, ("EA entry goes beyond length of list"));
+ rc = -EIO;
+ goto QAllEAsOut;
+ }
+
+ name_len = temp_fea->name_len;
+ value_len = le16_to_cpu(temp_fea->value_len);
+ list_len -= name_len + 1 + value_len;
+ if (list_len < 0) {
+ cFYI(1, ("EA entry goes beyond length of list"));
+ rc = -EIO;
+ goto QAllEAsOut;
+ }
+
+ if (ea_name) {
+ if (strncmp(ea_name, temp_ptr, name_len) == 0) {
+ temp_ptr += name_len + 1;
+ rc = value_len;
+ if (buf_size == 0)
+ goto QAllEAsOut;
+ if ((size_t)value_len > buf_size) {
+ rc = -ERANGE;
+ goto QAllEAsOut;
}
+ memcpy(EAData, temp_ptr, value_len);
+ goto QAllEAsOut;
+ }
+ } else {
+ /* account for prefix user. and trailing null */
+ rc += (5 + 1 + name_len);
+ if (rc < (int) buf_size) {
+ memcpy(EAData, "user.", 5);
+ EAData += 5;
+ memcpy(EAData, temp_ptr, name_len);
+ EAData += name_len;
+ /* null terminate name */
+ *EAData = 0;
+ ++EAData;
+ } else if (buf_size == 0) {
+ /* skip copy - calc size only */
+ } else {
+ /* stop before overrun buffer */
+ rc = -ERANGE;
+ break;
}
}
+ temp_ptr += name_len + 1 + value_len;
+ temp_fea = (struct fea *)temp_ptr;
}
+
+ /* didn't find the named attribute */
+ if (ea_name)
+ rc = -ENODATA;
+
+QAllEAsOut:
cifs_buf_release(pSMB);
if (rc == -EAGAIN)
goto QAllEAsRetry;
@@ -5420,155 +5461,6 @@
return (ssize_t)rc;
}
-ssize_t CIFSSMBQueryEA(const int xid, struct cifsTconInfo *tcon,
- const unsigned char *searchName, const unsigned char *ea_name,
- unsigned char *ea_value, size_t buf_size,
- const struct nls_table *nls_codepage, int remap)
-{
- TRANSACTION2_QPI_REQ *pSMB = NULL;
- TRANSACTION2_QPI_RSP *pSMBr = NULL;
- int rc = 0;
- int bytes_returned;
- int name_len;
- struct fea *temp_fea;
- char *temp_ptr;
- __u16 params, byte_count;
-
- cFYI(1, ("In Query EA path %s", searchName));
-QEARetry:
- rc = smb_init(SMB_COM_TRANSACTION2, 15, tcon, (void **) &pSMB,
- (void **) &pSMBr);
- if (rc)
- return rc;
-
- if (pSMB->hdr.Flags2 & SMBFLG2_UNICODE) {
- name_len =
- cifsConvertToUCS((__le16 *) pSMB->FileName, searchName,
- PATH_MAX, nls_codepage, remap);
- name_len++; /* trailing null */
- name_len *= 2;
- } else { /* BB improve the check for buffer overruns BB */
- name_len = strnlen(searchName, PATH_MAX);
- name_len++; /* trailing null */
- strncpy(pSMB->FileName, searchName, name_len);
- }
-
- params = 2 /* level */ + 4 /* reserved */ + name_len /* includes NUL */;
- pSMB->TotalDataCount = 0;
- pSMB->MaxParameterCount = cpu_to_le16(2);
- /* BB find exact max SMB PDU from sess structure BB */
- pSMB->MaxDataCount = cpu_to_le16(4000);
- pSMB->MaxSetupCount = 0;
- pSMB->Reserved = 0;
- pSMB->Flags = 0;
- pSMB->Timeout = 0;
- pSMB->Reserved2 = 0;
- pSMB->ParameterOffset = cpu_to_le16(offsetof(
- struct smb_com_transaction2_qpi_req, InformationLevel) - 4);
- pSMB->DataCount = 0;
- pSMB->DataOffset = 0;
- pSMB->SetupCount = 1;
- pSMB->Reserved3 = 0;
- pSMB->SubCommand = cpu_to_le16(TRANS2_QUERY_PATH_INFORMATION);
- byte_count = params + 1 /* pad */ ;
- pSMB->TotalParameterCount = cpu_to_le16(params);
- pSMB->ParameterCount = pSMB->TotalParameterCount;
- pSMB->InformationLevel = cpu_to_le16(SMB_INFO_QUERY_ALL_EAS);
- pSMB->Reserved4 = 0;
- pSMB->hdr.smb_buf_length += byte_count;
- pSMB->ByteCount = cpu_to_le16(byte_count);
-
- rc = SendReceive(xid, tcon->ses, (struct smb_hdr *) pSMB,
- (struct smb_hdr *) pSMBr, &bytes_returned, 0);
- if (rc) {
- cFYI(1, ("Send error in Query EA = %d", rc));
- } else { /* decode response */
- rc = validate_t2((struct smb_t2_rsp *)pSMBr);
-
- /* BB also check enough total bytes returned */
- /* BB we need to improve the validity checking
- of these trans2 responses */
- if (rc || (pSMBr->ByteCount < 4))
- rc = -EIO; /* bad smb */
- /* else if (pFindData){
- memcpy((char *) pFindData,
- (char *) &pSMBr->hdr.Protocol +
- data_offset, kl);
- }*/ else {
- /* check that length of list is not more than bcc */
- /* check that each entry does not go beyond length
- of list */
- /* check that each element of each entry does not
- go beyond end of list */
- __u16 data_offset = le16_to_cpu(pSMBr->t2.DataOffset);
- struct fealist *ea_response_data;
- rc = -ENODATA;
- /* validate_trans2_offsets() */
- /* BB check if start of smb + data_offset > &bcc+ bcc*/
- ea_response_data = (struct fealist *)
- (((char *) &pSMBr->hdr.Protocol) +
- data_offset);
- name_len = le32_to_cpu(ea_response_data->list_len);
- cFYI(1, ("ea length %d", name_len));
- if (name_len <= 8) {
- /* returned EA size zeroed at top of function */
- cFYI(1, ("empty EA list returned from server"));
- } else {
- /* account for ea list len */
- name_len -= 4;
- temp_fea = ea_response_data->list;
- temp_ptr = (char *)temp_fea;
- /* loop through checking if we have a matching
- name and then return the associated value */
- while (name_len > 0) {
- __u16 value_len;
- name_len -= 4;
- temp_ptr += 4;
- value_len =
- le16_to_cpu(temp_fea->value_len);
- /* BB validate that value_len falls within SMB,
- even though maximum for name_len is 255 */
- if (memcmp(temp_fea->name, ea_name,
- temp_fea->name_len) == 0) {
- /* found a match */
- rc = value_len;
- /* account for prefix user. and trailing null */
- if (rc <= (int)buf_size) {
- memcpy(ea_value,
- temp_fea->name+temp_fea->name_len+1,
- rc);
- /* ea values, unlike ea
- names, are not null
- terminated */
- } else if (buf_size == 0) {
- /* skip copy - calc size only */
- } else {
- /* stop before overrun buffer */
- rc = -ERANGE;
- }
- break;
- }
- name_len -= temp_fea->name_len;
- temp_ptr += temp_fea->name_len;
- /* account for trailing null */
- name_len--;
- temp_ptr++;
- name_len -= value_len;
- temp_ptr += value_len;
- /* No trailing null to account for in
- value_len. Go on to next EA */
- temp_fea = (struct fea *)temp_ptr;
- }
- }
- }
- }
- cifs_buf_release(pSMB);
- if (rc == -EAGAIN)
- goto QEARetry;
-
- return (ssize_t)rc;
-}
-
int
CIFSSMBSetEA(const int xid, struct cifsTconInfo *tcon, const char *fileName,
const char *ea_name, const void *ea_value,
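
Editor's note: the rewritten CIFSSMBQAllEAs above replaces the old ad-hoc loop with one that checks, before each dereference, that the advertised list length and each entry's name/value lengths still fit inside the bytes actually received from the server. Below is a minimal userspace sketch of that walk-and-validate pattern; the `ea_entry` layout and all names are invented for illustration and are not the SMB FEA wire format.

```c
#include <stdio.h>
#include <stddef.h>
#include <stdint.h>

/* Simplified entry layout for illustration only -- not the real SMB FEA
 * wire format: a 1-byte name length, a 1-byte value length, then the
 * name, a NUL, and the value bytes. */
struct ea_entry {
	uint8_t name_len;
	uint8_t value_len;
	/* name[name_len], '\0', value[value_len] follow */
};

static int walk_ea_list(const unsigned char *buf, size_t list_len)
{
	const unsigned char *p = buf;

	while (list_len > 0) {
		const struct ea_entry *e;
		size_t entry_len;

		/* make sure the fixed header fits before reading it */
		if (list_len < sizeof(*e))
			return -1;
		e = (const struct ea_entry *)p;

		/* make sure name, NUL and value also fit before advancing */
		entry_len = sizeof(*e) + e->name_len + 1 + e->value_len;
		if (entry_len > list_len)
			return -1;

		printf("EA %.*s (%u value bytes)\n",
		       (int)e->name_len, (const char *)(p + sizeof(*e)),
		       (unsigned)e->value_len);

		p += entry_len;
		list_len -= entry_len;
	}
	return 0;
}

int main(void)
{
	/* one entry: name "user", a NUL, then a 2-byte value */
	unsigned char buf[] = { 4, 2, 'u', 's', 'e', 'r', 0, 'h', 'i' };

	return walk_ea_list(buf, sizeof(buf)) ? 1 : 0;
}
```

Every length is validated against the remaining buffer before it is trusted, which is the property the bounds checks in the patch add over the removed code.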
diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c
index 2e9e09c..45eb6cb 100644
--- a/fs/cifs/connect.c
+++ b/fs/cifs/connect.c
@@ -2388,13 +2388,13 @@
*/
cifs_put_tcp_session(srvTcp);
- down(&pSesInfo->sesSem);
+ mutex_lock(&pSesInfo->session_mutex);
if (pSesInfo->need_reconnect) {
cFYI(1, ("Session needs reconnect"));
rc = cifs_setup_session(xid, pSesInfo,
cifs_sb->local_nls);
}
- up(&pSesInfo->sesSem);
+ mutex_unlock(&pSesInfo->session_mutex);
} else if (!rc) {
cFYI(1, ("Existing smb sess not found"));
pSesInfo = sesInfoAlloc();
@@ -2437,12 +2437,12 @@
}
pSesInfo->linux_uid = volume_info->linux_uid;
pSesInfo->overrideSecFlg = volume_info->secFlg;
- down(&pSesInfo->sesSem);
+ mutex_lock(&pSesInfo->session_mutex);
/* BB FIXME need to pass vol->secFlgs BB */
rc = cifs_setup_session(xid, pSesInfo,
cifs_sb->local_nls);
- up(&pSesInfo->sesSem);
+ mutex_unlock(&pSesInfo->session_mutex);
}
/* search for existing tcon to this server share */
diff --git a/fs/cifs/inode.c b/fs/cifs/inode.c
index e3fda97..8bdbc81 100644
--- a/fs/cifs/inode.c
+++ b/fs/cifs/inode.c
@@ -111,6 +111,7 @@
cifs_i->delete_pending = fattr->cf_flags & CIFS_FATTR_DELETE_PENDING;
+ cifs_i->server_eof = fattr->cf_eof;
/*
* Can't safely change the file size here if the client is writing to
* it due to potential races.
@@ -366,7 +367,7 @@
char ea_value[4];
__u32 mode;
- rc = CIFSSMBQueryEA(xid, cifs_sb->tcon, path, "SETFILEBITS",
+ rc = CIFSSMBQAllEAs(xid, cifs_sb->tcon, path, "SETFILEBITS",
ea_value, 4 /* size of buf */, cifs_sb->local_nls,
cifs_sb->mnt_cifs_flags &
CIFS_MOUNT_MAP_SPECIAL_CHR);
diff --git a/fs/cifs/misc.c b/fs/cifs/misc.c
index d27d4ec..d147499 100644
--- a/fs/cifs/misc.c
+++ b/fs/cifs/misc.c
@@ -79,7 +79,7 @@
++ret_buf->ses_count;
INIT_LIST_HEAD(&ret_buf->smb_ses_list);
INIT_LIST_HEAD(&ret_buf->tcon_list);
- init_MUTEX(&ret_buf->sesSem);
+ mutex_init(&ret_buf->session_mutex);
}
return ret_buf;
}
diff --git a/fs/cifs/xattr.c b/fs/cifs/xattr.c
index a75afa3..3e2ef0d 100644
--- a/fs/cifs/xattr.c
+++ b/fs/cifs/xattr.c
@@ -244,7 +244,7 @@
/* revalidate/getattr then populate from inode */
} /* BB add else when above is implemented */
ea_name += 5; /* skip past user. prefix */
- rc = CIFSSMBQueryEA(xid, pTcon, full_path, ea_name, ea_value,
+ rc = CIFSSMBQAllEAs(xid, pTcon, full_path, ea_name, ea_value,
buf_size, cifs_sb->local_nls,
cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MAP_SPECIAL_CHR);
} else if (strncmp(ea_name, CIFS_XATTR_OS2_PREFIX, 4) == 0) {
@@ -252,7 +252,7 @@
goto get_ea_exit;
ea_name += 4; /* skip past os2. prefix */
- rc = CIFSSMBQueryEA(xid, pTcon, full_path, ea_name, ea_value,
+ rc = CIFSSMBQAllEAs(xid, pTcon, full_path, ea_name, ea_value,
buf_size, cifs_sb->local_nls,
cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MAP_SPECIAL_CHR);
} else if (strncmp(ea_name, POSIX_ACL_XATTR_ACCESS,
@@ -364,8 +364,8 @@
/* if proc/fs/cifs/streamstoxattr is set then
search server for EAs or streams to
returns as xattrs */
- rc = CIFSSMBQAllEAs(xid, pTcon, full_path, data, buf_size,
- cifs_sb->local_nls,
+ rc = CIFSSMBQAllEAs(xid, pTcon, full_path, NULL, data,
+ buf_size, cifs_sb->local_nls,
cifs_sb->mnt_cifs_flags &
CIFS_MOUNT_MAP_SPECIAL_CHR);
diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h
index 874d169..4cedc91 100644
--- a/fs/ext4/ext4.h
+++ b/fs/ext4/ext4.h
@@ -1014,7 +1014,7 @@
atomic_t s_lock_busy;
/* locality groups */
- struct ext4_locality_group *s_locality_groups;
+ struct ext4_locality_group __percpu *s_locality_groups;
/* for write statistics */
unsigned long s_sectors_written_start;
diff --git a/fs/fuse/dev.c b/fs/fuse/dev.c
index 51d9e33..eb7e942 100644
--- a/fs/fuse/dev.c
+++ b/fs/fuse/dev.c
@@ -865,13 +865,10 @@
down_read(&fc->killsb);
err = -ENOENT;
- if (!fc->sb)
- goto err_unlock;
-
- err = fuse_reverse_inval_inode(fc->sb, outarg.ino,
- outarg.off, outarg.len);
-
-err_unlock:
+ if (fc->sb) {
+ err = fuse_reverse_inval_inode(fc->sb, outarg.ino,
+ outarg.off, outarg.len);
+ }
up_read(&fc->killsb);
return err;
@@ -884,10 +881,15 @@
struct fuse_copy_state *cs)
{
struct fuse_notify_inval_entry_out outarg;
- int err = -EINVAL;
- char buf[FUSE_NAME_MAX+1];
+ int err = -ENOMEM;
+ char *buf;
struct qstr name;
+ buf = kzalloc(FUSE_NAME_MAX + 1, GFP_KERNEL);
+ if (!buf)
+ goto err;
+
+ err = -EINVAL;
if (size < sizeof(outarg))
goto err;
@@ -910,16 +912,14 @@
down_read(&fc->killsb);
err = -ENOENT;
- if (!fc->sb)
- goto err_unlock;
-
- err = fuse_reverse_inval_entry(fc->sb, outarg.parent, &name);
-
-err_unlock:
+ if (fc->sb)
+ err = fuse_reverse_inval_entry(fc->sb, outarg.parent, &name);
up_read(&fc->killsb);
+ kfree(buf);
return err;
err:
+ kfree(buf);
fuse_copy_finish(cs);
return err;
}
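
Editor's note: the fuse_notify_inval_entry change above moves a FUSE_NAME_MAX-sized name buffer off the kernel stack and onto the heap, freeing it on both the success and the error paths. A small userspace sketch of the same shape follows; `NAME_MAX_LEN` and `parse_name()` are hypothetical stand-ins for FUSE_NAME_MAX and the real copy helpers.

```c
#include <stdlib.h>
#include <string.h>
#include <stdio.h>
#include <errno.h>

#define NAME_MAX_LEN 1024	/* stand-in for FUSE_NAME_MAX */

/* hypothetical parser standing in for fuse_copy_one()/fuse_copy_finish() */
static int parse_name(const char *src, char *dst, size_t dst_len)
{
	if (strlen(src) >= dst_len)
		return -ENAMETOOLONG;
	strcpy(dst, src);
	return 0;
}

static int handle_inval_entry(const char *src)
{
	char *buf;
	int err;

	/* too big to live on the stack: allocate and zero it instead */
	buf = calloc(1, NAME_MAX_LEN + 1);
	if (!buf)
		return -ENOMEM;

	err = parse_name(src, buf, NAME_MAX_LEN + 1);
	if (err)
		goto out;		/* every failure still frees buf */

	printf("invalidate entry \"%s\"\n", buf);
out:
	free(buf);
	return err;
}

int main(void)
{
	return handle_inval_entry("some_dentry_name") ? 1 : 0;
}
```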
diff --git a/fs/gfs2/aops.c b/fs/gfs2/aops.c
index 7b8da94..0c1d0b8 100644
--- a/fs/gfs2/aops.c
+++ b/fs/gfs2/aops.c
@@ -1061,8 +1061,8 @@
int gfs2_releasepage(struct page *page, gfp_t gfp_mask)
{
- struct inode *aspace = page->mapping->host;
- struct gfs2_sbd *sdp = aspace->i_sb->s_fs_info;
+ struct address_space *mapping = page->mapping;
+ struct gfs2_sbd *sdp = gfs2_mapping2sbd(mapping);
struct buffer_head *bh, *head;
struct gfs2_bufdata *bd;
diff --git a/fs/gfs2/glock.c b/fs/gfs2/glock.c
index f426633..454d4b4 100644
--- a/fs/gfs2/glock.c
+++ b/fs/gfs2/glock.c
@@ -19,7 +19,6 @@
#include <linux/list.h>
#include <linux/wait.h>
#include <linux/module.h>
-#include <linux/rwsem.h>
#include <asm/uaccess.h>
#include <linux/seq_file.h>
#include <linux/debugfs.h>
@@ -60,7 +59,6 @@
#define GLOCK_BUG_ON(gl,x) do { if (unlikely(x)) { __dump_glock(NULL, gl); BUG(); } } while(0)
static void do_xmote(struct gfs2_glock *gl, struct gfs2_holder *gh, unsigned int target);
-static DECLARE_RWSEM(gfs2_umount_flush_sem);
static struct dentry *gfs2_root;
static struct workqueue_struct *glock_workqueue;
struct workqueue_struct *gfs2_delete_workqueue;
@@ -154,12 +152,14 @@
static void glock_free(struct gfs2_glock *gl)
{
struct gfs2_sbd *sdp = gl->gl_sbd;
- struct inode *aspace = gl->gl_aspace;
+ struct address_space *mapping = gfs2_glock2aspace(gl);
+ struct kmem_cache *cachep = gfs2_glock_cachep;
- if (aspace)
- gfs2_aspace_put(aspace);
+ GLOCK_BUG_ON(gl, mapping && mapping->nrpages);
trace_gfs2_glock_put(gl);
- sdp->sd_lockstruct.ls_ops->lm_put_lock(gfs2_glock_cachep, gl);
+ if (mapping)
+ cachep = gfs2_glock_aspace_cachep;
+ sdp->sd_lockstruct.ls_ops->lm_put_lock(cachep, gl);
}
/**
@@ -712,7 +712,6 @@
finish_xmote(gl, gl->gl_reply);
drop_ref = 1;
}
- down_read(&gfs2_umount_flush_sem);
spin_lock(&gl->gl_spin);
if (test_and_clear_bit(GLF_PENDING_DEMOTE, &gl->gl_flags) &&
gl->gl_state != LM_ST_UNLOCKED &&
@@ -725,7 +724,6 @@
}
run_queue(gl, 0);
spin_unlock(&gl->gl_spin);
- up_read(&gfs2_umount_flush_sem);
if (!delay ||
queue_delayed_work(glock_workqueue, &gl->gl_work, delay) == 0)
gfs2_glock_put(gl);
@@ -750,10 +748,11 @@
const struct gfs2_glock_operations *glops, int create,
struct gfs2_glock **glp)
{
+ struct super_block *s = sdp->sd_vfs;
struct lm_lockname name = { .ln_number = number, .ln_type = glops->go_type };
struct gfs2_glock *gl, *tmp;
unsigned int hash = gl_hash(sdp, &name);
- int error;
+ struct address_space *mapping;
read_lock(gl_lock_addr(hash));
gl = search_bucket(hash, sdp, &name);
@@ -765,7 +764,10 @@
if (!create)
return -ENOENT;
- gl = kmem_cache_alloc(gfs2_glock_cachep, GFP_KERNEL);
+ if (glops->go_flags & GLOF_ASPACE)
+ gl = kmem_cache_alloc(gfs2_glock_aspace_cachep, GFP_KERNEL);
+ else
+ gl = kmem_cache_alloc(gfs2_glock_cachep, GFP_KERNEL);
if (!gl)
return -ENOMEM;
@@ -784,18 +786,18 @@
gl->gl_tchange = jiffies;
gl->gl_object = NULL;
gl->gl_sbd = sdp;
- gl->gl_aspace = NULL;
INIT_DELAYED_WORK(&gl->gl_work, glock_work_func);
INIT_WORK(&gl->gl_delete, delete_work_func);
- /* If this glock protects actual on-disk data or metadata blocks,
- create a VFS inode to manage the pages/buffers holding them. */
- if (glops == &gfs2_inode_glops || glops == &gfs2_rgrp_glops) {
- gl->gl_aspace = gfs2_aspace_get(sdp);
- if (!gl->gl_aspace) {
- error = -ENOMEM;
- goto fail;
- }
+ mapping = gfs2_glock2aspace(gl);
+ if (mapping) {
+ mapping->a_ops = &gfs2_meta_aops;
+ mapping->host = s->s_bdev->bd_inode;
+ mapping->flags = 0;
+ mapping_set_gfp_mask(mapping, GFP_NOFS);
+ mapping->assoc_mapping = NULL;
+ mapping->backing_dev_info = s->s_bdi;
+ mapping->writeback_index = 0;
}
write_lock(gl_lock_addr(hash));
@@ -812,10 +814,6 @@
*glp = gl;
return 0;
-
-fail:
- kmem_cache_free(gfs2_glock_cachep, gl);
- return error;
}
/**
@@ -1510,35 +1508,10 @@
void gfs2_gl_hash_clear(struct gfs2_sbd *sdp)
{
- unsigned long t;
unsigned int x;
- int cont;
- t = jiffies;
-
- for (;;) {
- cont = 0;
- for (x = 0; x < GFS2_GL_HASH_SIZE; x++) {
- if (examine_bucket(clear_glock, sdp, x))
- cont = 1;
- }
-
- if (!cont)
- break;
-
- if (time_after_eq(jiffies,
- t + gfs2_tune_get(sdp, gt_stall_secs) * HZ)) {
- fs_warn(sdp, "Unmount seems to be stalled. "
- "Dumping lock state...\n");
- gfs2_dump_lockstate(sdp);
- t = jiffies;
- }
-
- down_write(&gfs2_umount_flush_sem);
- invalidate_inodes(sdp->sd_vfs);
- up_write(&gfs2_umount_flush_sem);
- msleep(10);
- }
+ for (x = 0; x < GFS2_GL_HASH_SIZE; x++)
+ examine_bucket(clear_glock, sdp, x);
flush_workqueue(glock_workqueue);
wait_event(sdp->sd_glock_wait, atomic_read(&sdp->sd_glock_disposal) == 0);
gfs2_dump_lockstate(sdp);
@@ -1685,7 +1658,7 @@
dtime *= 1000000/HZ; /* demote time in uSec */
if (!test_bit(GLF_DEMOTE, &gl->gl_flags))
dtime = 0;
- gfs2_print_dbg(seq, "G: s:%s n:%u/%llu f:%s t:%s d:%s/%llu a:%d r:%d\n",
+ gfs2_print_dbg(seq, "G: s:%s n:%u/%llx f:%s t:%s d:%s/%llu a:%d r:%d\n",
state2str(gl->gl_state),
gl->gl_name.ln_type,
(unsigned long long)gl->gl_name.ln_number,
diff --git a/fs/gfs2/glock.h b/fs/gfs2/glock.h
index c0262fa..2bda191 100644
--- a/fs/gfs2/glock.h
+++ b/fs/gfs2/glock.h
@@ -180,6 +180,13 @@
return gl->gl_state == LM_ST_SHARED;
}
+static inline struct address_space *gfs2_glock2aspace(struct gfs2_glock *gl)
+{
+ if (gl->gl_ops->go_flags & GLOF_ASPACE)
+ return (struct address_space *)(gl + 1);
+ return NULL;
+}
+
int gfs2_glock_get(struct gfs2_sbd *sdp,
u64 number, const struct gfs2_glock_operations *glops,
int create, struct gfs2_glock **glp);
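
Editor's note: gfs2_glock2aspace() works because glocks that need a metadata mapping are allocated from a cache whose object size is sizeof(struct gfs2_glock) + sizeof(struct address_space), so the mapping lives immediately after the glock and `(gl + 1)` points straight at it. A userspace sketch of that "header plus trailing embedded object" layout is below, with hypothetical lock/mapping types standing in for the real structures.

```c
#include <stdlib.h>
#include <stdio.h>

/* hypothetical stand-ins for struct gfs2_glock / struct address_space */
struct lock_obj {
	int has_mapping;
	int state;
};

struct mapping_obj {
	unsigned long nrpages;
};

/* objects that need a mapping are allocated with it glued on the end */
static struct lock_obj *lock_alloc(int with_mapping)
{
	size_t size = sizeof(struct lock_obj);
	struct lock_obj *l;

	if (with_mapping)
		size += sizeof(struct mapping_obj);
	l = calloc(1, size);
	if (l)
		l->has_mapping = with_mapping;
	return l;
}

/* mirror of gfs2_glock2aspace(): the mapping sits right after the lock */
static struct mapping_obj *lock_to_mapping(struct lock_obj *l)
{
	if (l && l->has_mapping)
		return (struct mapping_obj *)(l + 1);
	return NULL;
}

int main(void)
{
	struct lock_obj *l = lock_alloc(1);
	struct mapping_obj *m = lock_to_mapping(l);

	if (m)
		printf("mapping at %p, %lu pages\n", (void *)m, m->nrpages);
	free(l);	/* one allocation, one free */
	return 0;
}
```

One allocation covers both objects, which is what lets the patch drop the separate aspace inode and its iget/iput lifetime.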
diff --git a/fs/gfs2/glops.c b/fs/gfs2/glops.c
index 78554ac..38e3749 100644
--- a/fs/gfs2/glops.c
+++ b/fs/gfs2/glops.c
@@ -87,7 +87,7 @@
static void rgrp_go_sync(struct gfs2_glock *gl)
{
- struct address_space *metamapping = gl->gl_aspace->i_mapping;
+ struct address_space *metamapping = gfs2_glock2aspace(gl);
int error;
if (!test_and_clear_bit(GLF_DIRTY, &gl->gl_flags))
@@ -113,7 +113,7 @@
static void rgrp_go_inval(struct gfs2_glock *gl, int flags)
{
- struct address_space *mapping = gl->gl_aspace->i_mapping;
+ struct address_space *mapping = gfs2_glock2aspace(gl);
BUG_ON(!(flags & DIO_METADATA));
gfs2_assert_withdraw(gl->gl_sbd, !atomic_read(&gl->gl_ail_count));
@@ -134,7 +134,7 @@
static void inode_go_sync(struct gfs2_glock *gl)
{
struct gfs2_inode *ip = gl->gl_object;
- struct address_space *metamapping = gl->gl_aspace->i_mapping;
+ struct address_space *metamapping = gfs2_glock2aspace(gl);
int error;
if (ip && !S_ISREG(ip->i_inode.i_mode))
@@ -183,7 +183,7 @@
gfs2_assert_withdraw(gl->gl_sbd, !atomic_read(&gl->gl_ail_count));
if (flags & DIO_METADATA) {
- struct address_space *mapping = gl->gl_aspace->i_mapping;
+ struct address_space *mapping = gfs2_glock2aspace(gl);
truncate_inode_pages(mapping, 0);
if (ip) {
set_bit(GIF_INVALID, &ip->i_flags);
@@ -282,7 +282,8 @@
static int rgrp_go_demote_ok(const struct gfs2_glock *gl)
{
- return !gl->gl_aspace->i_mapping->nrpages;
+ const struct address_space *mapping = (const struct address_space *)(gl + 1);
+ return !mapping->nrpages;
}
/**
@@ -387,8 +388,7 @@
struct gfs2_inode *ip = (struct gfs2_inode *)gl->gl_object;
if (gl->gl_demote_state == LM_ST_UNLOCKED &&
- gl->gl_state == LM_ST_SHARED &&
- ip && test_bit(GIF_USER, &ip->i_flags)) {
+ gl->gl_state == LM_ST_SHARED && ip) {
gfs2_glock_hold(gl);
if (queue_work(gfs2_delete_workqueue, &gl->gl_delete) == 0)
gfs2_glock_put_nolock(gl);
@@ -407,6 +407,7 @@
.go_dump = inode_go_dump,
.go_type = LM_TYPE_INODE,
.go_min_hold_time = HZ / 5,
+ .go_flags = GLOF_ASPACE,
};
const struct gfs2_glock_operations gfs2_rgrp_glops = {
@@ -418,6 +419,7 @@
.go_dump = gfs2_rgrp_dump,
.go_type = LM_TYPE_RGRP,
.go_min_hold_time = HZ / 5,
+ .go_flags = GLOF_ASPACE,
};
const struct gfs2_glock_operations gfs2_trans_glops = {
diff --git a/fs/gfs2/incore.h b/fs/gfs2/incore.h
index bc0ad15..b8025e5 100644
--- a/fs/gfs2/incore.h
+++ b/fs/gfs2/incore.h
@@ -162,6 +162,8 @@
void (*go_callback) (struct gfs2_glock *gl);
const int go_type;
const unsigned long go_min_hold_time;
+ const unsigned long go_flags;
+#define GLOF_ASPACE 1
};
enum {
@@ -225,7 +227,6 @@
struct gfs2_sbd *gl_sbd;
- struct inode *gl_aspace;
struct list_head gl_ail_list;
atomic_t gl_ail_count;
struct delayed_work gl_work;
@@ -258,7 +259,6 @@
GIF_INVALID = 0,
GIF_QD_LOCKED = 1,
GIF_SW_PAGED = 3,
- GIF_USER = 4, /* user inode, not metadata addr space */
};
@@ -451,7 +451,6 @@
unsigned int gt_quota_quantum; /* Secs between syncs to quota file */
unsigned int gt_new_files_jdata;
unsigned int gt_max_readahead; /* Max bytes to read-ahead from disk */
- unsigned int gt_stall_secs; /* Detects trouble! */
unsigned int gt_complain_secs;
unsigned int gt_statfs_quantum;
unsigned int gt_statfs_slow;
diff --git a/fs/gfs2/inode.c b/fs/gfs2/inode.c
index 6e220f4..b1bf269 100644
--- a/fs/gfs2/inode.c
+++ b/fs/gfs2/inode.c
@@ -45,7 +45,7 @@
struct gfs2_inode *ip = GFS2_I(inode);
u64 *no_addr = opaque;
- if (ip->i_no_addr == *no_addr && test_bit(GIF_USER, &ip->i_flags))
+ if (ip->i_no_addr == *no_addr)
return 1;
return 0;
@@ -58,7 +58,6 @@
inode->i_ino = (unsigned long)*no_addr;
ip->i_no_addr = *no_addr;
- set_bit(GIF_USER, &ip->i_flags);
return 0;
}
@@ -84,7 +83,7 @@
struct gfs2_inode *ip = GFS2_I(inode);
struct gfs2_skip_data *data = opaque;
- if (ip->i_no_addr == data->no_addr && test_bit(GIF_USER, &ip->i_flags)){
+ if (ip->i_no_addr == data->no_addr) {
if (inode->i_state & (I_FREEING|I_CLEAR|I_WILL_FREE)){
data->skipped = 1;
return 0;
@@ -103,7 +102,6 @@
return 1;
inode->i_ino = (unsigned long)(data->no_addr);
ip->i_no_addr = data->no_addr;
- set_bit(GIF_USER, &ip->i_flags);
return 0;
}
diff --git a/fs/gfs2/lock_dlm.c b/fs/gfs2/lock_dlm.c
index 0e5e0e7..569b462 100644
--- a/fs/gfs2/lock_dlm.c
+++ b/fs/gfs2/lock_dlm.c
@@ -30,7 +30,10 @@
switch (gl->gl_lksb.sb_status) {
case -DLM_EUNLOCK: /* Unlocked, so glock can be freed */
- kmem_cache_free(gfs2_glock_cachep, gl);
+ if (gl->gl_ops->go_flags & GLOF_ASPACE)
+ kmem_cache_free(gfs2_glock_aspace_cachep, gl);
+ else
+ kmem_cache_free(gfs2_glock_cachep, gl);
if (atomic_dec_and_test(&sdp->sd_glock_disposal))
wake_up(&sdp->sd_glock_wait);
return;
diff --git a/fs/gfs2/lops.c b/fs/gfs2/lops.c
index de97632..adc260f 100644
--- a/fs/gfs2/lops.c
+++ b/fs/gfs2/lops.c
@@ -528,9 +528,9 @@
gfs2_pin(sdp, bd->bd_bh);
tr->tr_num_databuf_new++;
sdp->sd_log_num_databuf++;
- list_add(&le->le_list, &sdp->sd_log_le_databuf);
+ list_add_tail(&le->le_list, &sdp->sd_log_le_databuf);
} else {
- list_add(&le->le_list, &sdp->sd_log_le_ordered);
+ list_add_tail(&le->le_list, &sdp->sd_log_le_ordered);
}
out:
gfs2_log_unlock(sdp);
diff --git a/fs/gfs2/main.c b/fs/gfs2/main.c
index 5b31f77..a88fadc 100644
--- a/fs/gfs2/main.c
+++ b/fs/gfs2/main.c
@@ -52,6 +52,22 @@
atomic_set(&gl->gl_ail_count, 0);
}
+static void gfs2_init_gl_aspace_once(void *foo)
+{
+ struct gfs2_glock *gl = foo;
+ struct address_space *mapping = (struct address_space *)(gl + 1);
+
+ gfs2_init_glock_once(gl);
+ memset(mapping, 0, sizeof(*mapping));
+ INIT_RADIX_TREE(&mapping->page_tree, GFP_ATOMIC);
+ spin_lock_init(&mapping->tree_lock);
+ spin_lock_init(&mapping->i_mmap_lock);
+ INIT_LIST_HEAD(&mapping->private_list);
+ spin_lock_init(&mapping->private_lock);
+ INIT_RAW_PRIO_TREE_ROOT(&mapping->i_mmap);
+ INIT_LIST_HEAD(&mapping->i_mmap_nonlinear);
+}
+
/**
* init_gfs2_fs - Register GFS2 as a filesystem
*
@@ -78,6 +94,14 @@
if (!gfs2_glock_cachep)
goto fail;
+ gfs2_glock_aspace_cachep = kmem_cache_create("gfs2_glock (aspace)",
+ sizeof(struct gfs2_glock) +
+ sizeof(struct address_space),
+ 0, 0, gfs2_init_gl_aspace_once);
+
+ if (!gfs2_glock_aspace_cachep)
+ goto fail;
+
gfs2_inode_cachep = kmem_cache_create("gfs2_inode",
sizeof(struct gfs2_inode),
0, SLAB_RECLAIM_ACCOUNT|
@@ -144,6 +168,9 @@
if (gfs2_inode_cachep)
kmem_cache_destroy(gfs2_inode_cachep);
+ if (gfs2_glock_aspace_cachep)
+ kmem_cache_destroy(gfs2_glock_aspace_cachep);
+
if (gfs2_glock_cachep)
kmem_cache_destroy(gfs2_glock_cachep);
@@ -169,6 +196,7 @@
kmem_cache_destroy(gfs2_rgrpd_cachep);
kmem_cache_destroy(gfs2_bufdata_cachep);
kmem_cache_destroy(gfs2_inode_cachep);
+ kmem_cache_destroy(gfs2_glock_aspace_cachep);
kmem_cache_destroy(gfs2_glock_cachep);
gfs2_sys_uninit();
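
Editor's note: the new gfs2_glock_aspace_cachep is created with a constructor (gfs2_init_gl_aspace_once), so the embedded address_space is initialised by the allocator's once-per-object constructor rather than open-coded at every allocation site. A rough userspace analogue of a cache whose constructor runs once per object is sketched below; it is a toy freelist, not the slab allocator, and all names are invented.

```c
#include <stdlib.h>
#include <stdio.h>

#define FREELIST_MAX 32

/* Toy object cache: freed objects are kept constructed, so the constructor
 * runs once per object (as with a slab ctor), not once per allocation. */
struct obj_cache {
	size_t obj_size;
	void (*ctor)(void *obj);
	void *freelist[FREELIST_MAX];
	size_t nfree;
};

static struct obj_cache *cache_create(size_t obj_size, void (*ctor)(void *))
{
	struct obj_cache *c = calloc(1, sizeof(*c));

	if (c) {
		c->obj_size = obj_size;
		c->ctor = ctor;
	}
	return c;
}

static void *cache_alloc(struct obj_cache *c)
{
	void *obj;

	if (c->nfree)
		return c->freelist[--c->nfree];	/* already constructed */

	obj = malloc(c->obj_size);
	if (obj && c->ctor)
		c->ctor(obj);			/* constructor runs exactly once */
	return obj;
}

static void cache_free(struct obj_cache *c, void *obj)
{
	if (c->nfree < FREELIST_MAX)
		c->freelist[c->nfree++] = obj;	/* keep it constructed for reuse */
	else
		free(obj);
}

struct widget {
	int initialised;
};

static void widget_ctor(void *p)
{
	((struct widget *)p)->initialised = 1;
}

int main(void)
{
	struct obj_cache *c = cache_create(sizeof(struct widget), widget_ctor);
	struct widget *w;

	if (!c)
		return 1;
	w = cache_alloc(c);
	printf("initialised = %d\n", w ? w->initialised : -1);
	cache_free(c, w);
	free(c);
	return 0;
}
```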
diff --git a/fs/gfs2/meta_io.c b/fs/gfs2/meta_io.c
index 6f68a5f..0bb12c8 100644
--- a/fs/gfs2/meta_io.c
+++ b/fs/gfs2/meta_io.c
@@ -93,49 +93,13 @@
return err;
}
-static const struct address_space_operations aspace_aops = {
+const struct address_space_operations gfs2_meta_aops = {
.writepage = gfs2_aspace_writepage,
.releasepage = gfs2_releasepage,
.sync_page = block_sync_page,
};
/**
- * gfs2_aspace_get - Create and initialize a struct inode structure
- * @sdp: the filesystem the aspace is in
- *
- * Right now a struct inode is just a struct inode. Maybe Linux
- * will supply a more lightweight address space construct (that works)
- * in the future.
- *
- * Make sure pages/buffers in this aspace aren't in high memory.
- *
- * Returns: the aspace
- */
-
-struct inode *gfs2_aspace_get(struct gfs2_sbd *sdp)
-{
- struct inode *aspace;
- struct gfs2_inode *ip;
-
- aspace = new_inode(sdp->sd_vfs);
- if (aspace) {
- mapping_set_gfp_mask(aspace->i_mapping, GFP_NOFS);
- aspace->i_mapping->a_ops = &aspace_aops;
- aspace->i_size = MAX_LFS_FILESIZE;
- ip = GFS2_I(aspace);
- clear_bit(GIF_USER, &ip->i_flags);
- insert_inode_hash(aspace);
- }
- return aspace;
-}
-
-void gfs2_aspace_put(struct inode *aspace)
-{
- remove_inode_hash(aspace);
- iput(aspace);
-}
-
-/**
* gfs2_meta_sync - Sync all buffers associated with a glock
* @gl: The glock
*
@@ -143,7 +107,7 @@
void gfs2_meta_sync(struct gfs2_glock *gl)
{
- struct address_space *mapping = gl->gl_aspace->i_mapping;
+ struct address_space *mapping = gfs2_glock2aspace(gl);
int error;
filemap_fdatawrite(mapping);
@@ -164,7 +128,7 @@
struct buffer_head *gfs2_getbuf(struct gfs2_glock *gl, u64 blkno, int create)
{
- struct address_space *mapping = gl->gl_aspace->i_mapping;
+ struct address_space *mapping = gfs2_glock2aspace(gl);
struct gfs2_sbd *sdp = gl->gl_sbd;
struct page *page;
struct buffer_head *bh;
@@ -344,8 +308,10 @@
void gfs2_remove_from_journal(struct buffer_head *bh, struct gfs2_trans *tr, int meta)
{
- struct gfs2_sbd *sdp = GFS2_SB(bh->b_page->mapping->host);
+ struct address_space *mapping = bh->b_page->mapping;
+ struct gfs2_sbd *sdp = gfs2_mapping2sbd(mapping);
struct gfs2_bufdata *bd = bh->b_private;
+
if (test_clear_buffer_pinned(bh)) {
list_del_init(&bd->bd_le.le_list);
if (meta) {
diff --git a/fs/gfs2/meta_io.h b/fs/gfs2/meta_io.h
index de270c2..6a1d9ba 100644
--- a/fs/gfs2/meta_io.h
+++ b/fs/gfs2/meta_io.h
@@ -37,8 +37,16 @@
0, from_head - to_head);
}
-struct inode *gfs2_aspace_get(struct gfs2_sbd *sdp);
-void gfs2_aspace_put(struct inode *aspace);
+extern const struct address_space_operations gfs2_meta_aops;
+
+static inline struct gfs2_sbd *gfs2_mapping2sbd(struct address_space *mapping)
+{
+ struct inode *inode = mapping->host;
+ if (mapping->a_ops == &gfs2_meta_aops)
+ return (((struct gfs2_glock *)mapping) - 1)->gl_sbd;
+ else
+ return inode->i_sb->s_fs_info;
+}
void gfs2_meta_sync(struct gfs2_glock *gl);
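
Editor's note: gfs2_mapping2sbd() has to recover the superblock from a bare address_space that may belong either to a regular inode or to a glock's embedded metadata mapping; it tells the two apart by comparing a_ops against &gfs2_meta_aops and, in the glock case, steps back over the embedding glock with `((struct gfs2_glock *)mapping) - 1`. The sketch below shows the same idea in userspace, dispatching on an ops-table pointer and recovering the container with the more general offsetof-based form; all names are illustrative.

```c
#include <stdio.h>
#include <stddef.h>

struct ops { const char *name; };

static const struct ops meta_ops  = { "meta" };
static const struct ops inode_ops = { "inode" };

struct mapping { const struct ops *a_ops; };

/* a "glock" that embeds its mapping, as in the patch */
struct glock {
	int sb_id;
	struct mapping map;
};

struct inode {
	int sb_id;
	struct mapping map;
};

/* which superblock does this mapping belong to? */
static int mapping_to_sb(struct mapping *m)
{
	if (m->a_ops == &meta_ops) {
		/* meta mappings live inside a glock: step back to it */
		struct glock *gl = (struct glock *)
			((char *)m - offsetof(struct glock, map));
		return gl->sb_id;
	} else {
		struct inode *in = (struct inode *)
			((char *)m - offsetof(struct inode, map));
		return in->sb_id;
	}
}

int main(void)
{
	struct glock gl = { .sb_id = 1, .map = { &meta_ops } };
	struct inode in = { .sb_id = 2, .map = { &inode_ops } };

	printf("glock mapping -> sb %d\n", mapping_to_sb(&gl.map));
	printf("inode mapping -> sb %d\n", mapping_to_sb(&in.map));
	return 0;
}
```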
diff --git a/fs/gfs2/ops_fstype.c b/fs/gfs2/ops_fstype.c
index a86ed63..a054b52 100644
--- a/fs/gfs2/ops_fstype.c
+++ b/fs/gfs2/ops_fstype.c
@@ -65,7 +65,6 @@
gt->gt_quota_scale_den = 1;
gt->gt_new_files_jdata = 0;
gt->gt_max_readahead = 1 << 18;
- gt->gt_stall_secs = 600;
gt->gt_complain_secs = 10;
}
@@ -1241,10 +1240,9 @@
fail_locking:
init_locking(sdp, &mount_gh, UNDO);
fail_lm:
+ invalidate_inodes(sb);
gfs2_gl_hash_clear(sdp);
gfs2_lm_unmount(sdp);
- while (invalidate_inodes(sb))
- yield();
fail_sys:
gfs2_sys_fs_del(sdp);
fail:
diff --git a/fs/gfs2/super.c b/fs/gfs2/super.c
index b9dd3da..e5e2262 100644
--- a/fs/gfs2/super.c
+++ b/fs/gfs2/super.c
@@ -722,8 +722,7 @@
int ret = 0;
/* Check this is a "normal" inode, etc */
- if (!test_bit(GIF_USER, &ip->i_flags) ||
- (current->flags & PF_MEMALLOC))
+ if (current->flags & PF_MEMALLOC)
return 0;
ret = gfs2_glock_nq_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, &gh);
if (ret)
@@ -860,6 +859,7 @@
gfs2_clear_rgrpd(sdp);
gfs2_jindex_free(sdp);
/* Take apart glock structures and buffer lists */
+ invalidate_inodes(sdp->sd_vfs);
gfs2_gl_hash_clear(sdp);
/* Unmount the locking protocol */
gfs2_lm_unmount(sdp);
@@ -1194,7 +1194,7 @@
{
struct gfs2_inode *ip = GFS2_I(inode);
- if (test_bit(GIF_USER, &ip->i_flags) && inode->i_nlink) {
+ if (inode->i_nlink) {
struct gfs2_glock *gl = ip->i_iopen_gh.gh_gl;
if (gl && test_bit(GLF_DEMOTE, &gl->gl_flags))
clear_nlink(inode);
@@ -1212,18 +1212,12 @@
{
struct gfs2_inode *ip = GFS2_I(inode);
- /* This tells us its a "real" inode and not one which only
- * serves to contain an address space (see rgrp.c, meta_io.c)
- * which therefore doesn't have its own glocks.
- */
- if (test_bit(GIF_USER, &ip->i_flags)) {
- ip->i_gl->gl_object = NULL;
- gfs2_glock_put(ip->i_gl);
- ip->i_gl = NULL;
- if (ip->i_iopen_gh.gh_gl) {
- ip->i_iopen_gh.gh_gl->gl_object = NULL;
- gfs2_glock_dq_uninit(&ip->i_iopen_gh);
- }
+ ip->i_gl->gl_object = NULL;
+ gfs2_glock_put(ip->i_gl);
+ ip->i_gl = NULL;
+ if (ip->i_iopen_gh.gh_gl) {
+ ip->i_iopen_gh.gh_gl->gl_object = NULL;
+ gfs2_glock_dq_uninit(&ip->i_iopen_gh);
}
}
@@ -1358,9 +1352,6 @@
struct gfs2_holder gh;
int error;
- if (!test_bit(GIF_USER, &ip->i_flags))
- goto out;
-
error = gfs2_glock_nq_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, &gh);
if (unlikely(error)) {
gfs2_glock_dq_uninit(&ip->i_iopen_gh);
diff --git a/fs/gfs2/sys.c b/fs/gfs2/sys.c
index 0dc3462..a0db1c9 100644
--- a/fs/gfs2/sys.c
+++ b/fs/gfs2/sys.c
@@ -478,7 +478,6 @@
TUNE_ATTR(statfs_slow, 0);
TUNE_ATTR(new_files_jdata, 0);
TUNE_ATTR(quota_simul_sync, 1);
-TUNE_ATTR(stall_secs, 1);
TUNE_ATTR(statfs_quantum, 1);
TUNE_ATTR_3(quota_scale, quota_scale_show, quota_scale_store);
@@ -491,7 +490,6 @@
&tune_attr_complain_secs.attr,
&tune_attr_statfs_slow.attr,
&tune_attr_quota_simul_sync.attr,
- &tune_attr_stall_secs.attr,
&tune_attr_statfs_quantum.attr,
&tune_attr_quota_scale.attr,
&tune_attr_new_files_jdata.attr,
diff --git a/fs/gfs2/util.c b/fs/gfs2/util.c
index f6a7efa..226f2bf 100644
--- a/fs/gfs2/util.c
+++ b/fs/gfs2/util.c
@@ -21,6 +21,7 @@
#include "util.h"
struct kmem_cache *gfs2_glock_cachep __read_mostly;
+struct kmem_cache *gfs2_glock_aspace_cachep __read_mostly;
struct kmem_cache *gfs2_inode_cachep __read_mostly;
struct kmem_cache *gfs2_bufdata_cachep __read_mostly;
struct kmem_cache *gfs2_rgrpd_cachep __read_mostly;
diff --git a/fs/gfs2/util.h b/fs/gfs2/util.h
index 33e96b0..b432e04 100644
--- a/fs/gfs2/util.h
+++ b/fs/gfs2/util.h
@@ -145,6 +145,7 @@
extern struct kmem_cache *gfs2_glock_cachep;
+extern struct kmem_cache *gfs2_glock_aspace_cachep;
extern struct kmem_cache *gfs2_inode_cachep;
extern struct kmem_cache *gfs2_bufdata_cachep;
extern struct kmem_cache *gfs2_rgrpd_cachep;
diff --git a/fs/nfs/iostat.h b/fs/nfs/iostat.h
index 46d779a..1d8d5c8 100644
--- a/fs/nfs/iostat.h
+++ b/fs/nfs/iostat.h
@@ -57,12 +57,12 @@
}
#endif
-static inline struct nfs_iostats *nfs_alloc_iostats(void)
+static inline struct nfs_iostats __percpu *nfs_alloc_iostats(void)
{
return alloc_percpu(struct nfs_iostats);
}
-static inline void nfs_free_iostats(struct nfs_iostats *stats)
+static inline void nfs_free_iostats(struct nfs_iostats __percpu *stats)
{
if (stats != NULL)
free_percpu(stats);
diff --git a/fs/nilfs2/dat.c b/fs/nilfs2/dat.c
index 187dd07..9d1e5de 100644
--- a/fs/nilfs2/dat.c
+++ b/fs/nilfs2/dat.c
@@ -388,8 +388,7 @@
ret = -ENOENT;
goto out;
}
- if (blocknrp != NULL)
- *blocknrp = blocknr;
+ *blocknrp = blocknr;
out:
kunmap_atomic(kaddr, KM_USER0);
diff --git a/fs/nilfs2/ioctl.c b/fs/nilfs2/ioctl.c
index d6b2b83..313d0a2 100644
--- a/fs/nilfs2/ioctl.c
+++ b/fs/nilfs2/ioctl.c
@@ -26,6 +26,7 @@
#include <linux/capability.h> /* capable() */
#include <linux/uaccess.h> /* copy_from_user(), copy_to_user() */
#include <linux/vmalloc.h>
+#include <linux/mount.h> /* mnt_want_write(), mnt_drop_write() */
#include <linux/nilfs2_fs.h>
#include "nilfs.h"
#include "segment.h"
@@ -107,20 +108,28 @@
if (!capable(CAP_SYS_ADMIN))
return -EPERM;
+
+ ret = mnt_want_write(filp->f_path.mnt);
+ if (ret)
+ return ret;
+
+ ret = -EFAULT;
if (copy_from_user(&cpmode, argp, sizeof(cpmode)))
- return -EFAULT;
+ goto out;
mutex_lock(&nilfs->ns_mount_mutex);
+
nilfs_transaction_begin(inode->i_sb, &ti, 0);
ret = nilfs_cpfile_change_cpmode(
cpfile, cpmode.cm_cno, cpmode.cm_mode);
- if (unlikely(ret < 0)) {
+ if (unlikely(ret < 0))
nilfs_transaction_abort(inode->i_sb);
- mutex_unlock(&nilfs->ns_mount_mutex);
- return ret;
- }
- nilfs_transaction_commit(inode->i_sb); /* never fails */
+ else
+ nilfs_transaction_commit(inode->i_sb); /* never fails */
+
mutex_unlock(&nilfs->ns_mount_mutex);
+out:
+ mnt_drop_write(filp->f_path.mnt);
return ret;
}
@@ -135,16 +144,23 @@
if (!capable(CAP_SYS_ADMIN))
return -EPERM;
+
+ ret = mnt_want_write(filp->f_path.mnt);
+ if (ret)
+ return ret;
+
+ ret = -EFAULT;
if (copy_from_user(&cno, argp, sizeof(cno)))
- return -EFAULT;
+ goto out;
nilfs_transaction_begin(inode->i_sb, &ti, 0);
ret = nilfs_cpfile_delete_checkpoint(cpfile, cno);
- if (unlikely(ret < 0)) {
+ if (unlikely(ret < 0))
nilfs_transaction_abort(inode->i_sb);
- return ret;
- }
- nilfs_transaction_commit(inode->i_sb); /* never fails */
+ else
+ nilfs_transaction_commit(inode->i_sb); /* never fails */
+out:
+ mnt_drop_write(filp->f_path.mnt);
return ret;
}
@@ -496,12 +512,19 @@
if (!capable(CAP_SYS_ADMIN))
return -EPERM;
- if (copy_from_user(argv, argp, sizeof(argv)))
- return -EFAULT;
+ ret = mnt_want_write(filp->f_path.mnt);
+ if (ret)
+ return ret;
+ ret = -EFAULT;
+ if (copy_from_user(argv, argp, sizeof(argv)))
+ goto out;
+
+ ret = -EINVAL;
nsegs = argv[4].v_nmembs;
if (argv[4].v_size != argsz[4])
- return -EINVAL;
+ goto out;
+
/*
* argv[4] points to segment numbers this ioctl cleans. We
* use kmalloc() for its buffer because memory used for the
@@ -509,9 +532,10 @@
*/
kbufs[4] = memdup_user((void __user *)(unsigned long)argv[4].v_base,
nsegs * sizeof(__u64));
- if (IS_ERR(kbufs[4]))
- return PTR_ERR(kbufs[4]);
-
+ if (IS_ERR(kbufs[4])) {
+ ret = PTR_ERR(kbufs[4]);
+ goto out;
+ }
nilfs = NILFS_SB(inode->i_sb)->s_nilfs;
for (n = 0; n < 4; n++) {
@@ -563,10 +587,12 @@
nilfs_remove_all_gcinode(nilfs);
clear_nilfs_gc_running(nilfs);
- out_free:
+out_free:
while (--n >= 0)
vfree(kbufs[n]);
kfree(kbufs[4]);
+out:
+ mnt_drop_write(filp->f_path.mnt);
return ret;
}
@@ -575,13 +601,17 @@
{
__u64 cno;
int ret;
+ struct the_nilfs *nilfs;
ret = nilfs_construct_segment(inode->i_sb);
if (ret < 0)
return ret;
if (argp != NULL) {
- cno = NILFS_SB(inode->i_sb)->s_nilfs->ns_cno - 1;
+ nilfs = NILFS_SB(inode->i_sb)->s_nilfs;
+ down_read(&nilfs->ns_segctor_sem);
+ cno = nilfs->ns_cno - 1;
+ up_read(&nilfs->ns_segctor_sem);
if (copy_to_user(argp, &cno, sizeof(cno)))
return -EFAULT;
}
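
Editor's note: the nilfs2 ioctls above now take a write reference on the mount (mnt_want_write) before copying their arguments in, and route every later failure through a label that drops it again (mnt_drop_write). That acquire-up-front / single-release-label shape is sketched below in userspace; `want_write()` and `drop_write()` are hypothetical stand-ins for the real mount helpers.

```c
#include <stdio.h>
#include <errno.h>

static int write_refs;

/* hypothetical stand-ins for mnt_want_write()/mnt_drop_write() */
static int  want_write(void) { write_refs++; return 0; }
static void drop_write(void) { write_refs--; }

static int do_ioctl(const char *arg)
{
	int ret;

	ret = want_write();
	if (ret)
		return ret;		/* nothing acquired yet: plain return */

	ret = -EFAULT;
	if (!arg)
		goto out;		/* acquired: must exit through out */

	printf("changing checkpoint mode to \"%s\"\n", arg);
	ret = 0;
out:
	drop_write();			/* every later exit drops the reference */
	return ret;
}

int main(void)
{
	do_ioctl("snapshot");
	do_ioctl(NULL);
	printf("outstanding write refs: %d\n", write_refs);	/* prints 0 */
	return 0;
}
```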
diff --git a/fs/nilfs2/recovery.c b/fs/nilfs2/recovery.c
index c9c96c7..017bedc 100644
--- a/fs/nilfs2/recovery.c
+++ b/fs/nilfs2/recovery.c
@@ -39,7 +39,6 @@
NILFS_SEG_FAIL_IO,
NILFS_SEG_FAIL_MAGIC,
NILFS_SEG_FAIL_SEQ,
- NILFS_SEG_FAIL_CHECKSUM_SEGSUM,
NILFS_SEG_FAIL_CHECKSUM_SUPER_ROOT,
NILFS_SEG_FAIL_CHECKSUM_FULL,
NILFS_SEG_FAIL_CONSISTENCY,
@@ -71,10 +70,6 @@
printk(KERN_WARNING
"NILFS warning: Sequence number mismatch\n");
break;
- case NILFS_SEG_FAIL_CHECKSUM_SEGSUM:
- printk(KERN_WARNING
- "NILFS warning: Checksum error in segment summary\n");
- break;
case NILFS_SEG_FAIL_CHECKSUM_SUPER_ROOT:
printk(KERN_WARNING
"NILFS warning: Checksum error in super root\n");
@@ -206,19 +201,15 @@
* @pseg_start: start disk block number of partial segment
* @seg_seq: sequence number requested
* @ssi: pointer to nilfs_segsum_info struct to store information
- * @full_check: full check flag
- * (0: only checks segment summary CRC, 1: data CRC)
*/
static int
load_segment_summary(struct nilfs_sb_info *sbi, sector_t pseg_start,
- u64 seg_seq, struct nilfs_segsum_info *ssi,
- int full_check)
+ u64 seg_seq, struct nilfs_segsum_info *ssi)
{
struct buffer_head *bh_sum;
struct nilfs_segment_summary *sum;
- unsigned long offset, nblock;
- u64 check_bytes;
- u32 crc, crc_sum;
+ unsigned long nblock;
+ u32 crc;
int ret = NILFS_SEG_FAIL_IO;
bh_sum = sb_bread(sbi->s_super, pseg_start);
@@ -237,34 +228,24 @@
ret = NILFS_SEG_FAIL_SEQ;
goto failed;
}
- if (full_check) {
- offset = sizeof(sum->ss_datasum);
- check_bytes =
- ((u64)ssi->nblocks << sbi->s_super->s_blocksize_bits);
- nblock = ssi->nblocks;
- crc_sum = le32_to_cpu(sum->ss_datasum);
- ret = NILFS_SEG_FAIL_CHECKSUM_FULL;
- } else { /* only checks segment summary */
- offset = sizeof(sum->ss_datasum) + sizeof(sum->ss_sumsum);
- check_bytes = ssi->sumbytes;
- nblock = ssi->nsumblk;
- crc_sum = le32_to_cpu(sum->ss_sumsum);
- ret = NILFS_SEG_FAIL_CHECKSUM_SEGSUM;
- }
+ nblock = ssi->nblocks;
if (unlikely(nblock == 0 ||
nblock > sbi->s_nilfs->ns_blocks_per_segment)) {
/* This limits the number of blocks read in the CRC check */
ret = NILFS_SEG_FAIL_CONSISTENCY;
goto failed;
}
- if (calc_crc_cont(sbi, bh_sum, &crc, offset, check_bytes,
+ if (calc_crc_cont(sbi, bh_sum, &crc, sizeof(sum->ss_datasum),
+ ((u64)nblock << sbi->s_super->s_blocksize_bits),
pseg_start, nblock)) {
ret = NILFS_SEG_FAIL_IO;
goto failed;
}
- if (crc == crc_sum)
+ if (crc == le32_to_cpu(sum->ss_datasum))
ret = 0;
+ else
+ ret = NILFS_SEG_FAIL_CHECKSUM_FULL;
failed:
brelse(bh_sum);
out:
@@ -598,7 +579,7 @@
while (segnum != ri->ri_segnum || pseg_start <= ri->ri_pseg_start) {
- ret = load_segment_summary(sbi, pseg_start, seg_seq, &ssi, 1);
+ ret = load_segment_summary(sbi, pseg_start, seg_seq, &ssi);
if (ret) {
if (ret == NILFS_SEG_FAIL_IO) {
err = -EIO;
@@ -821,7 +802,7 @@
for (;;) {
/* Load segment summary */
- ret = load_segment_summary(sbi, pseg_start, seg_seq, &ssi, 1);
+ ret = load_segment_summary(sbi, pseg_start, seg_seq, &ssi);
if (ret) {
if (ret == NILFS_SEG_FAIL_IO)
goto failed;
diff --git a/fs/nilfs2/segbuf.c b/fs/nilfs2/segbuf.c
index 645c786..ab56fe4 100644
--- a/fs/nilfs2/segbuf.c
+++ b/fs/nilfs2/segbuf.c
@@ -40,6 +40,11 @@
};
+static int nilfs_segbuf_write(struct nilfs_segment_buffer *segbuf,
+ struct the_nilfs *nilfs);
+static int nilfs_segbuf_wait(struct nilfs_segment_buffer *segbuf);
+
+
static struct kmem_cache *nilfs_segbuf_cachep;
static void nilfs_segbuf_init_once(void *obj)
@@ -302,6 +307,19 @@
}
}
+int nilfs_write_logs(struct list_head *logs, struct the_nilfs *nilfs)
+{
+ struct nilfs_segment_buffer *segbuf;
+ int ret = 0;
+
+ list_for_each_entry(segbuf, logs, sb_list) {
+ ret = nilfs_segbuf_write(segbuf, nilfs);
+ if (ret)
+ break;
+ }
+ return ret;
+}
+
int nilfs_wait_on_logs(struct list_head *logs)
{
struct nilfs_segment_buffer *segbuf;
diff --git a/fs/nilfs2/segbuf.h b/fs/nilfs2/segbuf.h
index 6af1630..94dfd35 100644
--- a/fs/nilfs2/segbuf.h
+++ b/fs/nilfs2/segbuf.h
@@ -166,13 +166,10 @@
segbuf->sb_sum.nfileblk++;
}
-int nilfs_segbuf_write(struct nilfs_segment_buffer *segbuf,
- struct the_nilfs *nilfs);
-int nilfs_segbuf_wait(struct nilfs_segment_buffer *segbuf);
-
void nilfs_clear_logs(struct list_head *logs);
void nilfs_truncate_logs(struct list_head *logs,
struct nilfs_segment_buffer *last);
+int nilfs_write_logs(struct list_head *logs, struct the_nilfs *nilfs);
int nilfs_wait_on_logs(struct list_head *logs);
static inline void nilfs_destroy_logs(struct list_head *logs)
diff --git a/fs/nilfs2/segment.c b/fs/nilfs2/segment.c
index 105b508..ada2f1b 100644
--- a/fs/nilfs2/segment.c
+++ b/fs/nilfs2/segment.c
@@ -1764,14 +1764,9 @@
static int nilfs_segctor_write(struct nilfs_sc_info *sci,
struct the_nilfs *nilfs)
{
- struct nilfs_segment_buffer *segbuf;
- int ret = 0;
+ int ret;
- list_for_each_entry(segbuf, &sci->sc_segbufs, sb_list) {
- ret = nilfs_segbuf_write(segbuf, nilfs);
- if (ret)
- break;
- }
+ ret = nilfs_write_logs(&sci->sc_segbufs, nilfs);
list_splice_tail_init(&sci->sc_segbufs, &sci->sc_write_logs);
return ret;
}
@@ -1937,8 +1932,7 @@
{
struct nilfs_segment_buffer *segbuf;
struct page *bd_page = NULL, *fs_page = NULL;
- struct nilfs_sb_info *sbi = sci->sc_sbi;
- struct the_nilfs *nilfs = sbi->s_nilfs;
+ struct the_nilfs *nilfs = sci->sc_sbi->s_nilfs;
int update_sr = (sci->sc_super_root != NULL);
list_for_each_entry(segbuf, &sci->sc_write_logs, sb_list) {
@@ -2020,7 +2014,7 @@
if (update_sr) {
nilfs_set_last_segment(nilfs, segbuf->sb_pseg_start,
segbuf->sb_sum.seg_seq, nilfs->ns_cno++);
- sbi->s_super->s_dirt = 1;
+ set_nilfs_sb_dirty(nilfs);
clear_bit(NILFS_SC_HAVE_DELTA, &sci->sc_flags);
clear_bit(NILFS_SC_DIRTY, &sci->sc_flags);
@@ -2425,43 +2419,43 @@
return err;
}
-struct nilfs_segctor_req {
- int mode;
- __u32 seq_accepted;
- int sc_err; /* construction failure */
- int sb_err; /* super block writeback failure */
-};
-
#define FLUSH_FILE_BIT (0x1) /* data file only */
#define FLUSH_DAT_BIT (1 << NILFS_DAT_INO) /* DAT only */
-static void nilfs_segctor_accept(struct nilfs_sc_info *sci,
- struct nilfs_segctor_req *req)
+/**
+ * nilfs_segctor_accept - record accepted sequence count of log-write requests
+ * @sci: segment constructor object
+ */
+static void nilfs_segctor_accept(struct nilfs_sc_info *sci)
{
- req->sc_err = req->sb_err = 0;
spin_lock(&sci->sc_state_lock);
- req->seq_accepted = sci->sc_seq_request;
+ sci->sc_seq_accepted = sci->sc_seq_request;
spin_unlock(&sci->sc_state_lock);
if (sci->sc_timer)
del_timer_sync(sci->sc_timer);
}
-static void nilfs_segctor_notify(struct nilfs_sc_info *sci,
- struct nilfs_segctor_req *req)
+/**
+ * nilfs_segctor_notify - notify the result of request to caller threads
+ * @sci: segment constructor object
+ * @mode: mode of log forming
+ * @err: error code to be notified
+ */
+static void nilfs_segctor_notify(struct nilfs_sc_info *sci, int mode, int err)
{
/* Clear requests (even when the construction failed) */
spin_lock(&sci->sc_state_lock);
- if (req->mode == SC_LSEG_SR) {
+ if (mode == SC_LSEG_SR) {
sci->sc_state &= ~NILFS_SEGCTOR_COMMIT;
- sci->sc_seq_done = req->seq_accepted;
- nilfs_segctor_wakeup(sci, req->sc_err ? : req->sb_err);
+ sci->sc_seq_done = sci->sc_seq_accepted;
+ nilfs_segctor_wakeup(sci, err);
sci->sc_flush_request = 0;
} else {
- if (req->mode == SC_FLUSH_FILE)
+ if (mode == SC_FLUSH_FILE)
sci->sc_flush_request &= ~FLUSH_FILE_BIT;
- else if (req->mode == SC_FLUSH_DAT)
+ else if (mode == SC_FLUSH_DAT)
sci->sc_flush_request &= ~FLUSH_DAT_BIT;
/* re-enable timer if checkpoint creation was not done */
@@ -2472,30 +2466,37 @@
spin_unlock(&sci->sc_state_lock);
}
-static int nilfs_segctor_construct(struct nilfs_sc_info *sci,
- struct nilfs_segctor_req *req)
+/**
+ * nilfs_segctor_construct - form logs and write them to disk
+ * @sci: segment constructor object
+ * @mode: mode of log forming
+ */
+static int nilfs_segctor_construct(struct nilfs_sc_info *sci, int mode)
{
struct nilfs_sb_info *sbi = sci->sc_sbi;
struct the_nilfs *nilfs = sbi->s_nilfs;
int err = 0;
+ nilfs_segctor_accept(sci);
+
if (nilfs_discontinued(nilfs))
- req->mode = SC_LSEG_SR;
- if (!nilfs_segctor_confirm(sci)) {
- err = nilfs_segctor_do_construct(sci, req->mode);
- req->sc_err = err;
- }
+ mode = SC_LSEG_SR;
+ if (!nilfs_segctor_confirm(sci))
+ err = nilfs_segctor_do_construct(sci, mode);
+
if (likely(!err)) {
- if (req->mode != SC_FLUSH_DAT)
+ if (mode != SC_FLUSH_DAT)
atomic_set(&nilfs->ns_ndirtyblks, 0);
if (test_bit(NILFS_SC_SUPER_ROOT, &sci->sc_flags) &&
nilfs_discontinued(nilfs)) {
down_write(&nilfs->ns_sem);
- req->sb_err = nilfs_commit_super(sbi,
- nilfs_altsb_need_update(nilfs));
+ err = nilfs_commit_super(
+ sbi, nilfs_altsb_need_update(nilfs));
up_write(&nilfs->ns_sem);
}
}
+
+ nilfs_segctor_notify(sci, mode, err);
return err;
}
@@ -2526,7 +2527,6 @@
struct nilfs_sc_info *sci = NILFS_SC(sbi);
struct the_nilfs *nilfs = sbi->s_nilfs;
struct nilfs_transaction_info ti;
- struct nilfs_segctor_req req = { .mode = SC_LSEG_SR };
int err;
if (unlikely(!sci))
@@ -2547,10 +2547,8 @@
list_splice_tail_init(&nilfs->ns_gc_inodes, &sci->sc_gc_inodes);
for (;;) {
- nilfs_segctor_accept(sci, &req);
- err = nilfs_segctor_construct(sci, &req);
+ err = nilfs_segctor_construct(sci, SC_LSEG_SR);
nilfs_remove_written_gcinodes(nilfs, &sci->sc_gc_inodes);
- nilfs_segctor_notify(sci, &req);
if (likely(!err))
break;
@@ -2560,6 +2558,16 @@
set_current_state(TASK_INTERRUPTIBLE);
schedule_timeout(sci->sc_interval);
}
+ if (nilfs_test_opt(sbi, DISCARD)) {
+ int ret = nilfs_discard_segments(nilfs, sci->sc_freesegs,
+ sci->sc_nfreesegs);
+ if (ret) {
+ printk(KERN_WARNING
+ "NILFS warning: error %d on discard request, "
+ "turning discards off for the device\n", ret);
+ nilfs_clear_opt(sbi, DISCARD);
+ }
+ }
out_unlock:
sci->sc_freesegs = NULL;
@@ -2573,13 +2581,9 @@
{
struct nilfs_sb_info *sbi = sci->sc_sbi;
struct nilfs_transaction_info ti;
- struct nilfs_segctor_req req = { .mode = mode };
nilfs_transaction_lock(sbi, &ti, 0);
-
- nilfs_segctor_accept(sci, &req);
- nilfs_segctor_construct(sci, &req);
- nilfs_segctor_notify(sci, &req);
+ nilfs_segctor_construct(sci, mode);
/*
* Unclosed segment should be retried. We do this using sc_timer.
@@ -2635,6 +2639,7 @@
static int nilfs_segctor_thread(void *arg)
{
struct nilfs_sc_info *sci = (struct nilfs_sc_info *)arg;
+ struct the_nilfs *nilfs = sci->sc_sbi->s_nilfs;
struct timer_list timer;
int timeout = 0;
@@ -2680,7 +2685,6 @@
} else {
DEFINE_WAIT(wait);
int should_sleep = 1;
- struct the_nilfs *nilfs;
prepare_to_wait(&sci->sc_wait_daemon, &wait,
TASK_INTERRUPTIBLE);
@@ -2701,8 +2705,8 @@
finish_wait(&sci->sc_wait_daemon, &wait);
timeout = ((sci->sc_state & NILFS_SEGCTOR_COMMIT) &&
time_after_eq(jiffies, sci->sc_timer->expires));
- nilfs = sci->sc_sbi->s_nilfs;
- if (sci->sc_super->s_dirt && nilfs_sb_need_update(nilfs))
+
+ if (nilfs_sb_dirty(nilfs) && nilfs_sb_need_update(nilfs))
set_nilfs_discontinued(nilfs);
}
goto loop;
@@ -2797,12 +2801,9 @@
do {
struct nilfs_sb_info *sbi = sci->sc_sbi;
struct nilfs_transaction_info ti;
- struct nilfs_segctor_req req = { .mode = SC_LSEG_SR };
nilfs_transaction_lock(sbi, &ti, 0);
- nilfs_segctor_accept(sci, &req);
- ret = nilfs_segctor_construct(sci, &req);
- nilfs_segctor_notify(sci, &req);
+ ret = nilfs_segctor_construct(sci, SC_LSEG_SR);
nilfs_transaction_unlock(sbi);
} while (ret && retrycount-- > 0);
@@ -2865,8 +2866,15 @@
struct the_nilfs *nilfs = sbi->s_nilfs;
int err;
- /* Each field of nilfs_segctor is cleared through the initialization
- of super-block info */
+ if (NILFS_SC(sbi)) {
+ /*
+ * This happens if the filesystem was remounted
+ * read/write after nilfs_error degenerated it into a
+ * read-only mount.
+ */
+ nilfs_detach_segment_constructor(sbi);
+ }
+
sbi->s_sc_info = nilfs_segctor_new(sbi);
if (!sbi->s_sc_info)
return -ENOMEM;
diff --git a/fs/nilfs2/segment.h b/fs/nilfs2/segment.h
index 3d3ab2f..3155e0c 100644
--- a/fs/nilfs2/segment.h
+++ b/fs/nilfs2/segment.h
@@ -116,6 +116,7 @@
* @sc_wait_daemon: Daemon wait queue
* @sc_wait_task: Start/end wait queue to control segctord task
* @sc_seq_request: Request counter
+ * @sc_seq_accepted: Accepted request count
* @sc_seq_done: Completion counter
* @sc_sync: Request of explicit sync operation
* @sc_interval: Timeout value of background construction
@@ -169,6 +170,7 @@
wait_queue_head_t sc_wait_task;
__u32 sc_seq_request;
+ __u32 sc_seq_accepted;
__u32 sc_seq_done;
int sc_sync;
diff --git a/fs/nilfs2/super.c b/fs/nilfs2/super.c
index 8173fae..92579cc 100644
--- a/fs/nilfs2/super.c
+++ b/fs/nilfs2/super.c
@@ -96,9 +96,6 @@
if (!(sb->s_flags & MS_RDONLY)) {
struct the_nilfs *nilfs = sbi->s_nilfs;
- if (!nilfs_test_opt(sbi, ERRORS_CONT))
- nilfs_detach_segment_constructor(sbi);
-
down_write(&nilfs->ns_sem);
if (!(nilfs->ns_mount_state & NILFS_ERROR_FS)) {
nilfs->ns_mount_state |= NILFS_ERROR_FS;
@@ -301,7 +298,7 @@
memcpy(sbp[1], sbp[0], nilfs->ns_sbsize);
nilfs->ns_sbwtime[1] = t;
}
- sbi->s_super->s_dirt = 0;
+ clear_nilfs_sb_dirty(nilfs);
return nilfs_sync_super(sbi, dupsb);
}
@@ -345,7 +342,7 @@
err = nilfs_construct_segment(sb);
down_write(&nilfs->ns_sem);
- if (sb->s_dirt)
+ if (nilfs_sb_dirty(nilfs))
nilfs_commit_super(sbi, 1);
up_write(&nilfs->ns_sem);
@@ -481,6 +478,8 @@
seq_printf(seq, ",order=strict");
if (nilfs_test_opt(sbi, NORECOVERY))
seq_printf(seq, ",norecovery");
+ if (nilfs_test_opt(sbi, DISCARD))
+ seq_printf(seq, ",discard");
return 0;
}
@@ -550,7 +549,7 @@
enum {
Opt_err_cont, Opt_err_panic, Opt_err_ro,
Opt_nobarrier, Opt_snapshot, Opt_order, Opt_norecovery,
- Opt_err,
+ Opt_discard, Opt_err,
};
static match_table_t tokens = {
@@ -561,6 +560,7 @@
{Opt_snapshot, "cp=%u"},
{Opt_order, "order=%s"},
{Opt_norecovery, "norecovery"},
+ {Opt_discard, "discard"},
{Opt_err, NULL}
};
@@ -614,6 +614,9 @@
case Opt_norecovery:
nilfs_set_opt(sbi, NORECOVERY);
break;
+ case Opt_discard:
+ nilfs_set_opt(sbi, DISCARD);
+ break;
default:
printk(KERN_ERR
"NILFS: Unrecognized mount option \"%s\"\n", p);
diff --git a/fs/nilfs2/the_nilfs.c b/fs/nilfs2/the_nilfs.c
index 6241e17..92733d5 100644
--- a/fs/nilfs2/the_nilfs.c
+++ b/fs/nilfs2/the_nilfs.c
@@ -646,6 +646,44 @@
goto out;
}
+int nilfs_discard_segments(struct the_nilfs *nilfs, __u64 *segnump,
+ size_t nsegs)
+{
+ sector_t seg_start, seg_end;
+ sector_t start = 0, nblocks = 0;
+ unsigned int sects_per_block;
+ __u64 *sn;
+ int ret = 0;
+
+ sects_per_block = (1 << nilfs->ns_blocksize_bits) /
+ bdev_logical_block_size(nilfs->ns_bdev);
+ for (sn = segnump; sn < segnump + nsegs; sn++) {
+ nilfs_get_segment_range(nilfs, *sn, &seg_start, &seg_end);
+
+ if (!nblocks) {
+ start = seg_start;
+ nblocks = seg_end - seg_start + 1;
+ } else if (start + nblocks == seg_start) {
+ nblocks += seg_end - seg_start + 1;
+ } else {
+ ret = blkdev_issue_discard(nilfs->ns_bdev,
+ start * sects_per_block,
+ nblocks * sects_per_block,
+ GFP_NOFS,
+ DISCARD_FL_BARRIER);
+ if (ret < 0)
+ return ret;
+ nblocks = 0;
+ }
+ }
+ if (nblocks)
+ ret = blkdev_issue_discard(nilfs->ns_bdev,
+ start * sects_per_block,
+ nblocks * sects_per_block,
+ GFP_NOFS, DISCARD_FL_BARRIER);
+ return ret;
+}
+
int nilfs_count_free_blocks(struct the_nilfs *nilfs, sector_t *nblocks)
{
struct inode *dat = nilfs_dat_inode(nilfs);
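
Editor's note: nilfs_discard_segments() above avoids issuing one discard request per segment by accumulating contiguous segments into a single (start, nblocks) run and only calling blkdev_issue_discard() when the run breaks or the list ends. The same run-coalescing logic in a standalone sketch is below; `issue_discard()` is a stand-in for the block-layer call, and in this sketch the segment that breaks a run immediately starts the next one.

```c
#include <stdio.h>
#include <stdint.h>
#include <stddef.h>

/* stand-in for blkdev_issue_discard(): just report the merged range */
static int issue_discard(uint64_t start, uint64_t nblocks)
{
	printf("discard blocks [%llu, %llu)\n",
	       (unsigned long long)start,
	       (unsigned long long)(start + nblocks));
	return 0;
}

/* coalesce contiguous [start, end] ranges before issuing them */
static int discard_ranges(const uint64_t (*ranges)[2], size_t n)
{
	uint64_t start = 0, nblocks = 0;
	int ret = 0;

	for (size_t i = 0; i < n; i++) {
		uint64_t seg_start = ranges[i][0], seg_end = ranges[i][1];

		if (!nblocks) {
			start = seg_start;
			nblocks = seg_end - seg_start + 1;
		} else if (start + nblocks == seg_start) {
			nblocks += seg_end - seg_start + 1;	/* extend the run */
		} else {
			ret = issue_discard(start, nblocks);	/* run broke: flush it */
			if (ret < 0)
				return ret;
			start = seg_start;
			nblocks = seg_end - seg_start + 1;
		}
	}
	if (nblocks)
		ret = issue_discard(start, nblocks);		/* final run */
	return ret;
}

int main(void)
{
	/* segments 0-7 and 8-15 are adjacent and merge; 32-39 stands alone */
	const uint64_t segs[][2] = { {0, 7}, {8, 15}, {32, 39} };

	return discard_ranges(segs, 3) ? 1 : 0;
}
```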
diff --git a/fs/nilfs2/the_nilfs.h b/fs/nilfs2/the_nilfs.h
index 589786e..e9795f1 100644
--- a/fs/nilfs2/the_nilfs.h
+++ b/fs/nilfs2/the_nilfs.h
@@ -38,6 +38,7 @@
the latest checkpoint was loaded */
THE_NILFS_DISCONTINUED, /* 'next' pointer chain has broken */
THE_NILFS_GC_RUNNING, /* gc process is running */
+ THE_NILFS_SB_DIRTY, /* super block is dirty */
};
/**
@@ -197,6 +198,7 @@
THE_NILFS_FNS(LOADED, loaded)
THE_NILFS_FNS(DISCONTINUED, discontinued)
THE_NILFS_FNS(GC_RUNNING, gc_running)
+THE_NILFS_FNS(SB_DIRTY, sb_dirty)
/* Minimum interval of periodical update of superblocks (in seconds) */
#define NILFS_SB_FREQ 10
@@ -221,6 +223,7 @@
void put_nilfs(struct the_nilfs *);
int init_nilfs(struct the_nilfs *, struct nilfs_sb_info *, char *);
int load_nilfs(struct the_nilfs *, struct nilfs_sb_info *);
+int nilfs_discard_segments(struct the_nilfs *, __u64 *, size_t);
int nilfs_count_free_blocks(struct the_nilfs *, sector_t *);
struct nilfs_sb_info *nilfs_find_sbinfo(struct the_nilfs *, int, __u64);
int nilfs_checkpoint_is_mounted(struct the_nilfs *, __u64, int);
diff --git a/fs/ocfs2/Makefile b/fs/ocfs2/Makefile
index 600d2d2..791c088 100644
--- a/fs/ocfs2/Makefile
+++ b/fs/ocfs2/Makefile
@@ -46,6 +46,7 @@
ocfs2_stack_o2cb-objs := stack_o2cb.o
ocfs2_stack_user-objs := stack_user.o
+obj-$(CONFIG_OCFS2_FS) += dlmfs/
# cluster/ is always needed when OCFS2_FS for masklog support
obj-$(CONFIG_OCFS2_FS) += cluster/
obj-$(CONFIG_OCFS2_FS_O2CB) += dlm/
diff --git a/fs/ocfs2/alloc.c b/fs/ocfs2/alloc.c
index d17bdc7..2bbe1ec 100644
--- a/fs/ocfs2/alloc.c
+++ b/fs/ocfs2/alloc.c
@@ -1050,7 +1050,8 @@
strcpy(eb->h_signature, OCFS2_EXTENT_BLOCK_SIGNATURE);
eb->h_blkno = cpu_to_le64(first_blkno);
eb->h_fs_generation = cpu_to_le32(osb->fs_generation);
- eb->h_suballoc_slot = cpu_to_le16(osb->slot_num);
+ eb->h_suballoc_slot =
+ cpu_to_le16(meta_ac->ac_alloc_slot);
eb->h_suballoc_bit = cpu_to_le16(suballoc_bit_start);
eb->h_list.l_count =
cpu_to_le16(ocfs2_extent_recs_per_eb(osb->sb));
@@ -6037,7 +6038,7 @@
if (status < 0)
mlog_errno(status);
else
- ocfs2_init_inode_steal_slot(osb);
+ ocfs2_init_steal_slots(osb);
mlog_exit(status);
}
diff --git a/fs/ocfs2/aops.c b/fs/ocfs2/aops.c
index 7e9df11..4c2a6d2 100644
--- a/fs/ocfs2/aops.c
+++ b/fs/ocfs2/aops.c
@@ -577,8 +577,9 @@
goto bail;
}
- /* We should already CoW the refcounted extent. */
- BUG_ON(ext_flags & OCFS2_EXT_REFCOUNTED);
+ /* We should already CoW the refcounted extent in case of create. */
+ BUG_ON(create && (ext_flags & OCFS2_EXT_REFCOUNTED));
+
/*
* get_more_blocks() expects us to describe a hole by clearing
* the mapped bit on bh_result().
diff --git a/fs/ocfs2/cluster/masklog.c b/fs/ocfs2/cluster/masklog.c
index 1cd2934..b39da87 100644
--- a/fs/ocfs2/cluster/masklog.c
+++ b/fs/ocfs2/cluster/masklog.c
@@ -112,6 +112,7 @@
define_mask(XATTR),
define_mask(QUOTA),
define_mask(REFCOUNT),
+ define_mask(BASTS),
define_mask(ERROR),
define_mask(NOTICE),
define_mask(KTHREAD),
diff --git a/fs/ocfs2/cluster/masklog.h b/fs/ocfs2/cluster/masklog.h
index 9b4d117..3dfddbe 100644
--- a/fs/ocfs2/cluster/masklog.h
+++ b/fs/ocfs2/cluster/masklog.h
@@ -114,6 +114,7 @@
#define ML_XATTR 0x0000000020000000ULL /* ocfs2 extended attributes */
#define ML_QUOTA 0x0000000040000000ULL /* ocfs2 quota operations */
#define ML_REFCOUNT 0x0000000080000000ULL /* refcount tree operations */
+#define ML_BASTS 0x0000001000000000ULL /* dlmglue asts and basts */
/* bits that are infrequently given and frequently matched in the high word */
#define ML_ERROR 0x0000000100000000ULL /* sent to KERN_ERR */
#define ML_NOTICE 0x0000000200000000ULL /* sent to KERN_NOTICE */
@@ -194,9 +195,9 @@
* previous token if args expands to nothing.
*/
#define __mlog_printk(level, fmt, args...) \
- printk(level "(%u,%lu):%s:%d " fmt, task_pid_nr(current), \
- __mlog_cpu_guess, __PRETTY_FUNCTION__, __LINE__ , \
- ##args)
+ printk(level "(%s,%u,%lu):%s:%d " fmt, current->comm, \
+ task_pid_nr(current), __mlog_cpu_guess, \
+ __PRETTY_FUNCTION__, __LINE__ , ##args)
#define mlog(mask, fmt, args...) do { \
u64 __m = MLOG_MASK_PREFIX | (mask); \
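
Editor's note: the masklog changes add a new ML_BASTS bit and extend __mlog_printk() to prefix each line with current->comm alongside the pid; the facility itself is a macro that only emits a message when its mask bit is enabled. A compact userspace sketch of such a mask-filtered logging macro follows; the mask values are invented and `##__VA_ARGS__` assumes gcc/clang.

```c
#include <stdio.h>
#include <stdint.h>

/* invented mask bits, standing in for the ML_* values in masklog.h */
#define LOG_DLM    0x1ULL
#define LOG_BASTS  0x2ULL
#define LOG_ERROR  0x4ULL

static uint64_t log_mask = LOG_ERROR | LOG_BASTS;

/* only emit the message when its bit is enabled; prefix it with the
 * calling function and line, loosely mirroring __mlog_printk() */
#define mlog(mask, fmt, ...)						\
	do {								\
		if ((mask) & log_mask)					\
			fprintf(stderr, "%s:%d " fmt,			\
				__func__, __LINE__, ##__VA_ARGS__);	\
	} while (0)

int main(void)
{
	mlog(LOG_BASTS, "bast fired on lock %d\n", 7);	/* printed */
	mlog(LOG_DLM, "dlm chatter, filtered out\n");	/* suppressed */
	return 0;
}
```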
diff --git a/fs/ocfs2/dir.c b/fs/ocfs2/dir.c
index 28c3ec2..765d66c 100644
--- a/fs/ocfs2/dir.c
+++ b/fs/ocfs2/dir.c
@@ -2439,7 +2439,7 @@
dx_root = (struct ocfs2_dx_root_block *)dx_root_bh->b_data;
memset(dx_root, 0, osb->sb->s_blocksize);
strcpy(dx_root->dr_signature, OCFS2_DX_ROOT_SIGNATURE);
- dx_root->dr_suballoc_slot = cpu_to_le16(osb->slot_num);
+ dx_root->dr_suballoc_slot = cpu_to_le16(meta_ac->ac_alloc_slot);
dx_root->dr_suballoc_bit = cpu_to_le16(dr_suballoc_bit);
dx_root->dr_fs_generation = cpu_to_le32(osb->fs_generation);
dx_root->dr_blkno = cpu_to_le64(dr_blkno);
diff --git a/fs/ocfs2/dlm/Makefile b/fs/ocfs2/dlm/Makefile
index 1903613..dcebf0d 100644
--- a/fs/ocfs2/dlm/Makefile
+++ b/fs/ocfs2/dlm/Makefile
@@ -1,8 +1,7 @@
EXTRA_CFLAGS += -Ifs/ocfs2
-obj-$(CONFIG_OCFS2_FS_O2CB) += ocfs2_dlm.o ocfs2_dlmfs.o
+obj-$(CONFIG_OCFS2_FS_O2CB) += ocfs2_dlm.o
ocfs2_dlm-objs := dlmdomain.o dlmdebug.o dlmthread.o dlmrecovery.o \
dlmmaster.o dlmast.o dlmconvert.o dlmlock.o dlmunlock.o dlmver.o
-ocfs2_dlmfs-objs := userdlm.o dlmfs.o dlmfsver.o
diff --git a/fs/ocfs2/dlm/dlmrecovery.c b/fs/ocfs2/dlm/dlmrecovery.c
index 344bcf9..b4f99de 100644
--- a/fs/ocfs2/dlm/dlmrecovery.c
+++ b/fs/ocfs2/dlm/dlmrecovery.c
@@ -310,7 +310,7 @@
mlog(0, "dlm thread running for %s...\n", dlm->name);
while (!kthread_should_stop()) {
- if (dlm_joined(dlm)) {
+ if (dlm_domain_fully_joined(dlm)) {
status = dlm_do_recovery(dlm);
if (status == -EAGAIN) {
/* do not sleep, recheck immediately. */
diff --git a/fs/ocfs2/dlmfs/Makefile b/fs/ocfs2/dlmfs/Makefile
new file mode 100644
index 0000000..df69b48
--- /dev/null
+++ b/fs/ocfs2/dlmfs/Makefile
@@ -0,0 +1,5 @@
+EXTRA_CFLAGS += -Ifs/ocfs2
+
+obj-$(CONFIG_OCFS2_FS) += ocfs2_dlmfs.o
+
+ocfs2_dlmfs-objs := userdlm.o dlmfs.o dlmfsver.o
diff --git a/fs/ocfs2/dlm/dlmfs.c b/fs/ocfs2/dlmfs/dlmfs.c
similarity index 82%
rename from fs/ocfs2/dlm/dlmfs.c
rename to fs/ocfs2/dlmfs/dlmfs.c
index 02bf178..1b0de15 100644
--- a/fs/ocfs2/dlm/dlmfs.c
+++ b/fs/ocfs2/dlmfs/dlmfs.c
@@ -43,24 +43,17 @@
#include <linux/init.h>
#include <linux/string.h>
#include <linux/backing-dev.h>
+#include <linux/poll.h>
#include <asm/uaccess.h>
-
-#include "cluster/nodemanager.h"
-#include "cluster/heartbeat.h"
-#include "cluster/tcp.h"
-
-#include "dlmapi.h"
-
+#include "stackglue.h"
#include "userdlm.h"
-
#include "dlmfsver.h"
#define MLOG_MASK_PREFIX ML_DLMFS
#include "cluster/masklog.h"
-#include "ocfs2_lockingver.h"
static const struct super_operations dlmfs_ops;
static const struct file_operations dlmfs_file_operations;
@@ -71,15 +64,46 @@
struct workqueue_struct *user_dlm_worker;
+
+
/*
- * This is the userdlmfs locking protocol version.
+ * These are the ABI capabilities of dlmfs.
*
- * See fs/ocfs2/dlmglue.c for more details on locking versions.
+ * Over time, dlmfs has added some features that were not part of the
+ * initial ABI. Unfortunately, some of these features are not detectable
+ * via standard usage. For example, Linux's default poll always returns
+ * POLLIN, so there is no way for a caller of poll(2) to know when dlmfs
+ * added poll support. Instead, we provide this list of new capabilities.
+ *
+ * Capabilities is a read-only attribute. We do it as a module parameter
+ * so we can discover it whether dlmfs is built in, loaded, or even not
+ * loaded.
+ *
+ * The ABI features are local to this machine's dlmfs mount. This is
+ * distinct from the locking protocol, which is concerned with inter-node
+ * interaction.
+ *
+ * Capabilities:
+ * - bast : POLLIN against the file descriptor of a held lock
+ * signifies a bast fired on the lock.
*/
-static const struct dlm_protocol_version user_locking_protocol = {
- .pv_major = OCFS2_LOCKING_PROTOCOL_MAJOR,
- .pv_minor = OCFS2_LOCKING_PROTOCOL_MINOR,
-};
+#define DLMFS_CAPABILITIES "bast stackglue"
+static int param_set_dlmfs_capabilities(const char *val,
+ struct kernel_param *kp)
+{
+ printk(KERN_ERR "%s: readonly parameter\n", kp->name);
+ return -EINVAL;
+}
+static int param_get_dlmfs_capabilities(char *buffer,
+ struct kernel_param *kp)
+{
+ return strlcpy(buffer, DLMFS_CAPABILITIES,
+ strlen(DLMFS_CAPABILITIES) + 1);
+}
+module_param_call(capabilities, param_set_dlmfs_capabilities,
+ param_get_dlmfs_capabilities, NULL, 0444);
+MODULE_PARM_DESC(capabilities, DLMFS_CAPABILITIES);
+
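For illustration, a userspace consumer of the "bast" capability described above might look like the sketch below. It is hypothetical and not part of this patch: the /dlm mount point, the "mydomain"/"mylock" names, and the O_RDWR-for-exclusive convention are assumptions about a typical dlmfs setup.

#include <fcntl.h>
#include <poll.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	/* Assumed paths: a dlmfs mount at /dlm containing domain "mydomain". */
	int fd = open("/dlm/mydomain/mylock", O_RDWR | O_CREAT, 0600);
	struct pollfd pfd = { .fd = fd, .events = POLLIN };

	if (fd < 0)
		return 1;

	/*
	 * With the "bast" capability, POLLIN on a held lock's descriptor
	 * means a blocking AST fired, i.e. another node wants a
	 * conflicting lock.  Without it, poll() would always report POLLIN.
	 */
	if (poll(&pfd, 1, -1) > 0 && (pfd.revents & POLLIN))
		printf("bast fired; releasing the lock\n");

	close(fd);	/* closing the file releases the lock */
	return 0;
}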
/*
* decodes a set of open flags into a valid lock level and a set of flags.
@@ -179,13 +203,46 @@
return 0;
}
+/*
+ * We do ->setattr() just to override size changes. Our size is the size
+ * of the LVB and nothing else.
+ */
+static int dlmfs_file_setattr(struct dentry *dentry, struct iattr *attr)
+{
+ int error;
+ struct inode *inode = dentry->d_inode;
+
+ attr->ia_valid &= ~ATTR_SIZE;
+ error = inode_change_ok(inode, attr);
+ if (!error)
+ error = inode_setattr(inode, attr);
+
+ return error;
+}
+
+static unsigned int dlmfs_file_poll(struct file *file, poll_table *wait)
+{
+ int event = 0;
+ struct inode *inode = file->f_path.dentry->d_inode;
+ struct dlmfs_inode_private *ip = DLMFS_I(inode);
+
+ poll_wait(file, &ip->ip_lockres.l_event, wait);
+
+ spin_lock(&ip->ip_lockres.l_lock);
+ if (ip->ip_lockres.l_flags & USER_LOCK_BLOCKED)
+ event = POLLIN | POLLRDNORM;
+ spin_unlock(&ip->ip_lockres.l_lock);
+
+ return event;
+}
+
static ssize_t dlmfs_file_read(struct file *filp,
char __user *buf,
size_t count,
loff_t *ppos)
{
int bytes_left;
- ssize_t readlen;
+ ssize_t readlen, got;
char *lvb_buf;
struct inode *inode = filp->f_path.dentry->d_inode;
@@ -211,9 +268,13 @@
if (!lvb_buf)
return -ENOMEM;
- user_dlm_read_lvb(inode, lvb_buf, readlen);
- bytes_left = __copy_to_user(buf, lvb_buf, readlen);
- readlen -= bytes_left;
+ got = user_dlm_read_lvb(inode, lvb_buf, readlen);
+ if (got) {
+ BUG_ON(got != readlen);
+ bytes_left = __copy_to_user(buf, lvb_buf, readlen);
+ readlen -= bytes_left;
+ } else
+ readlen = 0;
kfree(lvb_buf);
@@ -272,7 +333,7 @@
struct dlmfs_inode_private *ip =
(struct dlmfs_inode_private *) foo;
- ip->ip_dlm = NULL;
+ ip->ip_conn = NULL;
ip->ip_parent = NULL;
inode_init_once(&ip->ip_vfs_inode);
@@ -314,14 +375,14 @@
goto clear_fields;
}
- mlog(0, "we're a directory, ip->ip_dlm = 0x%p\n", ip->ip_dlm);
+ mlog(0, "we're a directory, ip->ip_conn = 0x%p\n", ip->ip_conn);
/* we must be a directory. If required, let's unregister the
* dlm context now. */
- if (ip->ip_dlm)
- user_dlm_unregister_context(ip->ip_dlm);
+ if (ip->ip_conn)
+ user_dlm_unregister(ip->ip_conn);
clear_fields:
ip->ip_parent = NULL;
- ip->ip_dlm = NULL;
+ ip->ip_conn = NULL;
}
static struct backing_dev_info dlmfs_backing_dev_info = {
@@ -371,7 +432,7 @@
inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME;
ip = DLMFS_I(inode);
- ip->ip_dlm = DLMFS_I(parent)->ip_dlm;
+ ip->ip_conn = DLMFS_I(parent)->ip_conn;
switch (mode & S_IFMT) {
default:
@@ -425,13 +486,12 @@
struct inode *inode = NULL;
struct qstr *domain = &dentry->d_name;
struct dlmfs_inode_private *ip;
- struct dlm_ctxt *dlm;
- struct dlm_protocol_version proto = user_locking_protocol;
+ struct ocfs2_cluster_connection *conn;
mlog(0, "mkdir %.*s\n", domain->len, domain->name);
/* verify that we have a proper domain */
- if (domain->len >= O2NM_MAX_NAME_LEN) {
+ if (domain->len >= GROUP_NAME_MAX) {
status = -EINVAL;
mlog(ML_ERROR, "invalid domain name for directory.\n");
goto bail;
@@ -446,14 +506,14 @@
ip = DLMFS_I(inode);
- dlm = user_dlm_register_context(domain, &proto);
- if (IS_ERR(dlm)) {
- status = PTR_ERR(dlm);
+ conn = user_dlm_register(domain);
+ if (IS_ERR(conn)) {
+ status = PTR_ERR(conn);
mlog(ML_ERROR, "Error %d could not register domain \"%.*s\"\n",
status, domain->len, domain->name);
goto bail;
}
- ip->ip_dlm = dlm;
+ ip->ip_conn = conn;
inc_nlink(dir);
d_instantiate(dentry, inode);
@@ -549,6 +609,7 @@
static const struct file_operations dlmfs_file_operations = {
.open = dlmfs_file_open,
.release = dlmfs_file_release,
+ .poll = dlmfs_file_poll,
.read = dlmfs_file_read,
.write = dlmfs_file_write,
};
@@ -576,6 +637,7 @@
static const struct inode_operations dlmfs_file_inode_operations = {
.getattr = simple_getattr,
+ .setattr = dlmfs_file_setattr,
};
static int dlmfs_get_sb(struct file_system_type *fs_type,
@@ -620,6 +682,7 @@
}
cleanup_worker = 1;
+ user_dlm_set_locking_protocol();
status = register_filesystem(&dlmfs_fs_type);
bail:
if (status) {
diff --git a/fs/ocfs2/dlm/dlmfsver.c b/fs/ocfs2/dlmfs/dlmfsver.c
similarity index 100%
rename from fs/ocfs2/dlm/dlmfsver.c
rename to fs/ocfs2/dlmfs/dlmfsver.c
diff --git a/fs/ocfs2/dlm/dlmfsver.h b/fs/ocfs2/dlmfs/dlmfsver.h
similarity index 100%
rename from fs/ocfs2/dlm/dlmfsver.h
rename to fs/ocfs2/dlmfs/dlmfsver.h
diff --git a/fs/ocfs2/dlm/userdlm.c b/fs/ocfs2/dlmfs/userdlm.c
similarity index 67%
rename from fs/ocfs2/dlm/userdlm.c
rename to fs/ocfs2/dlmfs/userdlm.c
index 4cb1d3d..0499e3f 100644
--- a/fs/ocfs2/dlm/userdlm.c
+++ b/fs/ocfs2/dlmfs/userdlm.c
@@ -34,18 +34,19 @@
#include <linux/types.h>
#include <linux/crc32.h>
-
-#include "cluster/nodemanager.h"
-#include "cluster/heartbeat.h"
-#include "cluster/tcp.h"
-
-#include "dlmapi.h"
-
+#include "ocfs2_lockingver.h"
+#include "stackglue.h"
#include "userdlm.h"
#define MLOG_MASK_PREFIX ML_DLMFS
#include "cluster/masklog.h"
+
+static inline struct user_lock_res *user_lksb_to_lock_res(struct ocfs2_dlm_lksb *lksb)
+{
+ return container_of(lksb, struct user_lock_res, l_lksb);
+}
+
static inline int user_check_wait_flag(struct user_lock_res *lockres,
int flag)
{
@@ -73,15 +74,15 @@
}
/* I heart container_of... */
-static inline struct dlm_ctxt *
-dlm_ctxt_from_user_lockres(struct user_lock_res *lockres)
+static inline struct ocfs2_cluster_connection *
+cluster_connection_from_user_lockres(struct user_lock_res *lockres)
{
struct dlmfs_inode_private *ip;
ip = container_of(lockres,
struct dlmfs_inode_private,
ip_lockres);
- return ip->ip_dlm;
+ return ip->ip_conn;
}
static struct inode *
@@ -103,9 +104,9 @@
}
#define user_log_dlm_error(_func, _stat, _lockres) do { \
- mlog(ML_ERROR, "Dlm error \"%s\" while calling %s on " \
- "resource %.*s: %s\n", dlm_errname(_stat), _func, \
- _lockres->l_namelen, _lockres->l_name, dlm_errmsg(_stat)); \
+ mlog(ML_ERROR, "Dlm error %d while calling %s on " \
+ "resource %.*s\n", _stat, _func, \
+ _lockres->l_namelen, _lockres->l_name); \
} while (0)
/* WARNING: This function lives in a world where the only three lock
@@ -113,34 +114,35 @@
* lock types are added. */
static inline int user_highest_compat_lock_level(int level)
{
- int new_level = LKM_EXMODE;
+ int new_level = DLM_LOCK_EX;
- if (level == LKM_EXMODE)
- new_level = LKM_NLMODE;
- else if (level == LKM_PRMODE)
- new_level = LKM_PRMODE;
+ if (level == DLM_LOCK_EX)
+ new_level = DLM_LOCK_NL;
+ else if (level == DLM_LOCK_PR)
+ new_level = DLM_LOCK_PR;
return new_level;
}
-static void user_ast(void *opaque)
+static void user_ast(struct ocfs2_dlm_lksb *lksb)
{
- struct user_lock_res *lockres = opaque;
- struct dlm_lockstatus *lksb;
+ struct user_lock_res *lockres = user_lksb_to_lock_res(lksb);
+ int status;
- mlog(0, "AST fired for lockres %.*s\n", lockres->l_namelen,
- lockres->l_name);
+ mlog(ML_BASTS, "AST fired for lockres %.*s, level %d => %d\n",
+ lockres->l_namelen, lockres->l_name, lockres->l_level,
+ lockres->l_requested);
spin_lock(&lockres->l_lock);
- lksb = &(lockres->l_lksb);
- if (lksb->status != DLM_NORMAL) {
+ status = ocfs2_dlm_lock_status(&lockres->l_lksb);
+ if (status) {
mlog(ML_ERROR, "lksb status value of %u on lockres %.*s\n",
- lksb->status, lockres->l_namelen, lockres->l_name);
+ status, lockres->l_namelen, lockres->l_name);
spin_unlock(&lockres->l_lock);
return;
}
- mlog_bug_on_msg(lockres->l_requested == LKM_IVMODE,
+ mlog_bug_on_msg(lockres->l_requested == DLM_LOCK_IV,
"Lockres %.*s, requested ivmode. flags 0x%x\n",
lockres->l_namelen, lockres->l_name, lockres->l_flags);
@@ -148,13 +150,13 @@
if (lockres->l_requested < lockres->l_level) {
if (lockres->l_requested <=
user_highest_compat_lock_level(lockres->l_blocking)) {
- lockres->l_blocking = LKM_NLMODE;
+ lockres->l_blocking = DLM_LOCK_NL;
lockres->l_flags &= ~USER_LOCK_BLOCKED;
}
}
lockres->l_level = lockres->l_requested;
- lockres->l_requested = LKM_IVMODE;
+ lockres->l_requested = DLM_LOCK_IV;
lockres->l_flags |= USER_LOCK_ATTACHED;
lockres->l_flags &= ~USER_LOCK_BUSY;
@@ -193,11 +195,11 @@
return;
switch (lockres->l_blocking) {
- case LKM_EXMODE:
+ case DLM_LOCK_EX:
if (!lockres->l_ex_holders && !lockres->l_ro_holders)
queue = 1;
break;
- case LKM_PRMODE:
+ case DLM_LOCK_PR:
if (!lockres->l_ex_holders)
queue = 1;
break;
@@ -209,12 +211,12 @@
__user_dlm_queue_lockres(lockres);
}
-static void user_bast(void *opaque, int level)
+static void user_bast(struct ocfs2_dlm_lksb *lksb, int level)
{
- struct user_lock_res *lockres = opaque;
+ struct user_lock_res *lockres = user_lksb_to_lock_res(lksb);
- mlog(0, "Blocking AST fired for lockres %.*s. Blocking level %d\n",
- lockres->l_namelen, lockres->l_name, level);
+ mlog(ML_BASTS, "BAST fired for lockres %.*s, blocking %d, level %d\n",
+ lockres->l_namelen, lockres->l_name, level, lockres->l_level);
spin_lock(&lockres->l_lock);
lockres->l_flags |= USER_LOCK_BLOCKED;
@@ -227,15 +229,15 @@
wake_up(&lockres->l_event);
}
-static void user_unlock_ast(void *opaque, enum dlm_status status)
+static void user_unlock_ast(struct ocfs2_dlm_lksb *lksb, int status)
{
- struct user_lock_res *lockres = opaque;
+ struct user_lock_res *lockres = user_lksb_to_lock_res(lksb);
- mlog(0, "UNLOCK AST called on lock %.*s\n", lockres->l_namelen,
- lockres->l_name);
+ mlog(ML_BASTS, "UNLOCK AST fired for lockres %.*s, flags 0x%x\n",
+ lockres->l_namelen, lockres->l_name, lockres->l_flags);
- if (status != DLM_NORMAL && status != DLM_CANCELGRANT)
- mlog(ML_ERROR, "Dlm returns status %d\n", status);
+ if (status)
+ mlog(ML_ERROR, "dlm returns status %d\n", status);
spin_lock(&lockres->l_lock);
/* The teardown flag gets set early during the unlock process,
@@ -243,7 +245,7 @@
* for a concurrent cancel. */
if (lockres->l_flags & USER_LOCK_IN_TEARDOWN
&& !(lockres->l_flags & USER_LOCK_IN_CANCEL)) {
- lockres->l_level = LKM_IVMODE;
+ lockres->l_level = DLM_LOCK_IV;
} else if (status == DLM_CANCELGRANT) {
/* We tried to cancel a convert request, but it was
* already granted. Don't clear the busy flag - the
@@ -254,7 +256,7 @@
} else {
BUG_ON(!(lockres->l_flags & USER_LOCK_IN_CANCEL));
/* Cancel succeeded, we want to re-queue */
- lockres->l_requested = LKM_IVMODE; /* cancel an
+ lockres->l_requested = DLM_LOCK_IV; /* cancel an
* upconvert
* request. */
lockres->l_flags &= ~USER_LOCK_IN_CANCEL;
@@ -271,6 +273,21 @@
wake_up(&lockres->l_event);
}
+/*
+ * This is the userdlmfs locking protocol version.
+ *
+ * See fs/ocfs2/dlmglue.c for more details on locking versions.
+ */
+static struct ocfs2_locking_protocol user_dlm_lproto = {
+ .lp_max_version = {
+ .pv_major = OCFS2_LOCKING_PROTOCOL_MAJOR,
+ .pv_minor = OCFS2_LOCKING_PROTOCOL_MINOR,
+ },
+ .lp_lock_ast = user_ast,
+ .lp_blocking_ast = user_bast,
+ .lp_unlock_ast = user_unlock_ast,
+};
+
static inline void user_dlm_drop_inode_ref(struct user_lock_res *lockres)
{
struct inode *inode;
@@ -283,10 +300,10 @@
int new_level, status;
struct user_lock_res *lockres =
container_of(work, struct user_lock_res, l_work);
- struct dlm_ctxt *dlm = dlm_ctxt_from_user_lockres(lockres);
+ struct ocfs2_cluster_connection *conn =
+ cluster_connection_from_user_lockres(lockres);
- mlog(0, "processing lockres %.*s\n", lockres->l_namelen,
- lockres->l_name);
+ mlog(0, "lockres %.*s\n", lockres->l_namelen, lockres->l_name);
spin_lock(&lockres->l_lock);
@@ -304,17 +321,23 @@
* flag, and finally we might get another bast which re-queues
* us before our ast for the downconvert is called. */
if (!(lockres->l_flags & USER_LOCK_BLOCKED)) {
+ mlog(ML_BASTS, "lockres %.*s USER_LOCK_BLOCKED\n",
+ lockres->l_namelen, lockres->l_name);
spin_unlock(&lockres->l_lock);
goto drop_ref;
}
if (lockres->l_flags & USER_LOCK_IN_TEARDOWN) {
+ mlog(ML_BASTS, "lockres %.*s USER_LOCK_IN_TEARDOWN\n",
+ lockres->l_namelen, lockres->l_name);
spin_unlock(&lockres->l_lock);
goto drop_ref;
}
if (lockres->l_flags & USER_LOCK_BUSY) {
if (lockres->l_flags & USER_LOCK_IN_CANCEL) {
+ mlog(ML_BASTS, "lockres %.*s USER_LOCK_IN_CANCEL\n",
+ lockres->l_namelen, lockres->l_name);
spin_unlock(&lockres->l_lock);
goto drop_ref;
}
@@ -322,32 +345,31 @@
lockres->l_flags |= USER_LOCK_IN_CANCEL;
spin_unlock(&lockres->l_lock);
- status = dlmunlock(dlm,
- &lockres->l_lksb,
- LKM_CANCEL,
- user_unlock_ast,
- lockres);
- if (status != DLM_NORMAL)
- user_log_dlm_error("dlmunlock", status, lockres);
+ status = ocfs2_dlm_unlock(conn, &lockres->l_lksb,
+ DLM_LKF_CANCEL);
+ if (status)
+ user_log_dlm_error("ocfs2_dlm_unlock", status, lockres);
goto drop_ref;
}
/* If there are still incompat holders, we can exit safely
* without worrying about re-queueing this lock as that will
* happen on the last call to user_cluster_unlock. */
- if ((lockres->l_blocking == LKM_EXMODE)
+ if ((lockres->l_blocking == DLM_LOCK_EX)
&& (lockres->l_ex_holders || lockres->l_ro_holders)) {
spin_unlock(&lockres->l_lock);
- mlog(0, "can't downconvert for ex: ro = %u, ex = %u\n",
- lockres->l_ro_holders, lockres->l_ex_holders);
+ mlog(ML_BASTS, "lockres %.*s, EX/PR Holders %u,%u\n",
+ lockres->l_namelen, lockres->l_name,
+ lockres->l_ex_holders, lockres->l_ro_holders);
goto drop_ref;
}
- if ((lockres->l_blocking == LKM_PRMODE)
+ if ((lockres->l_blocking == DLM_LOCK_PR)
&& lockres->l_ex_holders) {
spin_unlock(&lockres->l_lock);
- mlog(0, "can't downconvert for pr: ex = %u\n",
- lockres->l_ex_holders);
+ mlog(ML_BASTS, "lockres %.*s, EX Holders %u\n",
+ lockres->l_namelen, lockres->l_name,
+ lockres->l_ex_holders);
goto drop_ref;
}
@@ -355,22 +377,17 @@
new_level = user_highest_compat_lock_level(lockres->l_blocking);
lockres->l_requested = new_level;
lockres->l_flags |= USER_LOCK_BUSY;
- mlog(0, "Downconvert lock from %d to %d\n",
- lockres->l_level, new_level);
+ mlog(ML_BASTS, "lockres %.*s, downconvert %d => %d\n",
+ lockres->l_namelen, lockres->l_name, lockres->l_level, new_level);
spin_unlock(&lockres->l_lock);
/* need lock downconvert request now... */
- status = dlmlock(dlm,
- new_level,
- &lockres->l_lksb,
- LKM_CONVERT|LKM_VALBLK,
- lockres->l_name,
- lockres->l_namelen,
- user_ast,
- lockres,
- user_bast);
- if (status != DLM_NORMAL) {
- user_log_dlm_error("dlmlock", status, lockres);
+ status = ocfs2_dlm_lock(conn, new_level, &lockres->l_lksb,
+ DLM_LKF_CONVERT|DLM_LKF_VALBLK,
+ lockres->l_name,
+ lockres->l_namelen);
+ if (status) {
+ user_log_dlm_error("ocfs2_dlm_lock", status, lockres);
user_recover_from_dlm_error(lockres);
}
@@ -382,10 +399,10 @@
int level)
{
switch(level) {
- case LKM_EXMODE:
+ case DLM_LOCK_EX:
lockres->l_ex_holders++;
break;
- case LKM_PRMODE:
+ case DLM_LOCK_PR:
lockres->l_ro_holders++;
break;
default:
@@ -410,20 +427,19 @@
int lkm_flags)
{
int status, local_flags;
- struct dlm_ctxt *dlm = dlm_ctxt_from_user_lockres(lockres);
+ struct ocfs2_cluster_connection *conn =
+ cluster_connection_from_user_lockres(lockres);
- if (level != LKM_EXMODE &&
- level != LKM_PRMODE) {
+ if (level != DLM_LOCK_EX &&
+ level != DLM_LOCK_PR) {
mlog(ML_ERROR, "lockres %.*s: invalid request!\n",
lockres->l_namelen, lockres->l_name);
status = -EINVAL;
goto bail;
}
- mlog(0, "lockres %.*s: asking for %s lock, passed flags = 0x%x\n",
- lockres->l_namelen, lockres->l_name,
- (level == LKM_EXMODE) ? "LKM_EXMODE" : "LKM_PRMODE",
- lkm_flags);
+ mlog(ML_BASTS, "lockres %.*s, level %d, flags = 0x%x\n",
+ lockres->l_namelen, lockres->l_name, level, lkm_flags);
again:
if (signal_pending(current)) {
@@ -457,35 +473,26 @@
}
if (level > lockres->l_level) {
- local_flags = lkm_flags | LKM_VALBLK;
- if (lockres->l_level != LKM_IVMODE)
- local_flags |= LKM_CONVERT;
+ local_flags = lkm_flags | DLM_LKF_VALBLK;
+ if (lockres->l_level != DLM_LOCK_IV)
+ local_flags |= DLM_LKF_CONVERT;
lockres->l_requested = level;
lockres->l_flags |= USER_LOCK_BUSY;
spin_unlock(&lockres->l_lock);
- BUG_ON(level == LKM_IVMODE);
- BUG_ON(level == LKM_NLMODE);
+ BUG_ON(level == DLM_LOCK_IV);
+ BUG_ON(level == DLM_LOCK_NL);
/* call dlm_lock to upgrade lock now */
- status = dlmlock(dlm,
- level,
- &lockres->l_lksb,
- local_flags,
- lockres->l_name,
- lockres->l_namelen,
- user_ast,
- lockres,
- user_bast);
- if (status != DLM_NORMAL) {
- if ((lkm_flags & LKM_NOQUEUE) &&
- (status == DLM_NOTQUEUED))
- status = -EAGAIN;
- else {
- user_log_dlm_error("dlmlock", status, lockres);
- status = -EINVAL;
- }
+ status = ocfs2_dlm_lock(conn, level, &lockres->l_lksb,
+ local_flags, lockres->l_name,
+ lockres->l_namelen);
+ if (status) {
+ if ((lkm_flags & DLM_LKF_NOQUEUE) &&
+ (status != -EAGAIN))
+ user_log_dlm_error("ocfs2_dlm_lock",
+ status, lockres);
user_recover_from_dlm_error(lockres);
goto bail;
}
@@ -506,11 +513,11 @@
int level)
{
switch(level) {
- case LKM_EXMODE:
+ case DLM_LOCK_EX:
BUG_ON(!lockres->l_ex_holders);
lockres->l_ex_holders--;
break;
- case LKM_PRMODE:
+ case DLM_LOCK_PR:
BUG_ON(!lockres->l_ro_holders);
lockres->l_ro_holders--;
break;
@@ -522,8 +529,8 @@
void user_dlm_cluster_unlock(struct user_lock_res *lockres,
int level)
{
- if (level != LKM_EXMODE &&
- level != LKM_PRMODE) {
+ if (level != DLM_LOCK_EX &&
+ level != DLM_LOCK_PR) {
mlog(ML_ERROR, "lockres %.*s: invalid request!\n",
lockres->l_namelen, lockres->l_name);
return;
@@ -540,33 +547,40 @@
unsigned int len)
{
struct user_lock_res *lockres = &DLMFS_I(inode)->ip_lockres;
- char *lvb = lockres->l_lksb.lvb;
+ char *lvb;
BUG_ON(len > DLM_LVB_LEN);
spin_lock(&lockres->l_lock);
- BUG_ON(lockres->l_level < LKM_EXMODE);
+ BUG_ON(lockres->l_level < DLM_LOCK_EX);
+ lvb = ocfs2_dlm_lvb(&lockres->l_lksb);
memcpy(lvb, val, len);
spin_unlock(&lockres->l_lock);
}
-void user_dlm_read_lvb(struct inode *inode,
- char *val,
- unsigned int len)
+ssize_t user_dlm_read_lvb(struct inode *inode,
+ char *val,
+ unsigned int len)
{
struct user_lock_res *lockres = &DLMFS_I(inode)->ip_lockres;
- char *lvb = lockres->l_lksb.lvb;
+ char *lvb;
+ ssize_t ret = len;
BUG_ON(len > DLM_LVB_LEN);
spin_lock(&lockres->l_lock);
- BUG_ON(lockres->l_level < LKM_PRMODE);
- memcpy(val, lvb, len);
+ BUG_ON(lockres->l_level < DLM_LOCK_PR);
+ if (ocfs2_dlm_lvb_valid(&lockres->l_lksb)) {
+ lvb = ocfs2_dlm_lvb(&lockres->l_lksb);
+ memcpy(val, lvb, len);
+ } else
+ ret = 0;
spin_unlock(&lockres->l_lock);
+ return ret;
}
void user_dlm_lock_res_init(struct user_lock_res *lockres,
@@ -576,9 +590,9 @@
spin_lock_init(&lockres->l_lock);
init_waitqueue_head(&lockres->l_event);
- lockres->l_level = LKM_IVMODE;
- lockres->l_requested = LKM_IVMODE;
- lockres->l_blocking = LKM_IVMODE;
+ lockres->l_level = DLM_LOCK_IV;
+ lockres->l_requested = DLM_LOCK_IV;
+ lockres->l_blocking = DLM_LOCK_IV;
/* should have been checked before getting here. */
BUG_ON(dentry->d_name.len >= USER_DLM_LOCK_ID_MAX_LEN);
@@ -592,9 +606,10 @@
int user_dlm_destroy_lock(struct user_lock_res *lockres)
{
int status = -EBUSY;
- struct dlm_ctxt *dlm = dlm_ctxt_from_user_lockres(lockres);
+ struct ocfs2_cluster_connection *conn =
+ cluster_connection_from_user_lockres(lockres);
- mlog(0, "asked to destroy %.*s\n", lockres->l_namelen, lockres->l_name);
+ mlog(ML_BASTS, "lockres %.*s\n", lockres->l_namelen, lockres->l_name);
spin_lock(&lockres->l_lock);
if (lockres->l_flags & USER_LOCK_IN_TEARDOWN) {
@@ -627,14 +642,9 @@
lockres->l_flags |= USER_LOCK_BUSY;
spin_unlock(&lockres->l_lock);
- status = dlmunlock(dlm,
- &lockres->l_lksb,
- LKM_VALBLK,
- user_unlock_ast,
- lockres);
- if (status != DLM_NORMAL) {
- user_log_dlm_error("dlmunlock", status, lockres);
- status = -EINVAL;
+ status = ocfs2_dlm_unlock(conn, &lockres->l_lksb, DLM_LKF_VALBLK);
+ if (status) {
+ user_log_dlm_error("ocfs2_dlm_unlock", status, lockres);
goto bail;
}
@@ -645,32 +655,34 @@
return status;
}
-struct dlm_ctxt *user_dlm_register_context(struct qstr *name,
- struct dlm_protocol_version *proto)
+static void user_dlm_recovery_handler_noop(int node_num,
+ void *recovery_data)
{
- struct dlm_ctxt *dlm;
- u32 dlm_key;
- char *domain;
-
- domain = kmalloc(name->len + 1, GFP_NOFS);
- if (!domain) {
- mlog_errno(-ENOMEM);
- return ERR_PTR(-ENOMEM);
- }
-
- dlm_key = crc32_le(0, name->name, name->len);
-
- snprintf(domain, name->len + 1, "%.*s", name->len, name->name);
-
- dlm = dlm_register_domain(domain, dlm_key, proto);
- if (IS_ERR(dlm))
- mlog_errno(PTR_ERR(dlm));
-
- kfree(domain);
- return dlm;
+ /* We ignore recovery events */
+ return;
}
-void user_dlm_unregister_context(struct dlm_ctxt *dlm)
+void user_dlm_set_locking_protocol(void)
{
- dlm_unregister_domain(dlm);
+ ocfs2_stack_glue_set_max_proto_version(&user_dlm_lproto.lp_max_version);
+}
+
+struct ocfs2_cluster_connection *user_dlm_register(struct qstr *name)
+{
+ int rc;
+ struct ocfs2_cluster_connection *conn;
+
+ rc = ocfs2_cluster_connect_agnostic(name->name, name->len,
+ &user_dlm_lproto,
+ user_dlm_recovery_handler_noop,
+ NULL, &conn);
+ if (rc)
+ mlog_errno(rc);
+
+ return rc ? ERR_PTR(rc) : conn;
+}
+
+void user_dlm_unregister(struct ocfs2_cluster_connection *conn)
+{
+ ocfs2_cluster_disconnect(conn, 0);
}
diff --git a/fs/ocfs2/dlm/userdlm.h b/fs/ocfs2/dlmfs/userdlm.h
similarity index 89%
rename from fs/ocfs2/dlm/userdlm.h
rename to fs/ocfs2/dlmfs/userdlm.h
index 0c3cc03..3b42d79 100644
--- a/fs/ocfs2/dlm/userdlm.h
+++ b/fs/ocfs2/dlmfs/userdlm.h
@@ -57,7 +57,7 @@
int l_level;
unsigned int l_ro_holders;
unsigned int l_ex_holders;
- struct dlm_lockstatus l_lksb;
+ struct ocfs2_dlm_lksb l_lksb;
int l_requested;
int l_blocking;
@@ -80,15 +80,15 @@
void user_dlm_write_lvb(struct inode *inode,
const char *val,
unsigned int len);
-void user_dlm_read_lvb(struct inode *inode,
- char *val,
- unsigned int len);
-struct dlm_ctxt *user_dlm_register_context(struct qstr *name,
- struct dlm_protocol_version *proto);
-void user_dlm_unregister_context(struct dlm_ctxt *dlm);
+ssize_t user_dlm_read_lvb(struct inode *inode,
+ char *val,
+ unsigned int len);
+struct ocfs2_cluster_connection *user_dlm_register(struct qstr *name);
+void user_dlm_unregister(struct ocfs2_cluster_connection *conn);
+void user_dlm_set_locking_protocol(void);
struct dlmfs_inode_private {
- struct dlm_ctxt *ip_dlm;
+ struct ocfs2_cluster_connection *ip_conn;
struct user_lock_res ip_lockres; /* unused for directories. */
struct inode *ip_parent;
diff --git a/fs/ocfs2/dlmglue.c b/fs/ocfs2/dlmglue.c
index e044019..8298608 100644
--- a/fs/ocfs2/dlmglue.c
+++ b/fs/ocfs2/dlmglue.c
@@ -297,6 +297,11 @@
lockres->l_type == OCFS2_LOCK_TYPE_OPEN;
}
+static inline struct ocfs2_lock_res *ocfs2_lksb_to_lock_res(struct ocfs2_dlm_lksb *lksb)
+{
+ return container_of(lksb, struct ocfs2_lock_res, l_lksb);
+}
+
static inline struct inode *ocfs2_lock_res_inode(struct ocfs2_lock_res *lockres)
{
BUG_ON(!ocfs2_is_inode_lock(lockres));
@@ -927,6 +932,10 @@
lockres->l_blocking = level;
}
+ mlog(ML_BASTS, "lockres %s, block %d, level %d, l_block %d, dwn %d\n",
+ lockres->l_name, level, lockres->l_level, lockres->l_blocking,
+ needs_downconvert);
+
if (needs_downconvert)
lockres_or_flags(lockres, OCFS2_LOCK_BLOCKED);
@@ -1040,18 +1049,17 @@
return lockres->l_pending_gen;
}
-
-static void ocfs2_blocking_ast(void *opaque, int level)
+static void ocfs2_blocking_ast(struct ocfs2_dlm_lksb *lksb, int level)
{
- struct ocfs2_lock_res *lockres = opaque;
+ struct ocfs2_lock_res *lockres = ocfs2_lksb_to_lock_res(lksb);
struct ocfs2_super *osb = ocfs2_get_lockres_osb(lockres);
int needs_downconvert;
unsigned long flags;
BUG_ON(level <= DLM_LOCK_NL);
- mlog(0, "BAST fired for lockres %s, blocking %d, level %d type %s\n",
- lockres->l_name, level, lockres->l_level,
+ mlog(ML_BASTS, "BAST fired for lockres %s, blocking %d, level %d, "
+ "type %s\n", lockres->l_name, level, lockres->l_level,
ocfs2_lock_type_string(lockres->l_type));
/*
@@ -1072,9 +1080,9 @@
ocfs2_wake_downconvert_thread(osb);
}
-static void ocfs2_locking_ast(void *opaque)
+static void ocfs2_locking_ast(struct ocfs2_dlm_lksb *lksb)
{
- struct ocfs2_lock_res *lockres = opaque;
+ struct ocfs2_lock_res *lockres = ocfs2_lksb_to_lock_res(lksb);
struct ocfs2_super *osb = ocfs2_get_lockres_osb(lockres);
unsigned long flags;
int status;
@@ -1095,6 +1103,10 @@
return;
}
+ mlog(ML_BASTS, "AST fired for lockres %s, action %d, unlock %d, "
+ "level %d => %d\n", lockres->l_name, lockres->l_action,
+ lockres->l_unlock_action, lockres->l_level, lockres->l_requested);
+
switch(lockres->l_action) {
case OCFS2_AST_ATTACH:
ocfs2_generic_handle_attach_action(lockres);
@@ -1107,8 +1119,8 @@
ocfs2_generic_handle_downconvert_action(lockres);
break;
default:
- mlog(ML_ERROR, "lockres %s: ast fired with invalid action: %u "
- "lockres flags = 0x%lx, unlock action: %u\n",
+ mlog(ML_ERROR, "lockres %s: AST fired with invalid action: %u, "
+ "flags 0x%lx, unlock: %u\n",
lockres->l_name, lockres->l_action, lockres->l_flags,
lockres->l_unlock_action);
BUG();
@@ -1134,6 +1146,88 @@
spin_unlock_irqrestore(&lockres->l_lock, flags);
}
+static void ocfs2_unlock_ast(struct ocfs2_dlm_lksb *lksb, int error)
+{
+ struct ocfs2_lock_res *lockres = ocfs2_lksb_to_lock_res(lksb);
+ unsigned long flags;
+
+ mlog_entry_void();
+
+ mlog(ML_BASTS, "UNLOCK AST fired for lockres %s, action = %d\n",
+ lockres->l_name, lockres->l_unlock_action);
+
+ spin_lock_irqsave(&lockres->l_lock, flags);
+ if (error) {
+ mlog(ML_ERROR, "Dlm passes error %d for lock %s, "
+ "unlock_action %d\n", error, lockres->l_name,
+ lockres->l_unlock_action);
+ spin_unlock_irqrestore(&lockres->l_lock, flags);
+ mlog_exit_void();
+ return;
+ }
+
+ switch(lockres->l_unlock_action) {
+ case OCFS2_UNLOCK_CANCEL_CONVERT:
+ mlog(0, "Cancel convert success for %s\n", lockres->l_name);
+ lockres->l_action = OCFS2_AST_INVALID;
+ /* Downconvert thread may have requeued this lock, we
+ * need to wake it. */
+ if (lockres->l_flags & OCFS2_LOCK_BLOCKED)
+ ocfs2_wake_downconvert_thread(ocfs2_get_lockres_osb(lockres));
+ break;
+ case OCFS2_UNLOCK_DROP_LOCK:
+ lockres->l_level = DLM_LOCK_IV;
+ break;
+ default:
+ BUG();
+ }
+
+ lockres_clear_flags(lockres, OCFS2_LOCK_BUSY);
+ lockres->l_unlock_action = OCFS2_UNLOCK_INVALID;
+ wake_up(&lockres->l_event);
+ spin_unlock_irqrestore(&lockres->l_lock, flags);
+
+ mlog_exit_void();
+}
+
+/*
+ * This is the filesystem locking protocol. It provides the lock handling
+ * hooks for the underlying DLM. It has a maximum version number.
+ * The version number allows interoperability with systems running at
+ * the same major number and an equal or smaller minor number.
+ *
+ * Whenever the filesystem does new things with locks (adds or removes a
+ * lock, orders them differently, does different things underneath a lock),
+ * the version must be changed. The protocol is negotiated when joining
+ * the dlm domain. A node may join the domain if its major version is
+ * identical to all other nodes and its minor version is greater than
+ * or equal to all other nodes. When its minor version is greater than
+ * the other nodes, it will run at the minor version specified by the
+ * other nodes.
+ *
+ * If a locking change is made that will not be compatible with older
+ * versions, the major number must be increased and the minor version set
+ * to zero. If a change merely adds a behavior that can be disabled when
+ * speaking to older versions, the minor version must be increased. If a
+ * change adds a fully backwards compatible change (eg, LVB changes that
+ * are just ignored by older versions), the version does not need to be
+ * updated.
+ */
+static struct ocfs2_locking_protocol lproto = {
+ .lp_max_version = {
+ .pv_major = OCFS2_LOCKING_PROTOCOL_MAJOR,
+ .pv_minor = OCFS2_LOCKING_PROTOCOL_MINOR,
+ },
+ .lp_lock_ast = ocfs2_locking_ast,
+ .lp_blocking_ast = ocfs2_blocking_ast,
+ .lp_unlock_ast = ocfs2_unlock_ast,
+};
+
+void ocfs2_set_locking_protocol(void)
+{
+ ocfs2_stack_glue_set_max_proto_version(&lproto.lp_max_version);
+}
+
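To make the negotiation rule in the comment above concrete, the sketch below expresses the same check. It is illustrative only and not part of this patch (the helper name is made up): majors must be identical, a joining node's maximum minor must be at least the domain's running minor, and the node then runs at the domain's minor.

static int example_protocol_compare(struct ocfs2_protocol_version *running,
				    struct ocfs2_protocol_version *request)
{
	/* Majors must match exactly across the domain. */
	if (running->pv_major != request->pv_major)
		return 1;

	/* A joining node must support at least the running minor... */
	if (running->pv_minor > request->pv_minor)
		return 1;

	/* ...and, if it supports more, it runs at the domain's minor. */
	if (running->pv_minor < request->pv_minor)
		request->pv_minor = running->pv_minor;

	return 0;
}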
static inline void ocfs2_recover_from_dlm_error(struct ocfs2_lock_res *lockres,
int convert)
{
@@ -1189,8 +1283,7 @@
&lockres->l_lksb,
dlm_flags,
lockres->l_name,
- OCFS2_LOCK_ID_MAX_LEN - 1,
- lockres);
+ OCFS2_LOCK_ID_MAX_LEN - 1);
lockres_clear_pending(lockres, gen, osb);
if (ret) {
ocfs2_log_dlm_error("ocfs2_dlm_lock", ret, lockres);
@@ -1412,7 +1505,7 @@
BUG_ON(level == DLM_LOCK_IV);
BUG_ON(level == DLM_LOCK_NL);
- mlog(0, "lock %s, convert from %d to level = %d\n",
+ mlog(ML_BASTS, "lockres %s, convert from %d to %d\n",
lockres->l_name, lockres->l_level, level);
/* call dlm_lock to upgrade lock now */
@@ -1421,8 +1514,7 @@
&lockres->l_lksb,
lkm_flags,
lockres->l_name,
- OCFS2_LOCK_ID_MAX_LEN - 1,
- lockres);
+ OCFS2_LOCK_ID_MAX_LEN - 1);
lockres_clear_pending(lockres, gen, osb);
if (ret) {
if (!(lkm_flags & DLM_LKF_NOQUEUE) ||
@@ -1859,8 +1951,7 @@
spin_unlock_irqrestore(&lockres->l_lock, flags);
ret = ocfs2_dlm_lock(osb->cconn, level, &lockres->l_lksb, lkm_flags,
- lockres->l_name, OCFS2_LOCK_ID_MAX_LEN - 1,
- lockres);
+ lockres->l_name, OCFS2_LOCK_ID_MAX_LEN - 1);
if (ret) {
if (!trylock || (ret != -EAGAIN)) {
ocfs2_log_dlm_error("ocfs2_dlm_lock", ret, lockres);
@@ -2989,7 +3080,7 @@
status = ocfs2_cluster_connect(osb->osb_cluster_stack,
osb->uuid_str,
strlen(osb->uuid_str),
- ocfs2_do_node_down, osb,
+ &lproto, ocfs2_do_node_down, osb,
&conn);
if (status) {
mlog_errno(status);
@@ -3056,50 +3147,6 @@
mlog_exit_void();
}
-static void ocfs2_unlock_ast(void *opaque, int error)
-{
- struct ocfs2_lock_res *lockres = opaque;
- unsigned long flags;
-
- mlog_entry_void();
-
- mlog(0, "UNLOCK AST called on lock %s, action = %d\n", lockres->l_name,
- lockres->l_unlock_action);
-
- spin_lock_irqsave(&lockres->l_lock, flags);
- if (error) {
- mlog(ML_ERROR, "Dlm passes error %d for lock %s, "
- "unlock_action %d\n", error, lockres->l_name,
- lockres->l_unlock_action);
- spin_unlock_irqrestore(&lockres->l_lock, flags);
- mlog_exit_void();
- return;
- }
-
- switch(lockres->l_unlock_action) {
- case OCFS2_UNLOCK_CANCEL_CONVERT:
- mlog(0, "Cancel convert success for %s\n", lockres->l_name);
- lockres->l_action = OCFS2_AST_INVALID;
- /* Downconvert thread may have requeued this lock, we
- * need to wake it. */
- if (lockres->l_flags & OCFS2_LOCK_BLOCKED)
- ocfs2_wake_downconvert_thread(ocfs2_get_lockres_osb(lockres));
- break;
- case OCFS2_UNLOCK_DROP_LOCK:
- lockres->l_level = DLM_LOCK_IV;
- break;
- default:
- BUG();
- }
-
- lockres_clear_flags(lockres, OCFS2_LOCK_BUSY);
- lockres->l_unlock_action = OCFS2_UNLOCK_INVALID;
- wake_up(&lockres->l_event);
- spin_unlock_irqrestore(&lockres->l_lock, flags);
-
- mlog_exit_void();
-}
-
static int ocfs2_drop_lock(struct ocfs2_super *osb,
struct ocfs2_lock_res *lockres)
{
@@ -3167,8 +3214,7 @@
mlog(0, "lock %s\n", lockres->l_name);
- ret = ocfs2_dlm_unlock(osb->cconn, &lockres->l_lksb, lkm_flags,
- lockres);
+ ret = ocfs2_dlm_unlock(osb->cconn, &lockres->l_lksb, lkm_flags);
if (ret) {
ocfs2_log_dlm_error("ocfs2_dlm_unlock", ret, lockres);
mlog(ML_ERROR, "lockres flags: %lu\n", lockres->l_flags);
@@ -3276,13 +3322,20 @@
BUG_ON(lockres->l_blocking <= DLM_LOCK_NL);
if (lockres->l_level <= new_level) {
- mlog(ML_ERROR, "lockres->l_level (%d) <= new_level (%d)\n",
- lockres->l_level, new_level);
+ mlog(ML_ERROR, "lockres %s, lvl %d <= %d, blcklst %d, mask %d, "
+ "type %d, flags 0x%lx, hold %d %d, act %d %d, req %d, "
+ "block %d, pgen %d\n", lockres->l_name, lockres->l_level,
+ new_level, list_empty(&lockres->l_blocked_list),
+ list_empty(&lockres->l_mask_waiters), lockres->l_type,
+ lockres->l_flags, lockres->l_ro_holders,
+ lockres->l_ex_holders, lockres->l_action,
+ lockres->l_unlock_action, lockres->l_requested,
+ lockres->l_blocking, lockres->l_pending_gen);
BUG();
}
- mlog(0, "lock %s, new_level = %d, l_blocking = %d\n",
- lockres->l_name, new_level, lockres->l_blocking);
+ mlog(ML_BASTS, "lockres %s, level %d => %d, blocking %d\n",
+ lockres->l_name, lockres->l_level, new_level, lockres->l_blocking);
lockres->l_action = OCFS2_AST_DOWNCONVERT;
lockres->l_requested = new_level;
@@ -3301,6 +3354,9 @@
mlog_entry_void();
+ mlog(ML_BASTS, "lockres %s, level %d => %d\n", lockres->l_name,
+ lockres->l_level, new_level);
+
if (lvb)
dlm_flags |= DLM_LKF_VALBLK;
@@ -3309,8 +3365,7 @@
&lockres->l_lksb,
dlm_flags,
lockres->l_name,
- OCFS2_LOCK_ID_MAX_LEN - 1,
- lockres);
+ OCFS2_LOCK_ID_MAX_LEN - 1);
lockres_clear_pending(lockres, generation, osb);
if (ret) {
ocfs2_log_dlm_error("ocfs2_dlm_lock", ret, lockres);
@@ -3331,14 +3386,12 @@
assert_spin_locked(&lockres->l_lock);
mlog_entry_void();
- mlog(0, "lock %s\n", lockres->l_name);
if (lockres->l_unlock_action == OCFS2_UNLOCK_CANCEL_CONVERT) {
/* If we're already trying to cancel a lock conversion
* then just drop the spinlock and allow the caller to
* requeue this lock. */
-
- mlog(0, "Lockres %s, skip convert\n", lockres->l_name);
+ mlog(ML_BASTS, "lockres %s, skip convert\n", lockres->l_name);
return 0;
}
@@ -3353,6 +3406,8 @@
"lock %s, invalid flags: 0x%lx\n",
lockres->l_name, lockres->l_flags);
+ mlog(ML_BASTS, "lockres %s\n", lockres->l_name);
+
return 1;
}
@@ -3362,16 +3417,15 @@
int ret;
mlog_entry_void();
- mlog(0, "lock %s\n", lockres->l_name);
ret = ocfs2_dlm_unlock(osb->cconn, &lockres->l_lksb,
- DLM_LKF_CANCEL, lockres);
+ DLM_LKF_CANCEL);
if (ret) {
ocfs2_log_dlm_error("ocfs2_dlm_unlock", ret, lockres);
ocfs2_recover_from_dlm_error(lockres, 0);
}
- mlog(0, "lock %s return from ocfs2_dlm_unlock\n", lockres->l_name);
+ mlog(ML_BASTS, "lockres %s\n", lockres->l_name);
mlog_exit(ret);
return ret;
@@ -3428,8 +3482,11 @@
* at the same time they set OCFS2_DLM_BUSY. They must
* clear OCFS2_DLM_PENDING after dlm_lock() returns.
*/
- if (lockres->l_flags & OCFS2_LOCK_PENDING)
+ if (lockres->l_flags & OCFS2_LOCK_PENDING) {
+ mlog(ML_BASTS, "lockres %s, ReQ: Pending\n",
+ lockres->l_name);
goto leave_requeue;
+ }
ctl->requeue = 1;
ret = ocfs2_prepare_cancel_convert(osb, lockres);
@@ -3461,6 +3518,7 @@
*/
if (lockres->l_level == DLM_LOCK_NL) {
BUG_ON(lockres->l_ex_holders || lockres->l_ro_holders);
+ mlog(ML_BASTS, "lockres %s, Aborting dc\n", lockres->l_name);
lockres->l_blocking = DLM_LOCK_NL;
lockres_clear_flags(lockres, OCFS2_LOCK_BLOCKED);
spin_unlock_irqrestore(&lockres->l_lock, flags);
@@ -3470,28 +3528,41 @@
/* if we're blocking an exclusive and we have *any* holders,
* then requeue. */
if ((lockres->l_blocking == DLM_LOCK_EX)
- && (lockres->l_ex_holders || lockres->l_ro_holders))
+ && (lockres->l_ex_holders || lockres->l_ro_holders)) {
+ mlog(ML_BASTS, "lockres %s, ReQ: EX/PR Holders %u,%u\n",
+ lockres->l_name, lockres->l_ex_holders,
+ lockres->l_ro_holders);
goto leave_requeue;
+ }
/* If it's a PR we're blocking, then only
* requeue if we've got any EX holders */
if (lockres->l_blocking == DLM_LOCK_PR &&
- lockres->l_ex_holders)
+ lockres->l_ex_holders) {
+ mlog(ML_BASTS, "lockres %s, ReQ: EX Holders %u\n",
+ lockres->l_name, lockres->l_ex_holders);
goto leave_requeue;
+ }
/*
* Can we get a lock in this state if the holder counts are
* zero? The meta data unblock code used to check this.
*/
if ((lockres->l_ops->flags & LOCK_TYPE_REQUIRES_REFRESH)
- && (lockres->l_flags & OCFS2_LOCK_REFRESHING))
+ && (lockres->l_flags & OCFS2_LOCK_REFRESHING)) {
+ mlog(ML_BASTS, "lockres %s, ReQ: Lock Refreshing\n",
+ lockres->l_name);
goto leave_requeue;
+ }
new_level = ocfs2_highest_compat_lock_level(lockres->l_blocking);
if (lockres->l_ops->check_downconvert
- && !lockres->l_ops->check_downconvert(lockres, new_level))
+ && !lockres->l_ops->check_downconvert(lockres, new_level)) {
+ mlog(ML_BASTS, "lockres %s, ReQ: Checkpointing\n",
+ lockres->l_name);
goto leave_requeue;
+ }
/* If we get here, then we know that there are no more
* incompatible holders (and anyone asking for an incompatible
@@ -3509,13 +3580,19 @@
ctl->unblock_action = lockres->l_ops->downconvert_worker(lockres, blocking);
- if (ctl->unblock_action == UNBLOCK_STOP_POST)
+ if (ctl->unblock_action == UNBLOCK_STOP_POST) {
+ mlog(ML_BASTS, "lockres %s, UNBLOCK_STOP_POST\n",
+ lockres->l_name);
goto leave;
+ }
spin_lock_irqsave(&lockres->l_lock, flags);
if ((blocking != lockres->l_blocking) || (level != lockres->l_level)) {
/* If this changed underneath us, then we can't drop
* it just yet. */
+ mlog(ML_BASTS, "lockres %s, block=%d:%d, level=%d:%d, "
+ "Recheck\n", lockres->l_name, blocking,
+ lockres->l_blocking, level, lockres->l_level);
goto recheck;
}
@@ -3910,45 +3987,6 @@
ocfs2_cluster_unlock(osb, lockres, level);
}
-/*
- * This is the filesystem locking protocol. It provides the lock handling
- * hooks for the underlying DLM. It has a maximum version number.
- * The version number allows interoperability with systems running at
- * the same major number and an equal or smaller minor number.
- *
- * Whenever the filesystem does new things with locks (adds or removes a
- * lock, orders them differently, does different things underneath a lock),
- * the version must be changed. The protocol is negotiated when joining
- * the dlm domain. A node may join the domain if its major version is
- * identical to all other nodes and its minor version is greater than
- * or equal to all other nodes. When its minor version is greater than
- * the other nodes, it will run at the minor version specified by the
- * other nodes.
- *
- * If a locking change is made that will not be compatible with older
- * versions, the major number must be increased and the minor version set
- * to zero. If a change merely adds a behavior that can be disabled when
- * speaking to older versions, the minor version must be increased. If a
- * change adds a fully backwards compatible change (eg, LVB changes that
- * are just ignored by older versions), the version does not need to be
- * updated.
- */
-static struct ocfs2_locking_protocol lproto = {
- .lp_max_version = {
- .pv_major = OCFS2_LOCKING_PROTOCOL_MAJOR,
- .pv_minor = OCFS2_LOCKING_PROTOCOL_MINOR,
- },
- .lp_lock_ast = ocfs2_locking_ast,
- .lp_blocking_ast = ocfs2_blocking_ast,
- .lp_unlock_ast = ocfs2_unlock_ast,
-};
-
-void ocfs2_set_locking_protocol(void)
-{
- ocfs2_stack_glue_set_locking_protocol(&lproto);
-}
-
-
static void ocfs2_process_blocked_lock(struct ocfs2_super *osb,
struct ocfs2_lock_res *lockres)
{
@@ -3965,7 +4003,7 @@
BUG_ON(!lockres);
BUG_ON(!lockres->l_ops);
- mlog(0, "lockres %s blocked.\n", lockres->l_name);
+ mlog(ML_BASTS, "lockres %s blocked\n", lockres->l_name);
/* Detect whether a lock has been marked as going away while
* the downconvert thread was processing other things. A lock can
@@ -3988,7 +4026,7 @@
} else
ocfs2_schedule_blocked_lock(osb, lockres);
- mlog(0, "lockres %s, requeue = %s.\n", lockres->l_name,
+ mlog(ML_BASTS, "lockres %s, requeue = %s.\n", lockres->l_name,
ctl.requeue ? "yes" : "no");
spin_unlock_irqrestore(&lockres->l_lock, flags);
@@ -4010,7 +4048,7 @@
/* Do not schedule a lock for downconvert when it's on
* the way to destruction - any nodes wanting access
* to the resource will get it soon. */
- mlog(0, "Lockres %s won't be scheduled: flags 0x%lx\n",
+ mlog(ML_BASTS, "lockres %s won't be scheduled: flags 0x%lx\n",
lockres->l_name, lockres->l_flags);
return;
}
diff --git a/fs/ocfs2/file.c b/fs/ocfs2/file.c
index 558ce03..5b52547 100644
--- a/fs/ocfs2/file.c
+++ b/fs/ocfs2/file.c
@@ -993,10 +993,9 @@
}
if (size_change && attr->ia_size != i_size_read(inode)) {
- if (attr->ia_size > sb->s_maxbytes) {
- status = -EFBIG;
+ status = inode_newsize_ok(inode, attr->ia_size);
+ if (status)
goto bail_unlock;
- }
if (i_size_read(inode) > attr->ia_size) {
if (ocfs2_should_order_data(inode)) {
@@ -1836,6 +1835,8 @@
&meta_level);
if (has_refcount)
*has_refcount = 1;
+ if (direct_io)
+ *direct_io = 0;
}
if (ret < 0) {
@@ -1859,10 +1860,6 @@
break;
}
- if (has_refcount && *has_refcount == 1) {
- *direct_io = 0;
- break;
- }
/*
* Allowing concurrent direct writes means
* i_size changes wouldn't be synchronized, so
@@ -2043,7 +2040,7 @@
* async dio is going to do it in the future or an end_io after an
* error has already done it.
*/
- if (ret == -EIOCBQUEUED || !ocfs2_iocb_is_rw_locked(iocb)) {
+ if ((ret == -EIOCBQUEUED) || (!ocfs2_iocb_is_rw_locked(iocb))) {
rw_level = -1;
have_alloc_sem = 0;
}
diff --git a/fs/ocfs2/ioctl.h b/fs/ocfs2/ioctl.h
index cf9a5ee..0cd5323 100644
--- a/fs/ocfs2/ioctl.h
+++ b/fs/ocfs2/ioctl.h
@@ -7,10 +7,10 @@
*
*/
-#ifndef OCFS2_IOCTL_H
-#define OCFS2_IOCTL_H
+#ifndef OCFS2_IOCTL_PROTO_H
+#define OCFS2_IOCTL_PROTO_H
long ocfs2_ioctl(struct file *filp, unsigned int cmd, unsigned long arg);
long ocfs2_compat_ioctl(struct file *file, unsigned cmd, unsigned long arg);
-#endif /* OCFS2_IOCTL_H */
+#endif /* OCFS2_IOCTL_PROTO_H */
diff --git a/fs/ocfs2/localalloc.c b/fs/ocfs2/localalloc.c
index ac10f83..ca992d9 100644
--- a/fs/ocfs2/localalloc.c
+++ b/fs/ocfs2/localalloc.c
@@ -476,7 +476,7 @@
out:
if (!status)
- ocfs2_init_inode_steal_slot(osb);
+ ocfs2_init_steal_slots(osb);
mlog_exit(status);
return status;
}
diff --git a/fs/ocfs2/ocfs2.h b/fs/ocfs2/ocfs2.h
index 740f448..1238b49 100644
--- a/fs/ocfs2/ocfs2.h
+++ b/fs/ocfs2/ocfs2.h
@@ -42,6 +42,7 @@
#include "ocfs2_fs.h"
#include "ocfs2_lockid.h"
+#include "ocfs2_ioctl.h"
/* For struct ocfs2_blockcheck_stats */
#include "blockcheck.h"
@@ -159,7 +160,7 @@
int l_level;
unsigned int l_ro_holders;
unsigned int l_ex_holders;
- union ocfs2_dlm_lksb l_lksb;
+ struct ocfs2_dlm_lksb l_lksb;
/* used from AST/BAST funcs. */
enum ocfs2_ast_action l_action;
@@ -305,7 +306,9 @@
u32 s_next_generation;
unsigned long osb_flags;
s16 s_inode_steal_slot;
+ s16 s_meta_steal_slot;
atomic_t s_num_inodes_stolen;
+ atomic_t s_num_meta_stolen;
unsigned long s_mount_opt;
unsigned int s_atime_quantum;
@@ -760,33 +763,6 @@
return megs << (20 - OCFS2_SB(sb)->s_clustersize_bits);
}
-static inline void ocfs2_init_inode_steal_slot(struct ocfs2_super *osb)
-{
- spin_lock(&osb->osb_lock);
- osb->s_inode_steal_slot = OCFS2_INVALID_SLOT;
- spin_unlock(&osb->osb_lock);
- atomic_set(&osb->s_num_inodes_stolen, 0);
-}
-
-static inline void ocfs2_set_inode_steal_slot(struct ocfs2_super *osb,
- s16 slot)
-{
- spin_lock(&osb->osb_lock);
- osb->s_inode_steal_slot = slot;
- spin_unlock(&osb->osb_lock);
-}
-
-static inline s16 ocfs2_get_inode_steal_slot(struct ocfs2_super *osb)
-{
- s16 slot;
-
- spin_lock(&osb->osb_lock);
- slot = osb->s_inode_steal_slot;
- spin_unlock(&osb->osb_lock);
-
- return slot;
-}
-
#define ocfs2_set_bit ext2_set_bit
#define ocfs2_clear_bit ext2_clear_bit
#define ocfs2_test_bit ext2_test_bit
diff --git a/fs/ocfs2/ocfs2_fs.h b/fs/ocfs2/ocfs2_fs.h
index 7638a38..bb37218 100644
--- a/fs/ocfs2/ocfs2_fs.h
+++ b/fs/ocfs2/ocfs2_fs.h
@@ -254,63 +254,6 @@
* refcount tree */
/*
- * ioctl commands
- */
-#define OCFS2_IOC_GETFLAGS _IOR('f', 1, long)
-#define OCFS2_IOC_SETFLAGS _IOW('f', 2, long)
-#define OCFS2_IOC32_GETFLAGS _IOR('f', 1, int)
-#define OCFS2_IOC32_SETFLAGS _IOW('f', 2, int)
-
-/*
- * Space reservation / allocation / free ioctls and argument structure
- * are designed to be compatible with XFS.
- *
- * ALLOCSP* and FREESP* are not and will never be supported, but are
- * included here for completeness.
- */
-struct ocfs2_space_resv {
- __s16 l_type;
- __s16 l_whence;
- __s64 l_start;
- __s64 l_len; /* len == 0 means until end of file */
- __s32 l_sysid;
- __u32 l_pid;
- __s32 l_pad[4]; /* reserve area */
-};
-
-#define OCFS2_IOC_ALLOCSP _IOW ('X', 10, struct ocfs2_space_resv)
-#define OCFS2_IOC_FREESP _IOW ('X', 11, struct ocfs2_space_resv)
-#define OCFS2_IOC_RESVSP _IOW ('X', 40, struct ocfs2_space_resv)
-#define OCFS2_IOC_UNRESVSP _IOW ('X', 41, struct ocfs2_space_resv)
-#define OCFS2_IOC_ALLOCSP64 _IOW ('X', 36, struct ocfs2_space_resv)
-#define OCFS2_IOC_FREESP64 _IOW ('X', 37, struct ocfs2_space_resv)
-#define OCFS2_IOC_RESVSP64 _IOW ('X', 42, struct ocfs2_space_resv)
-#define OCFS2_IOC_UNRESVSP64 _IOW ('X', 43, struct ocfs2_space_resv)
-
-/* Used to pass group descriptor data when online resize is done */
-struct ocfs2_new_group_input {
- __u64 group; /* Group descriptor's blkno. */
- __u32 clusters; /* Total number of clusters in this group */
- __u32 frees; /* Total free clusters in this group */
- __u16 chain; /* Chain for this group */
- __u16 reserved1;
- __u32 reserved2;
-};
-
-#define OCFS2_IOC_GROUP_EXTEND _IOW('o', 1, int)
-#define OCFS2_IOC_GROUP_ADD _IOW('o', 2,struct ocfs2_new_group_input)
-#define OCFS2_IOC_GROUP_ADD64 _IOW('o', 3,struct ocfs2_new_group_input)
-
-/* Used to pass 2 file names to reflink. */
-struct reflink_arguments {
- __u64 old_path;
- __u64 new_path;
- __u64 preserve;
-};
-#define OCFS2_IOC_REFLINK _IOW('o', 4, struct reflink_arguments)
-
-
-/*
* Journal Flags (ocfs2_dinode.id1.journal1.i_flags)
*/
#define OCFS2_JOURNAL_DIRTY_FL (0x00000001) /* Journal needs recovery */
diff --git a/fs/ocfs2/ocfs2_ioctl.h b/fs/ocfs2/ocfs2_ioctl.h
new file mode 100644
index 0000000..2d3420a
--- /dev/null
+++ b/fs/ocfs2/ocfs2_ioctl.h
@@ -0,0 +1,79 @@
+/* -*- mode: c; c-basic-offset: 8; -*-
+ * vim: noexpandtab sw=8 ts=8 sts=0:
+ *
+ * ocfs2_ioctl.h
+ *
+ * Defines OCFS2 ioctls.
+ *
+ * Copyright (C) 2010 Oracle. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public
+ * License, version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ */
+
+#ifndef OCFS2_IOCTL_H
+#define OCFS2_IOCTL_H
+
+/*
+ * ioctl commands
+ */
+#define OCFS2_IOC_GETFLAGS _IOR('f', 1, long)
+#define OCFS2_IOC_SETFLAGS _IOW('f', 2, long)
+#define OCFS2_IOC32_GETFLAGS _IOR('f', 1, int)
+#define OCFS2_IOC32_SETFLAGS _IOW('f', 2, int)
+
+/*
+ * Space reservation / allocation / free ioctls and argument structure
+ * are designed to be compatible with XFS.
+ *
+ * ALLOCSP* and FREESP* are not and will never be supported, but are
+ * included here for completeness.
+ */
+struct ocfs2_space_resv {
+ __s16 l_type;
+ __s16 l_whence;
+ __s64 l_start;
+ __s64 l_len; /* len == 0 means until end of file */
+ __s32 l_sysid;
+ __u32 l_pid;
+ __s32 l_pad[4]; /* reserve area */
+};
+
+#define OCFS2_IOC_ALLOCSP _IOW ('X', 10, struct ocfs2_space_resv)
+#define OCFS2_IOC_FREESP _IOW ('X', 11, struct ocfs2_space_resv)
+#define OCFS2_IOC_RESVSP _IOW ('X', 40, struct ocfs2_space_resv)
+#define OCFS2_IOC_UNRESVSP _IOW ('X', 41, struct ocfs2_space_resv)
+#define OCFS2_IOC_ALLOCSP64 _IOW ('X', 36, struct ocfs2_space_resv)
+#define OCFS2_IOC_FREESP64 _IOW ('X', 37, struct ocfs2_space_resv)
+#define OCFS2_IOC_RESVSP64 _IOW ('X', 42, struct ocfs2_space_resv)
+#define OCFS2_IOC_UNRESVSP64 _IOW ('X', 43, struct ocfs2_space_resv)
+
+/* Used to pass group descriptor data when online resize is done */
+struct ocfs2_new_group_input {
+ __u64 group; /* Group descriptor's blkno. */
+ __u32 clusters; /* Total number of clusters in this group */
+ __u32 frees; /* Total free clusters in this group */
+ __u16 chain; /* Chain for this group */
+ __u16 reserved1;
+ __u32 reserved2;
+};
+
+#define OCFS2_IOC_GROUP_EXTEND _IOW('o', 1, int)
+#define OCFS2_IOC_GROUP_ADD _IOW('o', 2,struct ocfs2_new_group_input)
+#define OCFS2_IOC_GROUP_ADD64 _IOW('o', 3,struct ocfs2_new_group_input)
+
+/* Used to pass 2 file names to reflink. */
+struct reflink_arguments {
+ __u64 old_path;
+ __u64 new_path;
+ __u64 preserve;
+};
+#define OCFS2_IOC_REFLINK _IOW('o', 4, struct reflink_arguments)
+
+#endif /* OCFS2_IOCTL_H */
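For illustration, a hypothetical userspace caller of OCFS2_IOC_REFLINK might look like the sketch below. It is not part of this patch; the mount point and file names are made up, and it assumes the __u64 fields carry pointers to NUL-terminated path strings and that the definitions above are available to the program via a copied header.

#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include "ocfs2_ioctl.h"	/* assumed: a userspace copy of the header above */

int main(void)
{
	struct reflink_arguments args = {
		.old_path = (uint64_t)(uintptr_t)"/mnt/ocfs2/src",
		.new_path = (uint64_t)(uintptr_t)"/mnt/ocfs2/src.clone",
		.preserve = 1,		/* assumed: preserve file attributes */
	};
	/* Any descriptor on the ocfs2 mount will do; open the source file. */
	int fd = open("/mnt/ocfs2/src", O_RDONLY);

	if (fd < 0 || ioctl(fd, OCFS2_IOC_REFLINK, &args) < 0) {
		perror("reflink");
		return 1;
	}
	close(fd);
	return 0;
}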
diff --git a/fs/ocfs2/ocfs2_lockingver.h b/fs/ocfs2/ocfs2_lockingver.h
index 82d5eea..2e45c8d 100644
--- a/fs/ocfs2/ocfs2_lockingver.h
+++ b/fs/ocfs2/ocfs2_lockingver.h
@@ -23,6 +23,8 @@
/*
* The protocol version for ocfs2 cluster locking. See dlmglue.c for
* more details.
+ *
+ * 1.0 - Initial locking version from ocfs2 1.4.
*/
#define OCFS2_LOCKING_PROTOCOL_MAJOR 1
#define OCFS2_LOCKING_PROTOCOL_MINOR 0
diff --git a/fs/ocfs2/refcounttree.c b/fs/ocfs2/refcounttree.c
index 8ae65c9..fb6aa7a 100644
--- a/fs/ocfs2/refcounttree.c
+++ b/fs/ocfs2/refcounttree.c
@@ -626,7 +626,7 @@
rb = (struct ocfs2_refcount_block *)new_bh->b_data;
memset(rb, 0, inode->i_sb->s_blocksize);
strcpy((void *)rb, OCFS2_REFCOUNT_BLOCK_SIGNATURE);
- rb->rf_suballoc_slot = cpu_to_le16(osb->slot_num);
+ rb->rf_suballoc_slot = cpu_to_le16(meta_ac->ac_alloc_slot);
rb->rf_suballoc_bit = cpu_to_le16(suballoc_bit_start);
rb->rf_fs_generation = cpu_to_le32(osb->fs_generation);
rb->rf_blkno = cpu_to_le64(first_blkno);
@@ -1330,7 +1330,7 @@
memcpy(new_bh->b_data, ref_root_bh->b_data, sb->s_blocksize);
new_rb = (struct ocfs2_refcount_block *)new_bh->b_data;
- new_rb->rf_suballoc_slot = cpu_to_le16(OCFS2_SB(sb)->slot_num);
+ new_rb->rf_suballoc_slot = cpu_to_le16(meta_ac->ac_alloc_slot);
new_rb->rf_suballoc_bit = cpu_to_le16(suballoc_bit_start);
new_rb->rf_blkno = cpu_to_le64(blkno);
new_rb->rf_cpos = cpu_to_le32(0);
@@ -1576,7 +1576,7 @@
new_rb = (struct ocfs2_refcount_block *)new_bh->b_data;
memset(new_rb, 0, sb->s_blocksize);
strcpy((void *)new_rb, OCFS2_REFCOUNT_BLOCK_SIGNATURE);
- new_rb->rf_suballoc_slot = cpu_to_le16(OCFS2_SB(sb)->slot_num);
+ new_rb->rf_suballoc_slot = cpu_to_le16(meta_ac->ac_alloc_slot);
new_rb->rf_suballoc_bit = cpu_to_le16(suballoc_bit_start);
new_rb->rf_fs_generation = cpu_to_le32(OCFS2_SB(sb)->fs_generation);
new_rb->rf_blkno = cpu_to_le64(blkno);
diff --git a/fs/ocfs2/stack_o2cb.c b/fs/ocfs2/stack_o2cb.c
index 3038c92..7020e12 100644
--- a/fs/ocfs2/stack_o2cb.c
+++ b/fs/ocfs2/stack_o2cb.c
@@ -161,24 +161,23 @@
static void o2dlm_lock_ast_wrapper(void *astarg)
{
- BUG_ON(o2cb_stack.sp_proto == NULL);
+ struct ocfs2_dlm_lksb *lksb = astarg;
- o2cb_stack.sp_proto->lp_lock_ast(astarg);
+ lksb->lksb_conn->cc_proto->lp_lock_ast(lksb);
}
static void o2dlm_blocking_ast_wrapper(void *astarg, int level)
{
- BUG_ON(o2cb_stack.sp_proto == NULL);
+ struct ocfs2_dlm_lksb *lksb = astarg;
- o2cb_stack.sp_proto->lp_blocking_ast(astarg, level);
+ lksb->lksb_conn->cc_proto->lp_blocking_ast(lksb, level);
}
static void o2dlm_unlock_ast_wrapper(void *astarg, enum dlm_status status)
{
+ struct ocfs2_dlm_lksb *lksb = astarg;
int error = dlm_status_to_errno(status);
- BUG_ON(o2cb_stack.sp_proto == NULL);
-
/*
* In o2dlm, you can get both the lock_ast() for the lock being
* granted and the unlock_ast() for the CANCEL failing. A
@@ -193,16 +192,15 @@
if (status == DLM_CANCELGRANT)
return;
- o2cb_stack.sp_proto->lp_unlock_ast(astarg, error);
+ lksb->lksb_conn->cc_proto->lp_unlock_ast(lksb, error);
}
static int o2cb_dlm_lock(struct ocfs2_cluster_connection *conn,
int mode,
- union ocfs2_dlm_lksb *lksb,
+ struct ocfs2_dlm_lksb *lksb,
u32 flags,
void *name,
- unsigned int namelen,
- void *astarg)
+ unsigned int namelen)
{
enum dlm_status status;
int o2dlm_mode = mode_to_o2dlm(mode);
@@ -211,28 +209,27 @@
status = dlmlock(conn->cc_lockspace, o2dlm_mode, &lksb->lksb_o2dlm,
o2dlm_flags, name, namelen,
- o2dlm_lock_ast_wrapper, astarg,
+ o2dlm_lock_ast_wrapper, lksb,
o2dlm_blocking_ast_wrapper);
ret = dlm_status_to_errno(status);
return ret;
}
static int o2cb_dlm_unlock(struct ocfs2_cluster_connection *conn,
- union ocfs2_dlm_lksb *lksb,
- u32 flags,
- void *astarg)
+ struct ocfs2_dlm_lksb *lksb,
+ u32 flags)
{
enum dlm_status status;
int o2dlm_flags = flags_to_o2dlm(flags);
int ret;
status = dlmunlock(conn->cc_lockspace, &lksb->lksb_o2dlm,
- o2dlm_flags, o2dlm_unlock_ast_wrapper, astarg);
+ o2dlm_flags, o2dlm_unlock_ast_wrapper, lksb);
ret = dlm_status_to_errno(status);
return ret;
}
-static int o2cb_dlm_lock_status(union ocfs2_dlm_lksb *lksb)
+static int o2cb_dlm_lock_status(struct ocfs2_dlm_lksb *lksb)
{
return dlm_status_to_errno(lksb->lksb_o2dlm.status);
}
@@ -242,17 +239,17 @@
* contents, it will zero out the LVB. Thus the caller can always trust
* the contents.
*/
-static int o2cb_dlm_lvb_valid(union ocfs2_dlm_lksb *lksb)
+static int o2cb_dlm_lvb_valid(struct ocfs2_dlm_lksb *lksb)
{
return 1;
}
-static void *o2cb_dlm_lvb(union ocfs2_dlm_lksb *lksb)
+static void *o2cb_dlm_lvb(struct ocfs2_dlm_lksb *lksb)
{
return (void *)(lksb->lksb_o2dlm.lvb);
}
-static void o2cb_dump_lksb(union ocfs2_dlm_lksb *lksb)
+static void o2cb_dump_lksb(struct ocfs2_dlm_lksb *lksb)
{
dlm_print_one_lock(lksb->lksb_o2dlm.lockid);
}
@@ -280,7 +277,7 @@
struct dlm_protocol_version fs_version;
BUG_ON(conn == NULL);
- BUG_ON(o2cb_stack.sp_proto == NULL);
+ BUG_ON(conn->cc_proto == NULL);
/* for now we only have one cluster/node, make sure we see it
* in the heartbeat universe */
diff --git a/fs/ocfs2/stack_user.c b/fs/ocfs2/stack_user.c
index da78a2a..5ae8812 100644
--- a/fs/ocfs2/stack_user.c
+++ b/fs/ocfs2/stack_user.c
@@ -25,7 +25,6 @@
#include <linux/reboot.h>
#include <asm/uaccess.h>
-#include "ocfs2.h" /* For struct ocfs2_lock_res */
#include "stackglue.h"
#include <linux/dlm_plock.h>
@@ -63,8 +62,8 @@
* negotiated by the client. The client negotiates based on the maximum
* version advertised in /sys/fs/ocfs2/max_locking_protocol. The major
* number from the "SETV" message must match
- * ocfs2_user_plugin.sp_proto->lp_max_version.pv_major, and the minor number
- * must be less than or equal to ...->lp_max_version.pv_minor.
+ * ocfs2_user_plugin.sp_max_proto.pv_major, and the minor number
+ * must be less than or equal to ...sp_max_proto.pv_minor.
*
* Once this information has been set, mounts will be allowed. From this
* point on, the "DOWN" message can be sent for node down notification.
@@ -401,7 +400,7 @@
char *ptr = NULL;
struct ocfs2_control_private *p = file->private_data;
struct ocfs2_protocol_version *max =
- &ocfs2_user_plugin.sp_proto->lp_max_version;
+ &ocfs2_user_plugin.sp_max_proto;
if (ocfs2_control_get_handshake_state(file) !=
OCFS2_CONTROL_HANDSHAKE_PROTOCOL)
@@ -664,18 +663,10 @@
-rc);
}
-static struct dlm_lksb *fsdlm_astarg_to_lksb(void *astarg)
-{
- struct ocfs2_lock_res *res = astarg;
- return &res->l_lksb.lksb_fsdlm;
-}
-
static void fsdlm_lock_ast_wrapper(void *astarg)
{
- struct dlm_lksb *lksb = fsdlm_astarg_to_lksb(astarg);
- int status = lksb->sb_status;
-
- BUG_ON(ocfs2_user_plugin.sp_proto == NULL);
+ struct ocfs2_dlm_lksb *lksb = astarg;
+ int status = lksb->lksb_fsdlm.sb_status;
/*
* For now we're punting on the issue of other non-standard errors
@@ -688,25 +679,24 @@
*/
if (status == -DLM_EUNLOCK || status == -DLM_ECANCEL)
- ocfs2_user_plugin.sp_proto->lp_unlock_ast(astarg, 0);
+ lksb->lksb_conn->cc_proto->lp_unlock_ast(lksb, 0);
else
- ocfs2_user_plugin.sp_proto->lp_lock_ast(astarg);
+ lksb->lksb_conn->cc_proto->lp_lock_ast(lksb);
}
static void fsdlm_blocking_ast_wrapper(void *astarg, int level)
{
- BUG_ON(ocfs2_user_plugin.sp_proto == NULL);
+ struct ocfs2_dlm_lksb *lksb = astarg;
- ocfs2_user_plugin.sp_proto->lp_blocking_ast(astarg, level);
+ lksb->lksb_conn->cc_proto->lp_blocking_ast(lksb, level);
}
static int user_dlm_lock(struct ocfs2_cluster_connection *conn,
int mode,
- union ocfs2_dlm_lksb *lksb,
+ struct ocfs2_dlm_lksb *lksb,
u32 flags,
void *name,
- unsigned int namelen,
- void *astarg)
+ unsigned int namelen)
{
int ret;
@@ -716,36 +706,35 @@
ret = dlm_lock(conn->cc_lockspace, mode, &lksb->lksb_fsdlm,
flags|DLM_LKF_NODLCKWT, name, namelen, 0,
- fsdlm_lock_ast_wrapper, astarg,
+ fsdlm_lock_ast_wrapper, lksb,
fsdlm_blocking_ast_wrapper);
return ret;
}
static int user_dlm_unlock(struct ocfs2_cluster_connection *conn,
- union ocfs2_dlm_lksb *lksb,
- u32 flags,
- void *astarg)
+ struct ocfs2_dlm_lksb *lksb,
+ u32 flags)
{
int ret;
ret = dlm_unlock(conn->cc_lockspace, lksb->lksb_fsdlm.sb_lkid,
- flags, &lksb->lksb_fsdlm, astarg);
+ flags, &lksb->lksb_fsdlm, lksb);
return ret;
}
-static int user_dlm_lock_status(union ocfs2_dlm_lksb *lksb)
+static int user_dlm_lock_status(struct ocfs2_dlm_lksb *lksb)
{
return lksb->lksb_fsdlm.sb_status;
}
-static int user_dlm_lvb_valid(union ocfs2_dlm_lksb *lksb)
+static int user_dlm_lvb_valid(struct ocfs2_dlm_lksb *lksb)
{
int invalid = lksb->lksb_fsdlm.sb_flags & DLM_SBF_VALNOTVALID;
return !invalid;
}
-static void *user_dlm_lvb(union ocfs2_dlm_lksb *lksb)
+static void *user_dlm_lvb(struct ocfs2_dlm_lksb *lksb)
{
if (!lksb->lksb_fsdlm.sb_lvbptr)
lksb->lksb_fsdlm.sb_lvbptr = (char *)lksb +
@@ -753,7 +742,7 @@
return (void *)(lksb->lksb_fsdlm.sb_lvbptr);
}
-static void user_dlm_dump_lksb(union ocfs2_dlm_lksb *lksb)
+static void user_dlm_dump_lksb(struct ocfs2_dlm_lksb *lksb)
{
}
diff --git a/fs/ocfs2/stackglue.c b/fs/ocfs2/stackglue.c
index f3df0ba..39abf89 100644
--- a/fs/ocfs2/stackglue.c
+++ b/fs/ocfs2/stackglue.c
@@ -36,7 +36,7 @@
#define OCFS2_STACK_PLUGIN_USER "user"
#define OCFS2_MAX_HB_CTL_PATH 256
-static struct ocfs2_locking_protocol *lproto;
+static struct ocfs2_protocol_version locking_max_version;
static DEFINE_SPINLOCK(ocfs2_stack_lock);
static LIST_HEAD(ocfs2_stack_list);
static char cluster_stack_name[OCFS2_STACK_LABEL_LEN + 1];
@@ -176,7 +176,7 @@
spin_lock(&ocfs2_stack_lock);
if (!ocfs2_stack_lookup(plugin->sp_name)) {
plugin->sp_count = 0;
- plugin->sp_proto = lproto;
+ plugin->sp_max_proto = locking_max_version;
list_add(&plugin->sp_list, &ocfs2_stack_list);
printk(KERN_INFO "ocfs2: Registered cluster interface %s\n",
plugin->sp_name);
@@ -213,77 +213,76 @@
}
EXPORT_SYMBOL_GPL(ocfs2_stack_glue_unregister);
-void ocfs2_stack_glue_set_locking_protocol(struct ocfs2_locking_protocol *proto)
+void ocfs2_stack_glue_set_max_proto_version(struct ocfs2_protocol_version *max_proto)
{
struct ocfs2_stack_plugin *p;
- BUG_ON(proto == NULL);
-
spin_lock(&ocfs2_stack_lock);
- BUG_ON(active_stack != NULL);
+ if (memcmp(max_proto, &locking_max_version,
+ sizeof(struct ocfs2_protocol_version))) {
+ BUG_ON(locking_max_version.pv_major != 0);
- lproto = proto;
- list_for_each_entry(p, &ocfs2_stack_list, sp_list) {
- p->sp_proto = lproto;
+ locking_max_version = *max_proto;
+ list_for_each_entry(p, &ocfs2_stack_list, sp_list) {
+ p->sp_max_proto = locking_max_version;
+ }
}
-
spin_unlock(&ocfs2_stack_lock);
}
-EXPORT_SYMBOL_GPL(ocfs2_stack_glue_set_locking_protocol);
+EXPORT_SYMBOL_GPL(ocfs2_stack_glue_set_max_proto_version);
/*
- * The ocfs2_dlm_lock() and ocfs2_dlm_unlock() functions take
- * "struct ocfs2_lock_res *astarg" instead of "void *astarg" because the
- * underlying stack plugins need to pilfer the lksb off of the lock_res.
- * If some other structure needs to be passed as an astarg, the plugins
- * will need to be given a different avenue to the lksb.
+ * The ocfs2_dlm_lock() and ocfs2_dlm_unlock() functions take no argument
+ * for the ast and bast functions. They will pass the lksb to the ast
+ * and bast. The caller can wrap the lksb with their own structure to
+ * get more information.
*/
int ocfs2_dlm_lock(struct ocfs2_cluster_connection *conn,
int mode,
- union ocfs2_dlm_lksb *lksb,
+ struct ocfs2_dlm_lksb *lksb,
u32 flags,
void *name,
- unsigned int namelen,
- struct ocfs2_lock_res *astarg)
+ unsigned int namelen)
{
- BUG_ON(lproto == NULL);
-
+ if (!lksb->lksb_conn)
+ lksb->lksb_conn = conn;
+ else
+ BUG_ON(lksb->lksb_conn != conn);
return active_stack->sp_ops->dlm_lock(conn, mode, lksb, flags,
- name, namelen, astarg);
+ name, namelen);
}
EXPORT_SYMBOL_GPL(ocfs2_dlm_lock);
int ocfs2_dlm_unlock(struct ocfs2_cluster_connection *conn,
- union ocfs2_dlm_lksb *lksb,
- u32 flags,
- struct ocfs2_lock_res *astarg)
+ struct ocfs2_dlm_lksb *lksb,
+ u32 flags)
{
- BUG_ON(lproto == NULL);
+ BUG_ON(lksb->lksb_conn == NULL);
- return active_stack->sp_ops->dlm_unlock(conn, lksb, flags, astarg);
+ return active_stack->sp_ops->dlm_unlock(conn, lksb, flags);
}
EXPORT_SYMBOL_GPL(ocfs2_dlm_unlock);
-int ocfs2_dlm_lock_status(union ocfs2_dlm_lksb *lksb)
+int ocfs2_dlm_lock_status(struct ocfs2_dlm_lksb *lksb)
{
return active_stack->sp_ops->lock_status(lksb);
}
EXPORT_SYMBOL_GPL(ocfs2_dlm_lock_status);
-int ocfs2_dlm_lvb_valid(union ocfs2_dlm_lksb *lksb)
+int ocfs2_dlm_lvb_valid(struct ocfs2_dlm_lksb *lksb)
{
return active_stack->sp_ops->lvb_valid(lksb);
}
EXPORT_SYMBOL_GPL(ocfs2_dlm_lvb_valid);
-void *ocfs2_dlm_lvb(union ocfs2_dlm_lksb *lksb)
+void *ocfs2_dlm_lvb(struct ocfs2_dlm_lksb *lksb)
{
return active_stack->sp_ops->lock_lvb(lksb);
}
EXPORT_SYMBOL_GPL(ocfs2_dlm_lvb);
-void ocfs2_dlm_dump_lksb(union ocfs2_dlm_lksb *lksb)
+void ocfs2_dlm_dump_lksb(struct ocfs2_dlm_lksb *lksb)
{
active_stack->sp_ops->dump_lksb(lksb);
}
@@ -312,6 +311,7 @@
int ocfs2_cluster_connect(const char *stack_name,
const char *group,
int grouplen,
+ struct ocfs2_locking_protocol *lproto,
void (*recovery_handler)(int node_num,
void *recovery_data),
void *recovery_data,
@@ -329,6 +329,12 @@
goto out;
}
+ if (memcmp(&lproto->lp_max_version, &locking_max_version,
+ sizeof(struct ocfs2_protocol_version))) {
+ rc = -EINVAL;
+ goto out;
+ }
+
new_conn = kzalloc(sizeof(struct ocfs2_cluster_connection),
GFP_KERNEL);
if (!new_conn) {
@@ -341,6 +347,7 @@
new_conn->cc_recovery_handler = recovery_handler;
new_conn->cc_recovery_data = recovery_data;
+ new_conn->cc_proto = lproto;
/* Start the new connection at our maximum compatibility level */
new_conn->cc_version = lproto->lp_max_version;
@@ -366,6 +373,24 @@
}
EXPORT_SYMBOL_GPL(ocfs2_cluster_connect);
+/* The caller will ensure all nodes have the same cluster stack */
+int ocfs2_cluster_connect_agnostic(const char *group,
+ int grouplen,
+ struct ocfs2_locking_protocol *lproto,
+ void (*recovery_handler)(int node_num,
+ void *recovery_data),
+ void *recovery_data,
+ struct ocfs2_cluster_connection **conn)
+{
+ char *stack_name = NULL;
+
+ if (cluster_stack_name[0])
+ stack_name = cluster_stack_name;
+ return ocfs2_cluster_connect(stack_name, group, grouplen, lproto,
+ recovery_handler, recovery_data, conn);
+}
+EXPORT_SYMBOL_GPL(ocfs2_cluster_connect_agnostic);
+
/* If hangup_pending is 0, the stack driver will be dropped */
int ocfs2_cluster_disconnect(struct ocfs2_cluster_connection *conn,
int hangup_pending)
@@ -453,10 +478,10 @@
ssize_t ret = 0;
spin_lock(&ocfs2_stack_lock);
- if (lproto)
+ if (locking_max_version.pv_major)
ret = snprintf(buf, PAGE_SIZE, "%u.%u\n",
- lproto->lp_max_version.pv_major,
- lproto->lp_max_version.pv_minor);
+ locking_max_version.pv_major,
+ locking_max_version.pv_minor);
spin_unlock(&ocfs2_stack_lock);
return ret;
@@ -685,7 +710,10 @@
static void __exit ocfs2_stack_glue_exit(void)
{
- lproto = NULL;
+ memset(&locking_max_version, 0,
+ sizeof(struct ocfs2_protocol_version));
+ locking_max_version.pv_major = 0;
+ locking_max_version.pv_minor = 0;
ocfs2_sysfs_exit();
if (ocfs2_table_header)
unregister_sysctl_table(ocfs2_table_header);
diff --git a/fs/ocfs2/stackglue.h b/fs/ocfs2/stackglue.h
index 03a44d6..8ce7398 100644
--- a/fs/ocfs2/stackglue.h
+++ b/fs/ocfs2/stackglue.h
@@ -56,17 +56,6 @@
};
/*
- * The ocfs2_locking_protocol defines the handlers called on ocfs2's behalf.
- */
-struct ocfs2_locking_protocol {
- struct ocfs2_protocol_version lp_max_version;
- void (*lp_lock_ast)(void *astarg);
- void (*lp_blocking_ast)(void *astarg, int level);
- void (*lp_unlock_ast)(void *astarg, int error);
-};
-
-
-/*
* The dlm_lockstatus struct includes lvb space, but the dlm_lksb struct only
* has a pointer to separately allocated lvb space. This struct exists only to
* include in the lksb union to make space for a combined dlm_lksb and lvb.
@@ -81,13 +70,28 @@
* size of the union is known. Lock status structures are embedded in
* ocfs2 inodes.
*/
-union ocfs2_dlm_lksb {
- struct dlm_lockstatus lksb_o2dlm;
- struct dlm_lksb lksb_fsdlm;
- struct fsdlm_lksb_plus_lvb padding;
+struct ocfs2_cluster_connection;
+struct ocfs2_dlm_lksb {
+ union {
+ struct dlm_lockstatus lksb_o2dlm;
+ struct dlm_lksb lksb_fsdlm;
+ struct fsdlm_lksb_plus_lvb padding;
+ };
+ struct ocfs2_cluster_connection *lksb_conn;
};
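/*
 * Editor's illustrative sketch, not part of the patch: because the ast and
 * bast handlers now receive the ocfs2_dlm_lksb itself, a caller that needs
 * per-lock context can embed the lksb in its own structure and recover the
 * wrapper with container_of().  "my_lock_res" and "my_lock_ast" are
 * hypothetical names; ocfs2 does the equivalent with struct ocfs2_lock_res.
 */
struct my_lock_res {
	int			lr_status;
	struct ocfs2_dlm_lksb	lr_lksb;
};

static void my_lock_ast(struct ocfs2_dlm_lksb *lksb)
{
	struct my_lock_res *res = container_of(lksb, struct my_lock_res,
					       lr_lksb);

	/* res is the caller's wrapper object around this lksb */
	res->lr_status = ocfs2_dlm_lock_status(lksb);
}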
/*
+ * The ocfs2_locking_protocol defines the handlers called on ocfs2's behalf.
+ */
+struct ocfs2_locking_protocol {
+ struct ocfs2_protocol_version lp_max_version;
+ void (*lp_lock_ast)(struct ocfs2_dlm_lksb *lksb);
+ void (*lp_blocking_ast)(struct ocfs2_dlm_lksb *lksb, int level);
+ void (*lp_unlock_ast)(struct ocfs2_dlm_lksb *lksb, int error);
+};
+
+
+/*
* A cluster connection. Mostly opaque to ocfs2, the connection holds
* state for the underlying stack. ocfs2 does use cc_version to determine
* locking compatibility.
@@ -96,6 +100,7 @@
char cc_name[GROUP_NAME_MAX];
int cc_namelen;
struct ocfs2_protocol_version cc_version;
+ struct ocfs2_locking_protocol *cc_proto;
void (*cc_recovery_handler)(int node_num, void *recovery_data);
void *cc_recovery_data;
void *cc_lockspace;
@@ -155,27 +160,29 @@
*
* ast and bast functions are not part of the call because the
* stack will likely want to wrap ast and bast calls before passing
- * them to stack->sp_proto.
+ * them to stack->sp_proto. There is no astarg. The lksb will
+ * be passed back to the ast and bast functions. The caller can
+ * use this to find their object.
*/
int (*dlm_lock)(struct ocfs2_cluster_connection *conn,
int mode,
- union ocfs2_dlm_lksb *lksb,
+ struct ocfs2_dlm_lksb *lksb,
u32 flags,
void *name,
- unsigned int namelen,
- void *astarg);
+ unsigned int namelen);
/*
* Call the underlying dlm unlock function. The ->dlm_unlock()
* function should convert the flags as appropriate.
*
* The unlock ast is not passed, as the stack will want to wrap
- * it before calling stack->sp_proto->lp_unlock_ast().
+ * it before calling stack->sp_proto->lp_unlock_ast(). There is
+ * no astarg. The lksb will be passed back to the unlock ast
+ * function. The caller can use this to find their object.
*/
int (*dlm_unlock)(struct ocfs2_cluster_connection *conn,
- union ocfs2_dlm_lksb *lksb,
- u32 flags,
- void *astarg);
+ struct ocfs2_dlm_lksb *lksb,
+ u32 flags);
/*
* Return the status of the current lock status block. The fs
@@ -183,17 +190,17 @@
* callback pulls out the stack-specific lksb, converts the status
* to a proper errno, and returns it.
*/
- int (*lock_status)(union ocfs2_dlm_lksb *lksb);
+ int (*lock_status)(struct ocfs2_dlm_lksb *lksb);
/*
* Return non-zero if the LVB is valid.
*/
- int (*lvb_valid)(union ocfs2_dlm_lksb *lksb);
+ int (*lvb_valid)(struct ocfs2_dlm_lksb *lksb);
/*
* Pull the lvb pointer off of the stack-specific lksb.
*/
- void *(*lock_lvb)(union ocfs2_dlm_lksb *lksb);
+ void *(*lock_lvb)(struct ocfs2_dlm_lksb *lksb);
/*
* Cluster-aware posix locks
@@ -210,7 +217,7 @@
* This is an optional debugging hook. If provided, the
* stack can dump debugging information about this lock.
*/
- void (*dump_lksb)(union ocfs2_dlm_lksb *lksb);
+ void (*dump_lksb)(struct ocfs2_dlm_lksb *lksb);
};
/*
@@ -226,7 +233,7 @@
/* These are managed by the stackglue code. */
struct list_head sp_list;
unsigned int sp_count;
- struct ocfs2_locking_protocol *sp_proto;
+ struct ocfs2_protocol_version sp_max_proto;
};
@@ -234,10 +241,22 @@
int ocfs2_cluster_connect(const char *stack_name,
const char *group,
int grouplen,
+ struct ocfs2_locking_protocol *lproto,
void (*recovery_handler)(int node_num,
void *recovery_data),
void *recovery_data,
struct ocfs2_cluster_connection **conn);
+/*
+ * Used by callers that don't store their stack name. They must ensure
+ * all nodes have the same stack.
+ */
+int ocfs2_cluster_connect_agnostic(const char *group,
+ int grouplen,
+ struct ocfs2_locking_protocol *lproto,
+ void (*recovery_handler)(int node_num,
+ void *recovery_data),
+ void *recovery_data,
+ struct ocfs2_cluster_connection **conn);
int ocfs2_cluster_disconnect(struct ocfs2_cluster_connection *conn,
int hangup_pending);
void ocfs2_cluster_hangup(const char *group, int grouplen);
@@ -246,26 +265,24 @@
struct ocfs2_lock_res;
int ocfs2_dlm_lock(struct ocfs2_cluster_connection *conn,
int mode,
- union ocfs2_dlm_lksb *lksb,
+ struct ocfs2_dlm_lksb *lksb,
u32 flags,
void *name,
- unsigned int namelen,
- struct ocfs2_lock_res *astarg);
+ unsigned int namelen);
int ocfs2_dlm_unlock(struct ocfs2_cluster_connection *conn,
- union ocfs2_dlm_lksb *lksb,
- u32 flags,
- struct ocfs2_lock_res *astarg);
+ struct ocfs2_dlm_lksb *lksb,
+ u32 flags);
-int ocfs2_dlm_lock_status(union ocfs2_dlm_lksb *lksb);
-int ocfs2_dlm_lvb_valid(union ocfs2_dlm_lksb *lksb);
-void *ocfs2_dlm_lvb(union ocfs2_dlm_lksb *lksb);
-void ocfs2_dlm_dump_lksb(union ocfs2_dlm_lksb *lksb);
+int ocfs2_dlm_lock_status(struct ocfs2_dlm_lksb *lksb);
+int ocfs2_dlm_lvb_valid(struct ocfs2_dlm_lksb *lksb);
+void *ocfs2_dlm_lvb(struct ocfs2_dlm_lksb *lksb);
+void ocfs2_dlm_dump_lksb(struct ocfs2_dlm_lksb *lksb);
int ocfs2_stack_supports_plocks(void);
int ocfs2_plock(struct ocfs2_cluster_connection *conn, u64 ino,
struct file *file, int cmd, struct file_lock *fl);
-void ocfs2_stack_glue_set_locking_protocol(struct ocfs2_locking_protocol *proto);
+void ocfs2_stack_glue_set_max_proto_version(struct ocfs2_protocol_version *max_proto);
/* Used by stack plugins */
diff --git a/fs/ocfs2/suballoc.c b/fs/ocfs2/suballoc.c
index c30b644..c3c60bc 100644
--- a/fs/ocfs2/suballoc.c
+++ b/fs/ocfs2/suballoc.c
@@ -51,7 +51,7 @@
#define ALLOC_NEW_GROUP 0x1
#define ALLOC_GROUPS_FROM_GLOBAL 0x2
-#define OCFS2_MAX_INODES_TO_STEAL 1024
+#define OCFS2_MAX_TO_STEAL 1024
static inline void ocfs2_debug_bg(struct ocfs2_group_desc *bg);
static inline void ocfs2_debug_suballoc_inode(struct ocfs2_dinode *fe);
@@ -637,12 +637,113 @@
return status;
}
+static void ocfs2_init_inode_steal_slot(struct ocfs2_super *osb)
+{
+ spin_lock(&osb->osb_lock);
+ osb->s_inode_steal_slot = OCFS2_INVALID_SLOT;
+ spin_unlock(&osb->osb_lock);
+ atomic_set(&osb->s_num_inodes_stolen, 0);
+}
+
+static void ocfs2_init_meta_steal_slot(struct ocfs2_super *osb)
+{
+ spin_lock(&osb->osb_lock);
+ osb->s_meta_steal_slot = OCFS2_INVALID_SLOT;
+ spin_unlock(&osb->osb_lock);
+ atomic_set(&osb->s_num_meta_stolen, 0);
+}
+
+void ocfs2_init_steal_slots(struct ocfs2_super *osb)
+{
+ ocfs2_init_inode_steal_slot(osb);
+ ocfs2_init_meta_steal_slot(osb);
+}
+
+static void __ocfs2_set_steal_slot(struct ocfs2_super *osb, int slot, int type)
+{
+ spin_lock(&osb->osb_lock);
+ if (type == INODE_ALLOC_SYSTEM_INODE)
+ osb->s_inode_steal_slot = slot;
+ else if (type == EXTENT_ALLOC_SYSTEM_INODE)
+ osb->s_meta_steal_slot = slot;
+ spin_unlock(&osb->osb_lock);
+}
+
+static int __ocfs2_get_steal_slot(struct ocfs2_super *osb, int type)
+{
+ int slot = OCFS2_INVALID_SLOT;
+
+ spin_lock(&osb->osb_lock);
+ if (type == INODE_ALLOC_SYSTEM_INODE)
+ slot = osb->s_inode_steal_slot;
+ else if (type == EXTENT_ALLOC_SYSTEM_INODE)
+ slot = osb->s_meta_steal_slot;
+ spin_unlock(&osb->osb_lock);
+
+ return slot;
+}
+
+static int ocfs2_get_inode_steal_slot(struct ocfs2_super *osb)
+{
+ return __ocfs2_get_steal_slot(osb, INODE_ALLOC_SYSTEM_INODE);
+}
+
+static int ocfs2_get_meta_steal_slot(struct ocfs2_super *osb)
+{
+ return __ocfs2_get_steal_slot(osb, EXTENT_ALLOC_SYSTEM_INODE);
+}
+
+static int ocfs2_steal_resource(struct ocfs2_super *osb,
+ struct ocfs2_alloc_context *ac,
+ int type)
+{
+ int i, status = -ENOSPC;
+ int slot = __ocfs2_get_steal_slot(osb, type);
+
+ /* Start to steal resource from the first slot after ours. */
+ if (slot == OCFS2_INVALID_SLOT)
+ slot = osb->slot_num + 1;
+
+ for (i = 0; i < osb->max_slots; i++, slot++) {
+ if (slot == osb->max_slots)
+ slot = 0;
+
+ if (slot == osb->slot_num)
+ continue;
+
+ status = ocfs2_reserve_suballoc_bits(osb, ac,
+ type,
+ (u32)slot, NULL,
+ NOT_ALLOC_NEW_GROUP);
+ if (status >= 0) {
+ __ocfs2_set_steal_slot(osb, slot, type);
+ break;
+ }
+
+ ocfs2_free_ac_resource(ac);
+ }
+
+ return status;
+}
+
+static int ocfs2_steal_inode(struct ocfs2_super *osb,
+ struct ocfs2_alloc_context *ac)
+{
+ return ocfs2_steal_resource(osb, ac, INODE_ALLOC_SYSTEM_INODE);
+}
+
+static int ocfs2_steal_meta(struct ocfs2_super *osb,
+ struct ocfs2_alloc_context *ac)
+{
+ return ocfs2_steal_resource(osb, ac, EXTENT_ALLOC_SYSTEM_INODE);
+}
+
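/*
 * Editor's illustrative sketch, not part of the patch: the visiting order
 * used by ocfs2_steal_resource() above.  Starting from the remembered slot
 * (or the slot after ours when nothing is remembered), every other slot is
 * tried once, wrapping at max_slots and skipping our own slot.  The names
 * below are hypothetical; a negative start_slot stands in for
 * OCFS2_INVALID_SLOT.
 */
#include <stdio.h>

static void print_steal_order(int our_slot, int start_slot, int max_slots)
{
	int i, slot = (start_slot >= 0) ? start_slot : our_slot + 1;

	for (i = 0; i < max_slots; i++, slot++) {
		if (slot == max_slots)
			slot = 0;		/* wrap around */
		if (slot == our_slot)
			continue;		/* never steal from ourselves */
		printf("try slot %d\n", slot);
	}
}

/* With our_slot = 1, no remembered slot, max_slots = 4: tries 2, 3, 0. */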
int ocfs2_reserve_new_metadata_blocks(struct ocfs2_super *osb,
int blocks,
struct ocfs2_alloc_context **ac)
{
int status;
- u32 slot;
+ int slot = ocfs2_get_meta_steal_slot(osb);
*ac = kzalloc(sizeof(struct ocfs2_alloc_context), GFP_KERNEL);
if (!(*ac)) {
@@ -653,12 +754,34 @@
(*ac)->ac_bits_wanted = blocks;
(*ac)->ac_which = OCFS2_AC_USE_META;
- slot = osb->slot_num;
(*ac)->ac_group_search = ocfs2_block_group_search;
+ if (slot != OCFS2_INVALID_SLOT &&
+ atomic_read(&osb->s_num_meta_stolen) < OCFS2_MAX_TO_STEAL)
+ goto extent_steal;
+
+ atomic_set(&osb->s_num_meta_stolen, 0);
status = ocfs2_reserve_suballoc_bits(osb, (*ac),
EXTENT_ALLOC_SYSTEM_INODE,
- slot, NULL, ALLOC_NEW_GROUP);
+ (u32)osb->slot_num, NULL,
+ ALLOC_NEW_GROUP);
+
+
+ if (status >= 0) {
+ status = 0;
+ if (slot != OCFS2_INVALID_SLOT)
+ ocfs2_init_meta_steal_slot(osb);
+ goto bail;
+ } else if (status < 0 && status != -ENOSPC) {
+ mlog_errno(status);
+ goto bail;
+ }
+
+ ocfs2_free_ac_resource(*ac);
+
+extent_steal:
+ status = ocfs2_steal_meta(osb, *ac);
+ atomic_inc(&osb->s_num_meta_stolen);
if (status < 0) {
if (status != -ENOSPC)
mlog_errno(status);
@@ -685,43 +808,11 @@
ac);
}
-static int ocfs2_steal_inode_from_other_nodes(struct ocfs2_super *osb,
- struct ocfs2_alloc_context *ac)
-{
- int i, status = -ENOSPC;
- s16 slot = ocfs2_get_inode_steal_slot(osb);
-
- /* Start to steal inodes from the first slot after ours. */
- if (slot == OCFS2_INVALID_SLOT)
- slot = osb->slot_num + 1;
-
- for (i = 0; i < osb->max_slots; i++, slot++) {
- if (slot == osb->max_slots)
- slot = 0;
-
- if (slot == osb->slot_num)
- continue;
-
- status = ocfs2_reserve_suballoc_bits(osb, ac,
- INODE_ALLOC_SYSTEM_INODE,
- slot, NULL,
- NOT_ALLOC_NEW_GROUP);
- if (status >= 0) {
- ocfs2_set_inode_steal_slot(osb, slot);
- break;
- }
-
- ocfs2_free_ac_resource(ac);
- }
-
- return status;
-}
-
int ocfs2_reserve_new_inode(struct ocfs2_super *osb,
struct ocfs2_alloc_context **ac)
{
int status;
- s16 slot = ocfs2_get_inode_steal_slot(osb);
+ int slot = ocfs2_get_inode_steal_slot(osb);
u64 alloc_group;
*ac = kzalloc(sizeof(struct ocfs2_alloc_context), GFP_KERNEL);
@@ -754,14 +845,14 @@
* need to check our slots to see whether there is some space for us.
*/
if (slot != OCFS2_INVALID_SLOT &&
- atomic_read(&osb->s_num_inodes_stolen) < OCFS2_MAX_INODES_TO_STEAL)
+ atomic_read(&osb->s_num_inodes_stolen) < OCFS2_MAX_TO_STEAL)
goto inode_steal;
atomic_set(&osb->s_num_inodes_stolen, 0);
alloc_group = osb->osb_inode_alloc_group;
status = ocfs2_reserve_suballoc_bits(osb, *ac,
INODE_ALLOC_SYSTEM_INODE,
- osb->slot_num,
+ (u32)osb->slot_num,
&alloc_group,
ALLOC_NEW_GROUP |
ALLOC_GROUPS_FROM_GLOBAL);
@@ -789,7 +880,7 @@
ocfs2_free_ac_resource(*ac);
inode_steal:
- status = ocfs2_steal_inode_from_other_nodes(osb, *ac);
+ status = ocfs2_steal_inode(osb, *ac);
atomic_inc(&osb->s_num_inodes_stolen);
if (status < 0) {
if (status != -ENOSPC)
diff --git a/fs/ocfs2/suballoc.h b/fs/ocfs2/suballoc.h
index 8c9a78a..fa60723 100644
--- a/fs/ocfs2/suballoc.h
+++ b/fs/ocfs2/suballoc.h
@@ -56,6 +56,7 @@
is the same as ~0 - unlimited */
};
+void ocfs2_init_steal_slots(struct ocfs2_super *osb);
void ocfs2_free_alloc_context(struct ocfs2_alloc_context *ac);
static inline int ocfs2_alloc_context_bits_left(struct ocfs2_alloc_context *ac)
{
diff --git a/fs/ocfs2/super.c b/fs/ocfs2/super.c
index 755cd49..dee0319 100644
--- a/fs/ocfs2/super.c
+++ b/fs/ocfs2/super.c
@@ -69,6 +69,7 @@
#include "xattr.h"
#include "quota.h"
#include "refcounttree.h"
+#include "suballoc.h"
#include "buffer_head_io.h"
@@ -301,9 +302,12 @@
spin_lock(&osb->osb_lock);
out += snprintf(buf + out, len - out,
- "%10s => Slot: %d NumStolen: %d\n", "Steal",
+ "%10s => InodeSlot: %d StolenInodes: %d, "
+ "MetaSlot: %d StolenMeta: %d\n", "Steal",
osb->s_inode_steal_slot,
- atomic_read(&osb->s_num_inodes_stolen));
+ atomic_read(&osb->s_num_inodes_stolen),
+ osb->s_meta_steal_slot,
+ atomic_read(&osb->s_num_meta_stolen));
spin_unlock(&osb->osb_lock);
out += snprintf(buf + out, len - out, "OrphanScan => ");
@@ -1997,7 +2001,7 @@
osb->blocked_lock_count = 0;
spin_lock_init(&osb->osb_lock);
spin_lock_init(&osb->osb_xattr_lock);
- ocfs2_init_inode_steal_slot(osb);
+ ocfs2_init_steal_slots(osb);
atomic_set(&osb->alloc_stats.moves, 0);
atomic_set(&osb->alloc_stats.local_data, 0);
diff --git a/fs/ocfs2/xattr.c b/fs/ocfs2/xattr.c
index 8fc6fb0..d1b0d38 100644
--- a/fs/ocfs2/xattr.c
+++ b/fs/ocfs2/xattr.c
@@ -116,10 +116,11 @@
};
struct ocfs2_xattr_info {
- int name_index;
- const char *name;
- const void *value;
- size_t value_len;
+ int xi_name_index;
+ const char *xi_name;
+ int xi_name_len;
+ const void *xi_value;
+ size_t xi_value_len;
};
struct ocfs2_xattr_search {
@@ -137,6 +138,115 @@
int not_found;
};
+/* Operations on struct ocfs2_xa_entry */
+struct ocfs2_xa_loc;
+struct ocfs2_xa_loc_operations {
+ /*
+ * Journal functions
+ */
+ int (*xlo_journal_access)(handle_t *handle, struct ocfs2_xa_loc *loc,
+ int type);
+ void (*xlo_journal_dirty)(handle_t *handle, struct ocfs2_xa_loc *loc);
+
+ /*
+ * Return a pointer to the appropriate buffer in loc->xl_storage
+ * at the given offset from loc->xl_header.
+ */
+ void *(*xlo_offset_pointer)(struct ocfs2_xa_loc *loc, int offset);
+
+ /* Can we reuse the existing entry for the new value? */
+ int (*xlo_can_reuse)(struct ocfs2_xa_loc *loc,
+ struct ocfs2_xattr_info *xi);
+
+ /* How much space is needed for the new value? */
+ int (*xlo_check_space)(struct ocfs2_xa_loc *loc,
+ struct ocfs2_xattr_info *xi);
+
+ /*
+ * Return the offset of the first name+value pair. This is
+ * the start of our downward-filling free space.
+ */
+ int (*xlo_get_free_start)(struct ocfs2_xa_loc *loc);
+
+ /*
+ * Remove the name+value at this location. Do whatever is
+ * appropriate with the remaining name+value pairs.
+ */
+ void (*xlo_wipe_namevalue)(struct ocfs2_xa_loc *loc);
+
+ /* Fill xl_entry with a new entry */
+ void (*xlo_add_entry)(struct ocfs2_xa_loc *loc, u32 name_hash);
+
+ /* Add name+value storage to an entry */
+ void (*xlo_add_namevalue)(struct ocfs2_xa_loc *loc, int size);
+
+ /*
+ * Initialize the value buf's access and bh fields for this entry.
+ * ocfs2_xa_fill_value_buf() will handle the xv pointer.
+ */
+ void (*xlo_fill_value_buf)(struct ocfs2_xa_loc *loc,
+ struct ocfs2_xattr_value_buf *vb);
+};
+
+/*
+ * Describes an xattr entry location. This is a memory structure
+ * tracking the on-disk structure.
+ */
+struct ocfs2_xa_loc {
+ /* This xattr belongs to this inode */
+ struct inode *xl_inode;
+
+ /* The ocfs2_xattr_header inside the on-disk storage. Not NULL. */
+ struct ocfs2_xattr_header *xl_header;
+
+ /* Bytes from xl_header to the end of the storage */
+ int xl_size;
+
+ /*
+ * The ocfs2_xattr_entry this location describes. If this is
+ * NULL, this location describes the on-disk structure where it
+ * would have been.
+ */
+ struct ocfs2_xattr_entry *xl_entry;
+
+ /*
+ * Internal housekeeping
+ */
+
+ /* Buffer(s) containing this entry */
+ void *xl_storage;
+
+ /* Operations on the storage backing this location */
+ const struct ocfs2_xa_loc_operations *xl_ops;
+};
+
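/*
 * Editor's illustrative sketch, not part of the patch: how an ocfs2_xa_loc
 * is expected to be wired up before use.  This initializer is hypothetical;
 * the real code fills xl_storage/xl_ops from either a buffer_head (block
 * storage) or an ocfs2_xattr_bucket, and the higher-level set/remove logic
 * then only ever goes through the xlo_* operations, so it never needs to
 * know which kind of storage it is touching.
 */
static void example_fill_block_loc(struct ocfs2_xa_loc *loc,
				   struct inode *inode,
				   struct buffer_head *bh,
				   struct ocfs2_xattr_header *xh,
				   int size)
{
	loc->xl_inode = inode;
	loc->xl_storage = bh;			/* the backing buffer */
	loc->xl_header = xh;
	loc->xl_size = size;
	loc->xl_entry = NULL;			/* no existing entry yet */
	loc->xl_ops = &ocfs2_xa_block_loc_ops;	/* defined later in this file */
}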
+/*
+ * Convenience functions to calculate how much space is needed for a
+ * given name+value pair
+ */
+static int namevalue_size(int name_len, uint64_t value_len)
+{
+ if (value_len > OCFS2_XATTR_INLINE_SIZE)
+ return OCFS2_XATTR_SIZE(name_len) + OCFS2_XATTR_ROOT_SIZE;
+ else
+ return OCFS2_XATTR_SIZE(name_len) + OCFS2_XATTR_SIZE(value_len);
+}
+
+static int namevalue_size_xi(struct ocfs2_xattr_info *xi)
+{
+ return namevalue_size(xi->xi_name_len, xi->xi_value_len);
+}
+
+static int namevalue_size_xe(struct ocfs2_xattr_entry *xe)
+{
+ u64 value_len = le64_to_cpu(xe->xe_value_size);
+
+ BUG_ON((value_len > OCFS2_XATTR_INLINE_SIZE) &&
+ ocfs2_xattr_is_local(xe));
+ return namevalue_size(xe->xe_name_len, value_len);
+}
+
+
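/*
 * Editor's illustrative sketch, not part of the patch: what the helpers
 * above compute.  OCFS2_XATTR_SIZE() rounds a length up to a 4-byte
 * boundary, so a 7-byte name with a 10-byte inline value needs 8 + 12
 * bytes, while the same name with a value too large to store inline needs
 * 8 bytes plus room for a value tree root.  The constants below are
 * stand-ins for the real ones, not their actual values.
 */
#include <stdint.h>

#define XATTR_ALIGN(len)	(((len) + 3) & ~3u)	/* like OCFS2_XATTR_SIZE() */
#define XATTR_INLINE_MAX	80			/* stand-in for OCFS2_XATTR_INLINE_SIZE */
#define XATTR_ROOT_SIZE		96			/* stand-in for OCFS2_XATTR_ROOT_SIZE */

static int example_namevalue_size(int name_len, uint64_t value_len)
{
	if (value_len > XATTR_INLINE_MAX)
		return XATTR_ALIGN(name_len) + XATTR_ROOT_SIZE;
	return XATTR_ALIGN(name_len) + XATTR_ALIGN(value_len);
}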
static int ocfs2_xattr_bucket_get_name_value(struct super_block *sb,
struct ocfs2_xattr_header *xh,
int index,
@@ -212,14 +322,6 @@
return OCFS2_XATTR_BUCKET_SIZE / (1 << sb->s_blocksize_bits);
}
-static inline u16 ocfs2_xattr_max_xe_in_bucket(struct super_block *sb)
-{
- u16 len = sb->s_blocksize -
- offsetof(struct ocfs2_xattr_header, xh_entries);
-
- return len / sizeof(struct ocfs2_xattr_entry);
-}
-
#define bucket_blkno(_b) ((_b)->bu_bhs[0]->b_blocknr)
#define bucket_block(_b, _n) ((_b)->bu_bhs[(_n)]->b_data)
#define bucket_xh(_b) ((struct ocfs2_xattr_header *)bucket_block((_b), 0))
@@ -463,35 +565,22 @@
return hash;
}
-/*
- * ocfs2_xattr_hash_entry()
- *
- * Compute the hash of an extended attribute.
- */
-static void ocfs2_xattr_hash_entry(struct inode *inode,
- struct ocfs2_xattr_header *header,
- struct ocfs2_xattr_entry *entry)
-{
- u32 hash = 0;
- char *name = (char *)header + le16_to_cpu(entry->xe_name_offset);
-
- hash = ocfs2_xattr_name_hash(inode, name, entry->xe_name_len);
- entry->xe_name_hash = cpu_to_le32(hash);
-
- return;
-}
-
static int ocfs2_xattr_entry_real_size(int name_len, size_t value_len)
{
- int size = 0;
+ return namevalue_size(name_len, value_len) +
+ sizeof(struct ocfs2_xattr_entry);
+}
- if (value_len <= OCFS2_XATTR_INLINE_SIZE)
- size = OCFS2_XATTR_SIZE(name_len) + OCFS2_XATTR_SIZE(value_len);
- else
- size = OCFS2_XATTR_SIZE(name_len) + OCFS2_XATTR_ROOT_SIZE;
- size += sizeof(struct ocfs2_xattr_entry);
+static int ocfs2_xi_entry_usage(struct ocfs2_xattr_info *xi)
+{
+ return namevalue_size_xi(xi) +
+ sizeof(struct ocfs2_xattr_entry);
+}
- return size;
+static int ocfs2_xe_entry_usage(struct ocfs2_xattr_entry *xe)
+{
+ return namevalue_size_xe(xe) +
+ sizeof(struct ocfs2_xattr_entry);
}
int ocfs2_calc_security_init(struct inode *dir,
@@ -1308,452 +1397,897 @@
return ret;
}
-static int ocfs2_xattr_cleanup(struct inode *inode,
- handle_t *handle,
- struct ocfs2_xattr_info *xi,
- struct ocfs2_xattr_search *xs,
- struct ocfs2_xattr_value_buf *vb,
- size_t offs)
+static int ocfs2_xa_check_space_helper(int needed_space, int free_start,
+ int num_entries)
{
- int ret = 0;
- size_t name_len = strlen(xi->name);
- void *val = xs->base + offs;
- size_t size = OCFS2_XATTR_SIZE(name_len) + OCFS2_XATTR_ROOT_SIZE;
+ int free_space;
- ret = vb->vb_access(handle, INODE_CACHE(inode), vb->vb_bh,
- OCFS2_JOURNAL_ACCESS_WRITE);
- if (ret) {
- mlog_errno(ret);
- goto out;
- }
- /* Decrease xattr count */
- le16_add_cpu(&xs->header->xh_count, -1);
- /* Remove the xattr entry and tree root which has already be set*/
- memset((void *)xs->here, 0, sizeof(struct ocfs2_xattr_entry));
- memset(val, 0, size);
+ if (!needed_space)
+ return 0;
- ret = ocfs2_journal_dirty(handle, vb->vb_bh);
- if (ret < 0)
- mlog_errno(ret);
-out:
- return ret;
-}
-
-static int ocfs2_xattr_update_entry(struct inode *inode,
- handle_t *handle,
- struct ocfs2_xattr_info *xi,
- struct ocfs2_xattr_search *xs,
- struct ocfs2_xattr_value_buf *vb,
- size_t offs)
-{
- int ret;
-
- ret = vb->vb_access(handle, INODE_CACHE(inode), vb->vb_bh,
- OCFS2_JOURNAL_ACCESS_WRITE);
- if (ret) {
- mlog_errno(ret);
- goto out;
- }
-
- xs->here->xe_name_offset = cpu_to_le16(offs);
- xs->here->xe_value_size = cpu_to_le64(xi->value_len);
- if (xi->value_len <= OCFS2_XATTR_INLINE_SIZE)
- ocfs2_xattr_set_local(xs->here, 1);
- else
- ocfs2_xattr_set_local(xs->here, 0);
- ocfs2_xattr_hash_entry(inode, xs->header, xs->here);
-
- ret = ocfs2_journal_dirty(handle, vb->vb_bh);
- if (ret < 0)
- mlog_errno(ret);
-out:
- return ret;
-}
-
-/*
- * ocfs2_xattr_set_value_outside()
- *
- * Set large size value in B tree.
- */
-static int ocfs2_xattr_set_value_outside(struct inode *inode,
- struct ocfs2_xattr_info *xi,
- struct ocfs2_xattr_search *xs,
- struct ocfs2_xattr_set_ctxt *ctxt,
- struct ocfs2_xattr_value_buf *vb,
- size_t offs)
-{
- size_t name_len = strlen(xi->name);
- void *val = xs->base + offs;
- struct ocfs2_xattr_value_root *xv = NULL;
- size_t size = OCFS2_XATTR_SIZE(name_len) + OCFS2_XATTR_ROOT_SIZE;
- int ret = 0;
-
- memset(val, 0, size);
- memcpy(val, xi->name, name_len);
- xv = (struct ocfs2_xattr_value_root *)
- (val + OCFS2_XATTR_SIZE(name_len));
- xv->xr_clusters = 0;
- xv->xr_last_eb_blk = 0;
- xv->xr_list.l_tree_depth = 0;
- xv->xr_list.l_count = cpu_to_le16(1);
- xv->xr_list.l_next_free_rec = 0;
- vb->vb_xv = xv;
-
- ret = ocfs2_xattr_value_truncate(inode, vb, xi->value_len, ctxt);
- if (ret < 0) {
- mlog_errno(ret);
- return ret;
- }
- ret = ocfs2_xattr_update_entry(inode, ctxt->handle, xi, xs, vb, offs);
- if (ret < 0) {
- mlog_errno(ret);
- return ret;
- }
- ret = __ocfs2_xattr_set_value_outside(inode, ctxt->handle, vb,
- xi->value, xi->value_len);
- if (ret < 0)
- mlog_errno(ret);
-
- return ret;
-}
-
-/*
- * ocfs2_xattr_set_entry_local()
- *
- * Set, replace or remove extended attribute in local.
- */
-static void ocfs2_xattr_set_entry_local(struct inode *inode,
- struct ocfs2_xattr_info *xi,
- struct ocfs2_xattr_search *xs,
- struct ocfs2_xattr_entry *last,
- size_t min_offs)
-{
- size_t name_len = strlen(xi->name);
- int i;
-
- if (xi->value && xs->not_found) {
- /* Insert the new xattr entry. */
- le16_add_cpu(&xs->header->xh_count, 1);
- ocfs2_xattr_set_type(last, xi->name_index);
- ocfs2_xattr_set_local(last, 1);
- last->xe_name_len = name_len;
- } else {
- void *first_val;
- void *val;
- size_t offs, size;
-
- first_val = xs->base + min_offs;
- offs = le16_to_cpu(xs->here->xe_name_offset);
- val = xs->base + offs;
-
- if (le64_to_cpu(xs->here->xe_value_size) >
- OCFS2_XATTR_INLINE_SIZE)
- size = OCFS2_XATTR_SIZE(name_len) +
- OCFS2_XATTR_ROOT_SIZE;
- else
- size = OCFS2_XATTR_SIZE(name_len) +
- OCFS2_XATTR_SIZE(le64_to_cpu(xs->here->xe_value_size));
-
- if (xi->value && size == OCFS2_XATTR_SIZE(name_len) +
- OCFS2_XATTR_SIZE(xi->value_len)) {
- /* The old and the new value have the
- same size. Just replace the value. */
- ocfs2_xattr_set_local(xs->here, 1);
- xs->here->xe_value_size = cpu_to_le64(xi->value_len);
- /* Clear value bytes. */
- memset(val + OCFS2_XATTR_SIZE(name_len),
- 0,
- OCFS2_XATTR_SIZE(xi->value_len));
- memcpy(val + OCFS2_XATTR_SIZE(name_len),
- xi->value,
- xi->value_len);
- return;
- }
- /* Remove the old name+value. */
- memmove(first_val + size, first_val, val - first_val);
- memset(first_val, 0, size);
- xs->here->xe_name_hash = 0;
- xs->here->xe_name_offset = 0;
- ocfs2_xattr_set_local(xs->here, 1);
- xs->here->xe_value_size = 0;
-
- min_offs += size;
-
- /* Adjust all value offsets. */
- last = xs->header->xh_entries;
- for (i = 0 ; i < le16_to_cpu(xs->header->xh_count); i++) {
- size_t o = le16_to_cpu(last->xe_name_offset);
-
- if (o < offs)
- last->xe_name_offset = cpu_to_le16(o + size);
- last += 1;
- }
-
- if (!xi->value) {
- /* Remove the old entry. */
- last -= 1;
- memmove(xs->here, xs->here + 1,
- (void *)last - (void *)xs->here);
- memset(last, 0, sizeof(struct ocfs2_xattr_entry));
- le16_add_cpu(&xs->header->xh_count, -1);
- }
- }
- if (xi->value) {
- /* Insert the new name+value. */
- size_t size = OCFS2_XATTR_SIZE(name_len) +
- OCFS2_XATTR_SIZE(xi->value_len);
- void *val = xs->base + min_offs - size;
-
- xs->here->xe_name_offset = cpu_to_le16(min_offs - size);
- memset(val, 0, size);
- memcpy(val, xi->name, name_len);
- memcpy(val + OCFS2_XATTR_SIZE(name_len),
- xi->value,
- xi->value_len);
- xs->here->xe_value_size = cpu_to_le64(xi->value_len);
- ocfs2_xattr_set_local(xs->here, 1);
- ocfs2_xattr_hash_entry(inode, xs->header, xs->here);
- }
-
- return;
-}
-
-/*
- * ocfs2_xattr_set_entry()
- *
- * Set extended attribute entry into inode or block.
- *
- * If extended attribute value size > OCFS2_XATTR_INLINE_SIZE,
- * We first insert tree root(ocfs2_xattr_value_root) with set_entry_local(),
- * then set value in B tree with set_value_outside().
- */
-static int ocfs2_xattr_set_entry(struct inode *inode,
- struct ocfs2_xattr_info *xi,
- struct ocfs2_xattr_search *xs,
- struct ocfs2_xattr_set_ctxt *ctxt,
- int flag)
-{
- struct ocfs2_xattr_entry *last;
- struct ocfs2_inode_info *oi = OCFS2_I(inode);
- struct ocfs2_dinode *di = (struct ocfs2_dinode *)xs->inode_bh->b_data;
- size_t min_offs = xs->end - xs->base, name_len = strlen(xi->name);
- size_t size_l = 0;
- handle_t *handle = ctxt->handle;
- int free, i, ret;
- struct ocfs2_xattr_info xi_l = {
- .name_index = xi->name_index,
- .name = xi->name,
- .value = xi->value,
- .value_len = xi->value_len,
- };
- struct ocfs2_xattr_value_buf vb = {
- .vb_bh = xs->xattr_bh,
- .vb_access = ocfs2_journal_access_di,
- };
-
- if (!(flag & OCFS2_INLINE_XATTR_FL)) {
- BUG_ON(xs->xattr_bh == xs->inode_bh);
- vb.vb_access = ocfs2_journal_access_xb;
- } else
- BUG_ON(xs->xattr_bh != xs->inode_bh);
-
- /* Compute min_offs, last and free space. */
- last = xs->header->xh_entries;
-
- for (i = 0 ; i < le16_to_cpu(xs->header->xh_count); i++) {
- size_t offs = le16_to_cpu(last->xe_name_offset);
- if (offs < min_offs)
- min_offs = offs;
- last += 1;
- }
-
- free = min_offs - ((void *)last - xs->base) - OCFS2_XATTR_HEADER_GAP;
- if (free < 0)
+ free_space = free_start -
+ sizeof(struct ocfs2_xattr_header) -
+ (num_entries * sizeof(struct ocfs2_xattr_entry)) -
+ OCFS2_XATTR_HEADER_GAP;
+ if (free_space < 0)
return -EIO;
+ if (free_space < needed_space)
+ return -ENOSPC;
- if (!xs->not_found) {
- size_t size = 0;
- if (ocfs2_xattr_is_local(xs->here))
- size = OCFS2_XATTR_SIZE(name_len) +
- OCFS2_XATTR_SIZE(le64_to_cpu(xs->here->xe_value_size));
+ return 0;
+}
+
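/*
 * Editor's illustrative sketch, not part of the patch: the free-space model
 * used by ocfs2_xa_check_space_helper() above.  The entries array extends
 * forward from the header at the front of the storage, name+value pairs
 * fill backward from the end, and free_start is the lowest name+value
 * offset.  The sizes below are hypothetical stand-ins for the real
 * structure sizes.
 */
#define HDR_SIZE	64	/* stand-in for sizeof(struct ocfs2_xattr_header) */
#define ENTRY_SIZE	16	/* stand-in for sizeof(struct ocfs2_xattr_entry) */
#define HEADER_GAP	4	/* stand-in for OCFS2_XATTR_HEADER_GAP */

static int example_free_space(int free_start, int num_entries)
{
	return free_start - HDR_SIZE - num_entries * ENTRY_SIZE - HEADER_GAP;
}

/*
 * e.g. with free_start = 512 and 4 entries there are
 * 512 - 64 - 64 - 4 = 380 bytes left for a new entry plus its
 * name+value pair.
 */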
+static int ocfs2_xa_journal_access(handle_t *handle, struct ocfs2_xa_loc *loc,
+ int type)
+{
+ return loc->xl_ops->xlo_journal_access(handle, loc, type);
+}
+
+static void ocfs2_xa_journal_dirty(handle_t *handle, struct ocfs2_xa_loc *loc)
+{
+ loc->xl_ops->xlo_journal_dirty(handle, loc);
+}
+
+/* Give a pointer into the storage for the given offset */
+static void *ocfs2_xa_offset_pointer(struct ocfs2_xa_loc *loc, int offset)
+{
+ BUG_ON(offset >= loc->xl_size);
+ return loc->xl_ops->xlo_offset_pointer(loc, offset);
+}
+
+/*
+ * Wipe the name+value pair and allow the storage to reclaim it. This
+ * must be followed by either removal of the entry or a call to
+ * ocfs2_xa_add_namevalue().
+ */
+static void ocfs2_xa_wipe_namevalue(struct ocfs2_xa_loc *loc)
+{
+ loc->xl_ops->xlo_wipe_namevalue(loc);
+}
+
+/*
+ * Find lowest offset to a name+value pair. This is the start of our
+ * downward-growing free space.
+ */
+static int ocfs2_xa_get_free_start(struct ocfs2_xa_loc *loc)
+{
+ return loc->xl_ops->xlo_get_free_start(loc);
+}
+
+/* Can we reuse loc->xl_entry for xi? */
+static int ocfs2_xa_can_reuse_entry(struct ocfs2_xa_loc *loc,
+ struct ocfs2_xattr_info *xi)
+{
+ return loc->xl_ops->xlo_can_reuse(loc, xi);
+}
+
+/* How much free space is needed to set the new value */
+static int ocfs2_xa_check_space(struct ocfs2_xa_loc *loc,
+ struct ocfs2_xattr_info *xi)
+{
+ return loc->xl_ops->xlo_check_space(loc, xi);
+}
+
+static void ocfs2_xa_add_entry(struct ocfs2_xa_loc *loc, u32 name_hash)
+{
+ loc->xl_ops->xlo_add_entry(loc, name_hash);
+ loc->xl_entry->xe_name_hash = cpu_to_le32(name_hash);
+ /*
+ * We can't leave the new entry's xe_name_offset at zero or
+ * add_namevalue() will go nuts. We set it to the size of our
+ * storage so that it can never be less than any other entry.
+ */
+ loc->xl_entry->xe_name_offset = cpu_to_le16(loc->xl_size);
+}
+
+static void ocfs2_xa_add_namevalue(struct ocfs2_xa_loc *loc,
+ struct ocfs2_xattr_info *xi)
+{
+ int size = namevalue_size_xi(xi);
+ int nameval_offset;
+ char *nameval_buf;
+
+ loc->xl_ops->xlo_add_namevalue(loc, size);
+ loc->xl_entry->xe_value_size = cpu_to_le64(xi->xi_value_len);
+ loc->xl_entry->xe_name_len = xi->xi_name_len;
+ ocfs2_xattr_set_type(loc->xl_entry, xi->xi_name_index);
+ ocfs2_xattr_set_local(loc->xl_entry,
+ xi->xi_value_len <= OCFS2_XATTR_INLINE_SIZE);
+
+ nameval_offset = le16_to_cpu(loc->xl_entry->xe_name_offset);
+ nameval_buf = ocfs2_xa_offset_pointer(loc, nameval_offset);
+ memset(nameval_buf, 0, size);
+ memcpy(nameval_buf, xi->xi_name, xi->xi_name_len);
+}
+
+static void ocfs2_xa_fill_value_buf(struct ocfs2_xa_loc *loc,
+ struct ocfs2_xattr_value_buf *vb)
+{
+ int nameval_offset = le16_to_cpu(loc->xl_entry->xe_name_offset);
+ int name_size = OCFS2_XATTR_SIZE(loc->xl_entry->xe_name_len);
+
+ /* Value bufs are for value trees */
+ BUG_ON(ocfs2_xattr_is_local(loc->xl_entry));
+ BUG_ON(namevalue_size_xe(loc->xl_entry) !=
+ (name_size + OCFS2_XATTR_ROOT_SIZE));
+
+ loc->xl_ops->xlo_fill_value_buf(loc, vb);
+ vb->vb_xv =
+ (struct ocfs2_xattr_value_root *)ocfs2_xa_offset_pointer(loc,
+ nameval_offset +
+ name_size);
+}
+
+static int ocfs2_xa_block_journal_access(handle_t *handle,
+ struct ocfs2_xa_loc *loc, int type)
+{
+ struct buffer_head *bh = loc->xl_storage;
+ ocfs2_journal_access_func access;
+
+ if (loc->xl_size == (bh->b_size -
+ offsetof(struct ocfs2_xattr_block,
+ xb_attrs.xb_header)))
+ access = ocfs2_journal_access_xb;
+ else
+ access = ocfs2_journal_access_di;
+ return access(handle, INODE_CACHE(loc->xl_inode), bh, type);
+}
+
+static void ocfs2_xa_block_journal_dirty(handle_t *handle,
+ struct ocfs2_xa_loc *loc)
+{
+ struct buffer_head *bh = loc->xl_storage;
+
+ ocfs2_journal_dirty(handle, bh);
+}
+
+static void *ocfs2_xa_block_offset_pointer(struct ocfs2_xa_loc *loc,
+ int offset)
+{
+ return (char *)loc->xl_header + offset;
+}
+
+static int ocfs2_xa_block_can_reuse(struct ocfs2_xa_loc *loc,
+ struct ocfs2_xattr_info *xi)
+{
+ /*
+ * Block storage is strict. If the sizes aren't exact, we will
+ * remove the old one and reinsert the new.
+ */
+ return namevalue_size_xe(loc->xl_entry) ==
+ namevalue_size_xi(xi);
+}
+
+static int ocfs2_xa_block_get_free_start(struct ocfs2_xa_loc *loc)
+{
+ struct ocfs2_xattr_header *xh = loc->xl_header;
+ int i, count = le16_to_cpu(xh->xh_count);
+ int offset, free_start = loc->xl_size;
+
+ for (i = 0; i < count; i++) {
+ offset = le16_to_cpu(xh->xh_entries[i].xe_name_offset);
+ if (offset < free_start)
+ free_start = offset;
+ }
+
+ return free_start;
+}
+
+static int ocfs2_xa_block_check_space(struct ocfs2_xa_loc *loc,
+ struct ocfs2_xattr_info *xi)
+{
+ int count = le16_to_cpu(loc->xl_header->xh_count);
+ int free_start = ocfs2_xa_get_free_start(loc);
+ int needed_space = ocfs2_xi_entry_usage(xi);
+
+ /*
+ * Block storage will reclaim the original entry before inserting
+ * the new value, so we only need the difference. If the new
+ * entry is smaller than the old one, we don't need anything.
+ */
+ if (loc->xl_entry) {
+ /* Don't need space if we're reusing! */
+ if (ocfs2_xa_can_reuse_entry(loc, xi))
+ needed_space = 0;
else
- size = OCFS2_XATTR_SIZE(name_len) +
- OCFS2_XATTR_ROOT_SIZE;
- free += (size + sizeof(struct ocfs2_xattr_entry));
+ needed_space -= ocfs2_xe_entry_usage(loc->xl_entry);
}
- /* Check free space in inode or block */
- if (xi->value && xi->value_len > OCFS2_XATTR_INLINE_SIZE) {
- if (free < sizeof(struct ocfs2_xattr_entry) +
- OCFS2_XATTR_SIZE(name_len) +
- OCFS2_XATTR_ROOT_SIZE) {
- ret = -ENOSPC;
- goto out;
- }
- size_l = OCFS2_XATTR_SIZE(name_len) + OCFS2_XATTR_ROOT_SIZE;
- xi_l.value = (void *)&def_xv;
- xi_l.value_len = OCFS2_XATTR_ROOT_SIZE;
- } else if (xi->value) {
- if (free < sizeof(struct ocfs2_xattr_entry) +
- OCFS2_XATTR_SIZE(name_len) +
- OCFS2_XATTR_SIZE(xi->value_len)) {
- ret = -ENOSPC;
- goto out;
+ if (needed_space < 0)
+ needed_space = 0;
+ return ocfs2_xa_check_space_helper(needed_space, free_start, count);
+}
+
+/*
+ * Block storage for xattrs keeps the name+value pairs compacted. When
+ * we remove one, we have to shift any that preceded it towards the end.
+ */
+static void ocfs2_xa_block_wipe_namevalue(struct ocfs2_xa_loc *loc)
+{
+ int i, offset;
+ int namevalue_offset, first_namevalue_offset, namevalue_size;
+ struct ocfs2_xattr_entry *entry = loc->xl_entry;
+ struct ocfs2_xattr_header *xh = loc->xl_header;
+ int count = le16_to_cpu(xh->xh_count);
+
+ namevalue_offset = le16_to_cpu(entry->xe_name_offset);
+ namevalue_size = namevalue_size_xe(entry);
+ first_namevalue_offset = ocfs2_xa_get_free_start(loc);
+
+ /* Shift the name+value pairs */
+ memmove((char *)xh + first_namevalue_offset + namevalue_size,
+ (char *)xh + first_namevalue_offset,
+ namevalue_offset - first_namevalue_offset);
+ memset((char *)xh + first_namevalue_offset, 0, namevalue_size);
+
+ /* Now tell xh->xh_entries about it */
+ for (i = 0; i < count; i++) {
+ offset = le16_to_cpu(xh->xh_entries[i].xe_name_offset);
+ if (offset < namevalue_offset)
+ le16_add_cpu(&xh->xh_entries[i].xe_name_offset,
+ namevalue_size);
+ }
+
+ /*
+ * Note that we don't update xh_free_start or xh_name_value_len
+ * because they're not used in block-stored xattrs.
+ */
+}
+
+static void ocfs2_xa_block_add_entry(struct ocfs2_xa_loc *loc, u32 name_hash)
+{
+ int count = le16_to_cpu(loc->xl_header->xh_count);
+ loc->xl_entry = &(loc->xl_header->xh_entries[count]);
+ le16_add_cpu(&loc->xl_header->xh_count, 1);
+ memset(loc->xl_entry, 0, sizeof(struct ocfs2_xattr_entry));
+}
+
+static void ocfs2_xa_block_add_namevalue(struct ocfs2_xa_loc *loc, int size)
+{
+ int free_start = ocfs2_xa_get_free_start(loc);
+
+ loc->xl_entry->xe_name_offset = cpu_to_le16(free_start - size);
+}
+
+static void ocfs2_xa_block_fill_value_buf(struct ocfs2_xa_loc *loc,
+ struct ocfs2_xattr_value_buf *vb)
+{
+ struct buffer_head *bh = loc->xl_storage;
+
+ if (loc->xl_size == (bh->b_size -
+ offsetof(struct ocfs2_xattr_block,
+ xb_attrs.xb_header)))
+ vb->vb_access = ocfs2_journal_access_xb;
+ else
+ vb->vb_access = ocfs2_journal_access_di;
+ vb->vb_bh = bh;
+}
+
+/*
+ * Operations for xattrs stored in blocks. This includes inline inode
+ * storage and unindexed ocfs2_xattr_blocks.
+ */
+static const struct ocfs2_xa_loc_operations ocfs2_xa_block_loc_ops = {
+ .xlo_journal_access = ocfs2_xa_block_journal_access,
+ .xlo_journal_dirty = ocfs2_xa_block_journal_dirty,
+ .xlo_offset_pointer = ocfs2_xa_block_offset_pointer,
+ .xlo_check_space = ocfs2_xa_block_check_space,
+ .xlo_can_reuse = ocfs2_xa_block_can_reuse,
+ .xlo_get_free_start = ocfs2_xa_block_get_free_start,
+ .xlo_wipe_namevalue = ocfs2_xa_block_wipe_namevalue,
+ .xlo_add_entry = ocfs2_xa_block_add_entry,
+ .xlo_add_namevalue = ocfs2_xa_block_add_namevalue,
+ .xlo_fill_value_buf = ocfs2_xa_block_fill_value_buf,
+};
+
+static int ocfs2_xa_bucket_journal_access(handle_t *handle,
+ struct ocfs2_xa_loc *loc, int type)
+{
+ struct ocfs2_xattr_bucket *bucket = loc->xl_storage;
+
+ return ocfs2_xattr_bucket_journal_access(handle, bucket, type);
+}
+
+static void ocfs2_xa_bucket_journal_dirty(handle_t *handle,
+ struct ocfs2_xa_loc *loc)
+{
+ struct ocfs2_xattr_bucket *bucket = loc->xl_storage;
+
+ ocfs2_xattr_bucket_journal_dirty(handle, bucket);
+}
+
+static void *ocfs2_xa_bucket_offset_pointer(struct ocfs2_xa_loc *loc,
+ int offset)
+{
+ struct ocfs2_xattr_bucket *bucket = loc->xl_storage;
+ int block, block_offset;
+
+ /* The header is at the front of the bucket */
+ block = offset >> loc->xl_inode->i_sb->s_blocksize_bits;
+ block_offset = offset % loc->xl_inode->i_sb->s_blocksize;
+
+ return bucket_block(bucket, block) + block_offset;
+}
+
+static int ocfs2_xa_bucket_can_reuse(struct ocfs2_xa_loc *loc,
+ struct ocfs2_xattr_info *xi)
+{
+ return namevalue_size_xe(loc->xl_entry) >=
+ namevalue_size_xi(xi);
+}
+
+static int ocfs2_xa_bucket_get_free_start(struct ocfs2_xa_loc *loc)
+{
+ struct ocfs2_xattr_bucket *bucket = loc->xl_storage;
+ return le16_to_cpu(bucket_xh(bucket)->xh_free_start);
+}
+
+static int ocfs2_bucket_align_free_start(struct super_block *sb,
+ int free_start, int size)
+{
+ /*
+ * We need to make sure that the name+value pair fits within
+ * one block.
+ */
+ if (((free_start - size) >> sb->s_blocksize_bits) !=
+ ((free_start - 1) >> sb->s_blocksize_bits))
+ free_start -= free_start % sb->s_blocksize;
+
+ return free_start;
+}
+
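/*
 * Editor's illustrative sketch, not part of the patch: what
 * ocfs2_bucket_align_free_start() above guards against.  With a 4096-byte
 * block, a 200-byte pair placed at free_start = 4160 would span offsets
 * 3960..4159 and cross the 4096 boundary, so free_start is pulled back to
 * 4096 and the pair then lands entirely in the lower block.  The shift
 * below assumes a power-of-two block size, as in ocfs2.
 */
static int example_align_free_start(int free_start, int size, int blkbits)
{
	if (((free_start - size) >> blkbits) != ((free_start - 1) >> blkbits))
		free_start -= free_start % (1 << blkbits);
	return free_start;
}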
+static int ocfs2_xa_bucket_check_space(struct ocfs2_xa_loc *loc,
+ struct ocfs2_xattr_info *xi)
+{
+ int rc;
+ int count = le16_to_cpu(loc->xl_header->xh_count);
+ int free_start = ocfs2_xa_get_free_start(loc);
+ int needed_space = ocfs2_xi_entry_usage(xi);
+ int size = namevalue_size_xi(xi);
+ struct super_block *sb = loc->xl_inode->i_sb;
+
+ /*
+ * Bucket storage does not reclaim name+value pairs it cannot
+ * reuse. They live as holes until the bucket fills, and then
+ * the bucket is defragmented. However, the bucket can reclaim
+ * the ocfs2_xattr_entry.
+ */
+ if (loc->xl_entry) {
+ /* Don't need space if we're reusing! */
+ if (ocfs2_xa_can_reuse_entry(loc, xi))
+ needed_space = 0;
+ else
+ needed_space -= sizeof(struct ocfs2_xattr_entry);
+ }
+ BUG_ON(needed_space < 0);
+
+ if (free_start < size) {
+ if (needed_space)
+ return -ENOSPC;
+ } else {
+ /*
+ * First we check if it would fit in the first place.
+ * Below, we align the free start to a block. This may
+ * slide us below the minimum gap. By checking unaligned
+ * first, we avoid that error.
+ */
+ rc = ocfs2_xa_check_space_helper(needed_space, free_start,
+ count);
+ if (rc)
+ return rc;
+ free_start = ocfs2_bucket_align_free_start(sb, free_start,
+ size);
+ }
+ return ocfs2_xa_check_space_helper(needed_space, free_start, count);
+}
+
+static void ocfs2_xa_bucket_wipe_namevalue(struct ocfs2_xa_loc *loc)
+{
+ le16_add_cpu(&loc->xl_header->xh_name_value_len,
+ -namevalue_size_xe(loc->xl_entry));
+}
+
+static void ocfs2_xa_bucket_add_entry(struct ocfs2_xa_loc *loc, u32 name_hash)
+{
+ struct ocfs2_xattr_header *xh = loc->xl_header;
+ int count = le16_to_cpu(xh->xh_count);
+ int low = 0, high = count - 1, tmp;
+ struct ocfs2_xattr_entry *tmp_xe;
+
+ /*
+ * We keep buckets sorted by name_hash, so we need to find
+ * our insert place.
+ */
+ while (low <= high && count) {
+ tmp = (low + high) / 2;
+ tmp_xe = &xh->xh_entries[tmp];
+
+ if (name_hash > le32_to_cpu(tmp_xe->xe_name_hash))
+ low = tmp + 1;
+ else if (name_hash < le32_to_cpu(tmp_xe->xe_name_hash))
+ high = tmp - 1;
+ else {
+ low = tmp;
+ break;
}
}
- if (!xs->not_found) {
- /* For existing extended attribute */
- size_t size = OCFS2_XATTR_SIZE(name_len) +
- OCFS2_XATTR_SIZE(le64_to_cpu(xs->here->xe_value_size));
- size_t offs = le16_to_cpu(xs->here->xe_name_offset);
- void *val = xs->base + offs;
+ if (low != count)
+ memmove(&xh->xh_entries[low + 1],
+ &xh->xh_entries[low],
+ ((count - low) * sizeof(struct ocfs2_xattr_entry)));
- if (ocfs2_xattr_is_local(xs->here) && size == size_l) {
- /* Replace existing local xattr with tree root */
- ret = ocfs2_xattr_set_value_outside(inode, xi, xs,
- ctxt, &vb, offs);
- if (ret < 0)
- mlog_errno(ret);
- goto out;
- } else if (!ocfs2_xattr_is_local(xs->here)) {
- /* For existing xattr which has value outside */
- vb.vb_xv = (struct ocfs2_xattr_value_root *)
- (val + OCFS2_XATTR_SIZE(name_len));
+ le16_add_cpu(&xh->xh_count, 1);
+ loc->xl_entry = &xh->xh_entries[low];
+ memset(loc->xl_entry, 0, sizeof(struct ocfs2_xattr_entry));
+}
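/*
 * Editor's illustrative sketch, not part of the patch: the insert-position
 * search used above.  Bucket entries stay sorted by xe_name_hash, so a
 * binary search over the hashes gives the slot where the new entry must be
 * spliced in; entries from that slot onward are shifted up by one.  This
 * standalone version works on a plain array of hashes.
 */
#include <stdint.h>

static int find_insert_slot(const uint32_t *hashes, int count, uint32_t hash)
{
	int low = 0, high = count - 1, mid;

	while (low <= high && count) {
		mid = (low + high) / 2;
		if (hash > hashes[mid])
			low = mid + 1;
		else if (hash < hashes[mid])
			high = mid - 1;
		else
			return mid;	/* equal hashes: insert right here */
	}
	return low;	/* first slot with a hash greater than ours */
}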
- if (xi->value_len > OCFS2_XATTR_INLINE_SIZE) {
- /*
- * If new value need set outside also,
- * first truncate old value to new value,
- * then set new value with set_value_outside().
- */
- ret = ocfs2_xattr_value_truncate(inode,
- &vb,
- xi->value_len,
- ctxt);
- if (ret < 0) {
- mlog_errno(ret);
- goto out;
- }
+static void ocfs2_xa_bucket_add_namevalue(struct ocfs2_xa_loc *loc, int size)
+{
+ int free_start = ocfs2_xa_get_free_start(loc);
+ struct ocfs2_xattr_header *xh = loc->xl_header;
+ struct super_block *sb = loc->xl_inode->i_sb;
+ int nameval_offset;
- ret = ocfs2_xattr_update_entry(inode,
- handle,
- xi,
- xs,
- &vb,
- offs);
- if (ret < 0) {
- mlog_errno(ret);
- goto out;
- }
+ free_start = ocfs2_bucket_align_free_start(sb, free_start, size);
+ nameval_offset = free_start - size;
+ loc->xl_entry->xe_name_offset = cpu_to_le16(nameval_offset);
+ xh->xh_free_start = cpu_to_le16(nameval_offset);
+ le16_add_cpu(&xh->xh_name_value_len, size);
- ret = __ocfs2_xattr_set_value_outside(inode,
- handle,
- &vb,
- xi->value,
- xi->value_len);
- if (ret < 0)
- mlog_errno(ret);
+}
+
+static void ocfs2_xa_bucket_fill_value_buf(struct ocfs2_xa_loc *loc,
+ struct ocfs2_xattr_value_buf *vb)
+{
+ struct ocfs2_xattr_bucket *bucket = loc->xl_storage;
+ struct super_block *sb = loc->xl_inode->i_sb;
+ int nameval_offset = le16_to_cpu(loc->xl_entry->xe_name_offset);
+ int size = namevalue_size_xe(loc->xl_entry);
+ int block_offset = nameval_offset >> sb->s_blocksize_bits;
+
+ /* Values are not allowed to straddle block boundaries */
+ BUG_ON(block_offset !=
+ ((nameval_offset + size - 1) >> sb->s_blocksize_bits));
+ /* We expect the bucket to be filled in */
+ BUG_ON(!bucket->bu_bhs[block_offset]);
+
+ vb->vb_access = ocfs2_journal_access;
+ vb->vb_bh = bucket->bu_bhs[block_offset];
+}
+
+/* Operations for xattrs stored in buckets. */
+static const struct ocfs2_xa_loc_operations ocfs2_xa_bucket_loc_ops = {
+ .xlo_journal_access = ocfs2_xa_bucket_journal_access,
+ .xlo_journal_dirty = ocfs2_xa_bucket_journal_dirty,
+ .xlo_offset_pointer = ocfs2_xa_bucket_offset_pointer,
+ .xlo_check_space = ocfs2_xa_bucket_check_space,
+ .xlo_can_reuse = ocfs2_xa_bucket_can_reuse,
+ .xlo_get_free_start = ocfs2_xa_bucket_get_free_start,
+ .xlo_wipe_namevalue = ocfs2_xa_bucket_wipe_namevalue,
+ .xlo_add_entry = ocfs2_xa_bucket_add_entry,
+ .xlo_add_namevalue = ocfs2_xa_bucket_add_namevalue,
+ .xlo_fill_value_buf = ocfs2_xa_bucket_fill_value_buf,
+};
+
+static unsigned int ocfs2_xa_value_clusters(struct ocfs2_xa_loc *loc)
+{
+ struct ocfs2_xattr_value_buf vb;
+
+ if (ocfs2_xattr_is_local(loc->xl_entry))
+ return 0;
+
+ ocfs2_xa_fill_value_buf(loc, &vb);
+ return le32_to_cpu(vb.vb_xv->xr_clusters);
+}
+
+static int ocfs2_xa_value_truncate(struct ocfs2_xa_loc *loc, u64 bytes,
+ struct ocfs2_xattr_set_ctxt *ctxt)
+{
+ int trunc_rc, access_rc;
+ struct ocfs2_xattr_value_buf vb;
+
+ ocfs2_xa_fill_value_buf(loc, &vb);
+ trunc_rc = ocfs2_xattr_value_truncate(loc->xl_inode, &vb, bytes,
+ ctxt);
+
+ /*
+ * The caller of ocfs2_xa_value_truncate() has already called
+ * ocfs2_xa_journal_access() on the loc. However, the truncate code
+ * calls ocfs2_extend_trans(). This may commit the previous
+ * transaction and open a new one. If this is a bucket, truncate
+ * could leave only vb->vb_bh set up for journaling. Meanwhile,
+ * the caller is expecting to dirty the entire bucket. So we must
+ * reset the journal work. We do this even if truncate has failed,
+ * as it could have failed after committing the extend.
+ */
+ access_rc = ocfs2_xa_journal_access(ctxt->handle, loc,
+ OCFS2_JOURNAL_ACCESS_WRITE);
+
+ /* Errors in truncate take precedence */
+ return trunc_rc ? trunc_rc : access_rc;
+}
+
+static void ocfs2_xa_remove_entry(struct ocfs2_xa_loc *loc)
+{
+ int index, count;
+ struct ocfs2_xattr_header *xh = loc->xl_header;
+ struct ocfs2_xattr_entry *entry = loc->xl_entry;
+
+ ocfs2_xa_wipe_namevalue(loc);
+ loc->xl_entry = NULL;
+
+ le16_add_cpu(&xh->xh_count, -1);
+ count = le16_to_cpu(xh->xh_count);
+
+ /*
+ * Only zero out the entry if there are more remaining. This is
+ * important for an empty bucket, as it keeps track of the
+ * bucket's hash value. It doesn't hurt empty block storage.
+ */
+ if (count) {
+ index = ((char *)entry - (char *)&xh->xh_entries) /
+ sizeof(struct ocfs2_xattr_entry);
+ memmove(&xh->xh_entries[index], &xh->xh_entries[index + 1],
+ (count - index) * sizeof(struct ocfs2_xattr_entry));
+ memset(&xh->xh_entries[count], 0,
+ sizeof(struct ocfs2_xattr_entry));
+ }
+}
+
+/*
+ * If we have a problem adjusting the size of an external value during
+ * ocfs2_xa_prepare_entry() or ocfs2_xa_remove(), we may have an xattr
+ * in an intermediate state. For example, the value may be partially
+ * truncated.
+ *
+ * If the value tree hasn't changed, the extend/truncate went nowhere.
+ * We have nothing to do. The caller can treat it as a straight error.
+ *
+ * If the value tree got partially truncated, we now have a corrupted
+ * extended attribute. We're going to wipe its entry and leak the
+ * clusters. Better to leak some storage than leave a corrupt entry.
+ *
+ * If the value tree grew, it obviously didn't grow enough for the
+ * new entry. We're not going to try and reclaim those clusters either.
+ * If there was already an external value there (orig_clusters != 0),
+ * the new clusters are attached safely and we can just leave the old
+ * value in place. If there was no external value there, we remove
+ * the entry.
+ *
+ * This way, the xattr block we store in the journal will be consistent.
+ * If the size change broke because of the journal, no changes will hit
+ * disk anyway.
+ */
+static void ocfs2_xa_cleanup_value_truncate(struct ocfs2_xa_loc *loc,
+ const char *what,
+ unsigned int orig_clusters)
+{
+ unsigned int new_clusters = ocfs2_xa_value_clusters(loc);
+ char *nameval_buf = ocfs2_xa_offset_pointer(loc,
+ le16_to_cpu(loc->xl_entry->xe_name_offset));
+
+ if (new_clusters < orig_clusters) {
+ mlog(ML_ERROR,
+ "Partial truncate while %s xattr %.*s. Leaking "
+ "%u clusters and removing the entry\n",
+ what, loc->xl_entry->xe_name_len, nameval_buf,
+ orig_clusters - new_clusters);
+ ocfs2_xa_remove_entry(loc);
+ } else if (!orig_clusters) {
+ mlog(ML_ERROR,
+ "Unable to allocate an external value for xattr "
+ "%.*s safely. Leaking %u clusters and removing the "
+ "entry\n",
+ loc->xl_entry->xe_name_len, nameval_buf,
+ new_clusters - orig_clusters);
+ ocfs2_xa_remove_entry(loc);
+ } else if (new_clusters > orig_clusters)
+ mlog(ML_ERROR,
+ "Unable to grow xattr %.*s safely. %u new clusters "
+ "have been added, but the value will not be "
+ "modified\n",
+ loc->xl_entry->xe_name_len, nameval_buf,
+ new_clusters - orig_clusters);
+}
+
+static int ocfs2_xa_remove(struct ocfs2_xa_loc *loc,
+ struct ocfs2_xattr_set_ctxt *ctxt)
+{
+ int rc = 0;
+ unsigned int orig_clusters;
+
+ if (!ocfs2_xattr_is_local(loc->xl_entry)) {
+ orig_clusters = ocfs2_xa_value_clusters(loc);
+ rc = ocfs2_xa_value_truncate(loc, 0, ctxt);
+ if (rc) {
+ mlog_errno(rc);
+ /*
+ * Since this is remove, we can return 0 if
+ * ocfs2_xa_cleanup_value_truncate() is going to
+ * wipe the entry anyway. So we check the
+ * cluster count as well.
+ */
+ if (orig_clusters != ocfs2_xa_value_clusters(loc))
+ rc = 0;
+ ocfs2_xa_cleanup_value_truncate(loc, "removing",
+ orig_clusters);
+ if (rc)
goto out;
- } else {
- /*
- * If new value need set in local,
- * just trucate old value to zero.
- */
- ret = ocfs2_xattr_value_truncate(inode,
- &vb,
- 0,
- ctxt);
- if (ret < 0)
- mlog_errno(ret);
+ }
+ }
+
+ ocfs2_xa_remove_entry(loc);
+
+out:
+ return rc;
+}
+
+static void ocfs2_xa_install_value_root(struct ocfs2_xa_loc *loc)
+{
+ int name_size = OCFS2_XATTR_SIZE(loc->xl_entry->xe_name_len);
+ char *nameval_buf;
+
+ nameval_buf = ocfs2_xa_offset_pointer(loc,
+ le16_to_cpu(loc->xl_entry->xe_name_offset));
+ memcpy(nameval_buf + name_size, &def_xv, OCFS2_XATTR_ROOT_SIZE);
+}
+
+/*
+ * Take an existing entry and make it ready for the new value. This
+ * won't allocate space, but it may free space. It should be ready for
+ * ocfs2_xa_prepare_entry() to finish the work.
+ */
+static int ocfs2_xa_reuse_entry(struct ocfs2_xa_loc *loc,
+ struct ocfs2_xattr_info *xi,
+ struct ocfs2_xattr_set_ctxt *ctxt)
+{
+ int rc = 0;
+ int name_size = OCFS2_XATTR_SIZE(xi->xi_name_len);
+ unsigned int orig_clusters;
+ char *nameval_buf;
+ int xe_local = ocfs2_xattr_is_local(loc->xl_entry);
+ int xi_local = xi->xi_value_len <= OCFS2_XATTR_INLINE_SIZE;
+
+ BUG_ON(OCFS2_XATTR_SIZE(loc->xl_entry->xe_name_len) !=
+ name_size);
+
+ nameval_buf = ocfs2_xa_offset_pointer(loc,
+ le16_to_cpu(loc->xl_entry->xe_name_offset));
+ if (xe_local) {
+ memset(nameval_buf + name_size, 0,
+ namevalue_size_xe(loc->xl_entry) - name_size);
+ if (!xi_local)
+ ocfs2_xa_install_value_root(loc);
+ } else {
+ orig_clusters = ocfs2_xa_value_clusters(loc);
+ if (xi_local) {
+ rc = ocfs2_xa_value_truncate(loc, 0, ctxt);
+ if (rc < 0)
+ mlog_errno(rc);
+ else
+ memset(nameval_buf + name_size, 0,
+ namevalue_size_xe(loc->xl_entry) -
+ name_size);
+ } else if (le64_to_cpu(loc->xl_entry->xe_value_size) >
+ xi->xi_value_len) {
+ rc = ocfs2_xa_value_truncate(loc, xi->xi_value_len,
+ ctxt);
+ if (rc < 0)
+ mlog_errno(rc);
+ }
+
+ if (rc) {
+ ocfs2_xa_cleanup_value_truncate(loc, "reusing",
+ orig_clusters);
+ goto out;
+ }
+ }
+
+ loc->xl_entry->xe_value_size = cpu_to_le64(xi->xi_value_len);
+ ocfs2_xattr_set_local(loc->xl_entry, xi_local);
+
+out:
+ return rc;
+}
+
+/*
+ * Prepares loc->xl_entry to receive the new xattr. This includes
+ * properly setting up the name+value pair region. If loc->xl_entry
+ * already exists, it will take care of modifying it appropriately.
+ *
+ * Note that this modifies the data. You did journal_access already,
+ * right?
+ */
+static int ocfs2_xa_prepare_entry(struct ocfs2_xa_loc *loc,
+ struct ocfs2_xattr_info *xi,
+ u32 name_hash,
+ struct ocfs2_xattr_set_ctxt *ctxt)
+{
+ int rc = 0;
+ unsigned int orig_clusters;
+ __le64 orig_value_size = 0;
+
+ rc = ocfs2_xa_check_space(loc, xi);
+ if (rc)
+ goto out;
+
+ if (loc->xl_entry) {
+ if (ocfs2_xa_can_reuse_entry(loc, xi)) {
+ orig_value_size = loc->xl_entry->xe_value_size;
+ rc = ocfs2_xa_reuse_entry(loc, xi, ctxt);
+ if (rc)
+ goto out;
+ goto alloc_value;
+ }
+
+ if (!ocfs2_xattr_is_local(loc->xl_entry)) {
+ orig_clusters = ocfs2_xa_value_clusters(loc);
+ rc = ocfs2_xa_value_truncate(loc, 0, ctxt);
+ if (rc) {
+ mlog_errno(rc);
+ ocfs2_xa_cleanup_value_truncate(loc,
+ "overwriting",
+ orig_clusters);
+ goto out;
}
}
+ ocfs2_xa_wipe_namevalue(loc);
+ } else
+ ocfs2_xa_add_entry(loc, name_hash);
+
+ /*
+ * If we get here, we have a blank entry. Fill it. We grow our
+ * name+value pair back from the end.
+ */
+ ocfs2_xa_add_namevalue(loc, xi);
+ if (xi->xi_value_len > OCFS2_XATTR_INLINE_SIZE)
+ ocfs2_xa_install_value_root(loc);
+
+alloc_value:
+ if (xi->xi_value_len > OCFS2_XATTR_INLINE_SIZE) {
+ orig_clusters = ocfs2_xa_value_clusters(loc);
+ rc = ocfs2_xa_value_truncate(loc, xi->xi_value_len, ctxt);
+ if (rc < 0) {
+ /*
+ * If we tried to grow an existing external value,
+ * ocfs2_xa_cleanup_value_truncate() is going to
+ * let it stand. We have to restore its original
+ * value size.
+ */
+ loc->xl_entry->xe_value_size = orig_value_size;
+ ocfs2_xa_cleanup_value_truncate(loc, "growing",
+ orig_clusters);
+ mlog_errno(rc);
+ }
}
- ret = ocfs2_journal_access_di(handle, INODE_CACHE(inode), xs->inode_bh,
+out:
+ return rc;
+}
+
+/*
+ * Store the value portion of the name+value pair. This will skip
+ * values that are stored externally. Their tree roots were set up
+ * by ocfs2_xa_prepare_entry().
+ */
+static int ocfs2_xa_store_value(struct ocfs2_xa_loc *loc,
+ struct ocfs2_xattr_info *xi,
+ struct ocfs2_xattr_set_ctxt *ctxt)
+{
+ int rc = 0;
+ int nameval_offset = le16_to_cpu(loc->xl_entry->xe_name_offset);
+ int name_size = OCFS2_XATTR_SIZE(xi->xi_name_len);
+ char *nameval_buf;
+ struct ocfs2_xattr_value_buf vb;
+
+ nameval_buf = ocfs2_xa_offset_pointer(loc, nameval_offset);
+ if (xi->xi_value_len > OCFS2_XATTR_INLINE_SIZE) {
+ ocfs2_xa_fill_value_buf(loc, &vb);
+ rc = __ocfs2_xattr_set_value_outside(loc->xl_inode,
+ ctxt->handle, &vb,
+ xi->xi_value,
+ xi->xi_value_len);
+ } else
+ memcpy(nameval_buf + name_size, xi->xi_value, xi->xi_value_len);
+
+ return rc;
+}
+
+static int ocfs2_xa_set(struct ocfs2_xa_loc *loc,
+ struct ocfs2_xattr_info *xi,
+ struct ocfs2_xattr_set_ctxt *ctxt)
+{
+ int ret;
+ u32 name_hash = ocfs2_xattr_name_hash(loc->xl_inode, xi->xi_name,
+ xi->xi_name_len);
+
+ ret = ocfs2_xa_journal_access(ctxt->handle, loc,
OCFS2_JOURNAL_ACCESS_WRITE);
if (ret) {
mlog_errno(ret);
goto out;
}
- if (!(flag & OCFS2_INLINE_XATTR_FL)) {
- ret = vb.vb_access(handle, INODE_CACHE(inode), vb.vb_bh,
- OCFS2_JOURNAL_ACCESS_WRITE);
- if (ret) {
- mlog_errno(ret);
- goto out;
- }
- }
-
/*
- * Set value in local, include set tree root in local.
- * This is the first step for value size >INLINE_SIZE.
+ * From here on out, everything is going to modify the buffer a
+ * little. Errors are going to leave the xattr header in a
+ * sane state. Thus, even with errors we dirty the sucker.
*/
- ocfs2_xattr_set_entry_local(inode, &xi_l, xs, last, min_offs);
- if (!(flag & OCFS2_INLINE_XATTR_FL)) {
- ret = ocfs2_journal_dirty(handle, xs->xattr_bh);
- if (ret < 0) {
+ /* Don't worry, we are never called with !xi_value and !xl_entry */
+ if (!xi->xi_value) {
+ ret = ocfs2_xa_remove(loc, ctxt);
+ goto out_dirty;
+ }
+
+ ret = ocfs2_xa_prepare_entry(loc, xi, name_hash, ctxt);
+ if (ret) {
+ if (ret != -ENOSPC)
mlog_errno(ret);
- goto out;
- }
+ goto out_dirty;
}
- if (!(oi->ip_dyn_features & OCFS2_INLINE_XATTR_FL) &&
- (flag & OCFS2_INLINE_XATTR_FL)) {
- struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
- unsigned int xattrsize = osb->s_xattr_inline_size;
-
- /*
- * Adjust extent record count or inline data size
- * to reserve space for extended attribute.
- */
- if (oi->ip_dyn_features & OCFS2_INLINE_DATA_FL) {
- struct ocfs2_inline_data *idata = &di->id2.i_data;
- le16_add_cpu(&idata->id_count, -xattrsize);
- } else if (!(ocfs2_inode_is_fast_symlink(inode))) {
- struct ocfs2_extent_list *el = &di->id2.i_list;
- le16_add_cpu(&el->l_count, -(xattrsize /
- sizeof(struct ocfs2_extent_rec)));
- }
- di->i_xattr_inline_size = cpu_to_le16(xattrsize);
- }
- /* Update xattr flag */
- spin_lock(&oi->ip_lock);
- oi->ip_dyn_features |= flag;
- di->i_dyn_features = cpu_to_le16(oi->ip_dyn_features);
- spin_unlock(&oi->ip_lock);
-
- ret = ocfs2_journal_dirty(handle, xs->inode_bh);
- if (ret < 0)
+ ret = ocfs2_xa_store_value(loc, xi, ctxt);
+ if (ret)
mlog_errno(ret);
- if (!ret && xi->value_len > OCFS2_XATTR_INLINE_SIZE) {
- /*
- * Set value outside in B tree.
- * This is the second step for value size > INLINE_SIZE.
- */
- size_t offs = le16_to_cpu(xs->here->xe_name_offset);
- ret = ocfs2_xattr_set_value_outside(inode, xi, xs, ctxt,
- &vb, offs);
- if (ret < 0) {
- int ret2;
+out_dirty:
+ ocfs2_xa_journal_dirty(ctxt->handle, loc);
- mlog_errno(ret);
- /*
- * If set value outside failed, we have to clean
- * the junk tree root we have already set in local.
- */
- ret2 = ocfs2_xattr_cleanup(inode, ctxt->handle,
- xi, xs, &vb, offs);
- if (ret2 < 0)
- mlog_errno(ret2);
- }
- }
out:
return ret;
}
+static void ocfs2_init_dinode_xa_loc(struct ocfs2_xa_loc *loc,
+ struct inode *inode,
+ struct buffer_head *bh,
+ struct ocfs2_xattr_entry *entry)
+{
+ struct ocfs2_dinode *di = (struct ocfs2_dinode *)bh->b_data;
+
+ BUG_ON(!(OCFS2_I(inode)->ip_dyn_features & OCFS2_INLINE_XATTR_FL));
+
+ loc->xl_inode = inode;
+ loc->xl_ops = &ocfs2_xa_block_loc_ops;
+ loc->xl_storage = bh;
+ loc->xl_entry = entry;
+ loc->xl_size = le16_to_cpu(di->i_xattr_inline_size);
+ loc->xl_header =
+ (struct ocfs2_xattr_header *)(bh->b_data + bh->b_size -
+ loc->xl_size);
+}
+
+static void ocfs2_init_xattr_block_xa_loc(struct ocfs2_xa_loc *loc,
+ struct inode *inode,
+ struct buffer_head *bh,
+ struct ocfs2_xattr_entry *entry)
+{
+ struct ocfs2_xattr_block *xb =
+ (struct ocfs2_xattr_block *)bh->b_data;
+
+ BUG_ON(le16_to_cpu(xb->xb_flags) & OCFS2_XATTR_INDEXED);
+
+ loc->xl_inode = inode;
+ loc->xl_ops = &ocfs2_xa_block_loc_ops;
+ loc->xl_storage = bh;
+ loc->xl_header = &(xb->xb_attrs.xb_header);
+ loc->xl_entry = entry;
+ loc->xl_size = bh->b_size - offsetof(struct ocfs2_xattr_block,
+ xb_attrs.xb_header);
+}
+
+static void ocfs2_init_xattr_bucket_xa_loc(struct ocfs2_xa_loc *loc,
+ struct ocfs2_xattr_bucket *bucket,
+ struct ocfs2_xattr_entry *entry)
+{
+ loc->xl_inode = bucket->bu_inode;
+ loc->xl_ops = &ocfs2_xa_bucket_loc_ops;
+ loc->xl_storage = bucket;
+ loc->xl_header = bucket_xh(bucket);
+ loc->xl_entry = entry;
+ loc->xl_size = OCFS2_XATTR_BUCKET_SIZE;
+}
+
/*
* In xattr remove, if it is stored outside and refcounted, we may have
* the chance to split the refcount tree. So need the allocators.
@@ -2149,6 +2683,55 @@
return 0;
}
+static int ocfs2_xattr_ibody_init(struct inode *inode,
+ struct buffer_head *di_bh,
+ struct ocfs2_xattr_set_ctxt *ctxt)
+{
+ int ret;
+ struct ocfs2_inode_info *oi = OCFS2_I(inode);
+ struct ocfs2_dinode *di = (struct ocfs2_dinode *)di_bh->b_data;
+ struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
+ unsigned int xattrsize = osb->s_xattr_inline_size;
+
+ if (!ocfs2_xattr_has_space_inline(inode, di)) {
+ ret = -ENOSPC;
+ goto out;
+ }
+
+ ret = ocfs2_journal_access_di(ctxt->handle, INODE_CACHE(inode), di_bh,
+ OCFS2_JOURNAL_ACCESS_WRITE);
+ if (ret) {
+ mlog_errno(ret);
+ goto out;
+ }
+
+ /*
+ * Adjust extent record count or inline data size
+ * to reserve space for extended attribute.
+ */
+ if (oi->ip_dyn_features & OCFS2_INLINE_DATA_FL) {
+ struct ocfs2_inline_data *idata = &di->id2.i_data;
+ le16_add_cpu(&idata->id_count, -xattrsize);
+ } else if (!(ocfs2_inode_is_fast_symlink(inode))) {
+ struct ocfs2_extent_list *el = &di->id2.i_list;
+ le16_add_cpu(&el->l_count, -(xattrsize /
+ sizeof(struct ocfs2_extent_rec)));
+ }
+ di->i_xattr_inline_size = cpu_to_le16(xattrsize);
+
+ spin_lock(&oi->ip_lock);
+ oi->ip_dyn_features |= OCFS2_INLINE_XATTR_FL|OCFS2_HAS_XATTR_FL;
+ di->i_dyn_features = cpu_to_le16(oi->ip_dyn_features);
+ spin_unlock(&oi->ip_lock);
+
+ ret = ocfs2_journal_dirty(ctxt->handle, di_bh);
+ if (ret < 0)
+ mlog_errno(ret);
+
+out:
+ return ret;
+}
+
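
ocfs2_xattr_ibody_init() above reserves the in-inode xattr area by shrinking either the inline-data byte count or the extent list. A stand-alone illustration of the arithmetic, using assumed sizes (the real numbers come from the superblock's s_xattr_inline_size at mount time and from the on-disk extent record layout, not from this patch):

    #include <assert.h>

    int main(void)
    {
            unsigned int xattrsize = 256;   /* assumed osb->s_xattr_inline_size */
            unsigned int rec_size = 16;     /* assumed sizeof(struct ocfs2_extent_rec) */

            /* Inline-data inodes: id_count drops by the whole area (256 bytes here). */
            /* Extent-list inodes: l_count drops by xattrsize / rec_size records. */
            assert(xattrsize / rec_size == 16);
            return 0;
    }

Either way, di->i_xattr_inline_size records how much of the block was handed over, which is what ocfs2_init_dinode_xa_loc() above uses to find the in-inode xattr header at the end of the block.
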
/*
* ocfs2_xattr_ibody_set()
*
@@ -2160,9 +2743,10 @@
struct ocfs2_xattr_search *xs,
struct ocfs2_xattr_set_ctxt *ctxt)
{
+ int ret;
struct ocfs2_inode_info *oi = OCFS2_I(inode);
struct ocfs2_dinode *di = (struct ocfs2_dinode *)xs->inode_bh->b_data;
- int ret;
+ struct ocfs2_xa_loc loc;
if (inode->i_sb->s_blocksize == OCFS2_MIN_BLOCKSIZE)
return -ENOSPC;
@@ -2175,8 +2759,25 @@
}
}
- ret = ocfs2_xattr_set_entry(inode, xi, xs, ctxt,
- (OCFS2_INLINE_XATTR_FL | OCFS2_HAS_XATTR_FL));
+ if (!(oi->ip_dyn_features & OCFS2_INLINE_XATTR_FL)) {
+ ret = ocfs2_xattr_ibody_init(inode, xs->inode_bh, ctxt);
+ if (ret) {
+ if (ret != -ENOSPC)
+ mlog_errno(ret);
+ goto out;
+ }
+ }
+
+ ocfs2_init_dinode_xa_loc(&loc, inode, xs->inode_bh,
+ xs->not_found ? NULL : xs->here);
+ ret = ocfs2_xa_set(&loc, xi, ctxt);
+ if (ret) {
+ if (ret != -ENOSPC)
+ mlog_errno(ret);
+ goto out;
+ }
+ xs->here = loc.xl_entry;
+
out:
up_write(&oi->ip_alloc_sem);
@@ -2236,12 +2837,11 @@
return ret;
}
-static int ocfs2_create_xattr_block(handle_t *handle,
- struct inode *inode,
+static int ocfs2_create_xattr_block(struct inode *inode,
struct buffer_head *inode_bh,
- struct ocfs2_alloc_context *meta_ac,
- struct buffer_head **ret_bh,
- int indexed)
+ struct ocfs2_xattr_set_ctxt *ctxt,
+ int indexed,
+ struct buffer_head **ret_bh)
{
int ret;
u16 suballoc_bit_start;
@@ -2252,14 +2852,14 @@
struct buffer_head *new_bh = NULL;
struct ocfs2_xattr_block *xblk;
- ret = ocfs2_journal_access_di(handle, INODE_CACHE(inode), inode_bh,
- OCFS2_JOURNAL_ACCESS_CREATE);
+ ret = ocfs2_journal_access_di(ctxt->handle, INODE_CACHE(inode),
+ inode_bh, OCFS2_JOURNAL_ACCESS_CREATE);
if (ret < 0) {
mlog_errno(ret);
goto end;
}
- ret = ocfs2_claim_metadata(osb, handle, meta_ac, 1,
+ ret = ocfs2_claim_metadata(osb, ctxt->handle, ctxt->meta_ac, 1,
&suballoc_bit_start, &num_got,
&first_blkno);
if (ret < 0) {
@@ -2270,7 +2870,7 @@
new_bh = sb_getblk(inode->i_sb, first_blkno);
ocfs2_set_new_buffer_uptodate(INODE_CACHE(inode), new_bh);
- ret = ocfs2_journal_access_xb(handle, INODE_CACHE(inode),
+ ret = ocfs2_journal_access_xb(ctxt->handle, INODE_CACHE(inode),
new_bh,
OCFS2_JOURNAL_ACCESS_CREATE);
if (ret < 0) {
@@ -2282,11 +2882,10 @@
xblk = (struct ocfs2_xattr_block *)new_bh->b_data;
memset(xblk, 0, inode->i_sb->s_blocksize);
strcpy((void *)xblk, OCFS2_XATTR_BLOCK_SIGNATURE);
- xblk->xb_suballoc_slot = cpu_to_le16(osb->slot_num);
+ xblk->xb_suballoc_slot = cpu_to_le16(ctxt->meta_ac->ac_alloc_slot);
xblk->xb_suballoc_bit = cpu_to_le16(suballoc_bit_start);
xblk->xb_fs_generation = cpu_to_le32(osb->fs_generation);
xblk->xb_blkno = cpu_to_le64(first_blkno);
-
if (indexed) {
struct ocfs2_xattr_tree_root *xr = &xblk->xb_attrs.xb_root;
xr->xt_clusters = cpu_to_le32(1);
@@ -2297,14 +2896,17 @@
xr->xt_list.l_next_free_rec = cpu_to_le16(1);
xblk->xb_flags = cpu_to_le16(OCFS2_XATTR_INDEXED);
}
+ ocfs2_journal_dirty(ctxt->handle, new_bh);
- ret = ocfs2_journal_dirty(handle, new_bh);
- if (ret < 0) {
- mlog_errno(ret);
- goto end;
- }
+ /* Add it to the inode */
di->i_xattr_loc = cpu_to_le64(first_blkno);
- ocfs2_journal_dirty(handle, inode_bh);
+
+ spin_lock(&OCFS2_I(inode)->ip_lock);
+ OCFS2_I(inode)->ip_dyn_features |= OCFS2_HAS_XATTR_FL;
+ di->i_dyn_features = cpu_to_le16(OCFS2_I(inode)->ip_dyn_features);
+ spin_unlock(&OCFS2_I(inode)->ip_lock);
+
+ ocfs2_journal_dirty(ctxt->handle, inode_bh);
*ret_bh = new_bh;
new_bh = NULL;
@@ -2326,13 +2928,13 @@
struct ocfs2_xattr_set_ctxt *ctxt)
{
struct buffer_head *new_bh = NULL;
- handle_t *handle = ctxt->handle;
struct ocfs2_xattr_block *xblk = NULL;
int ret;
+ struct ocfs2_xa_loc loc;
if (!xs->xattr_bh) {
- ret = ocfs2_create_xattr_block(handle, inode, xs->inode_bh,
- ctxt->meta_ac, &new_bh, 0);
+ ret = ocfs2_create_xattr_block(inode, xs->inode_bh, ctxt,
+ 0, &new_bh);
if (ret) {
mlog_errno(ret);
goto end;
@@ -2348,21 +2950,25 @@
xblk = (struct ocfs2_xattr_block *)xs->xattr_bh->b_data;
if (!(le16_to_cpu(xblk->xb_flags) & OCFS2_XATTR_INDEXED)) {
- /* Set extended attribute into external block */
- ret = ocfs2_xattr_set_entry(inode, xi, xs, ctxt,
- OCFS2_HAS_XATTR_FL);
- if (!ret || ret != -ENOSPC)
- goto end;
+ ocfs2_init_xattr_block_xa_loc(&loc, inode, xs->xattr_bh,
+ xs->not_found ? NULL : xs->here);
- ret = ocfs2_xattr_create_index_block(inode, xs, ctxt);
- if (ret)
+ ret = ocfs2_xa_set(&loc, xi, ctxt);
+ if (!ret)
+ xs->here = loc.xl_entry;
+ else if (ret != -ENOSPC)
goto end;
+ else {
+ ret = ocfs2_xattr_create_index_block(inode, xs, ctxt);
+ if (ret)
+ goto end;
+ }
}
- ret = ocfs2_xattr_set_entry_index_block(inode, xi, xs, ctxt);
+ if (le16_to_cpu(xblk->xb_flags) & OCFS2_XATTR_INDEXED)
+ ret = ocfs2_xattr_set_entry_index_block(inode, xi, xs, ctxt);
end:
-
return ret;
}
@@ -2371,7 +2977,6 @@
struct ocfs2_xattr_info *xi,
struct ocfs2_xattr_search *xs)
{
- u64 value_size;
struct ocfs2_xattr_entry *last;
int free, i;
size_t min_offs = xs->end - xs->base;
@@ -2394,13 +2999,7 @@
BUG_ON(!xs->not_found);
- if (xi->value_len > OCFS2_XATTR_INLINE_SIZE)
- value_size = OCFS2_XATTR_ROOT_SIZE;
- else
- value_size = OCFS2_XATTR_SIZE(xi->value_len);
-
- if (free >= sizeof(struct ocfs2_xattr_entry) +
- OCFS2_XATTR_SIZE(strlen(xi->name)) + value_size)
+ if (free >= (sizeof(struct ocfs2_xattr_entry) + namevalue_size_xi(xi)))
return 1;
return 0;
@@ -2424,7 +3023,7 @@
char *base = NULL;
int name_offset, name_len = 0;
u32 new_clusters = ocfs2_clusters_for_bytes(inode->i_sb,
- xi->value_len);
+ xi->xi_value_len);
u64 value_size;
/*
@@ -2432,14 +3031,14 @@
* No matter whether we replace an old one or add a new one,
* we need this for writing.
*/
- if (xi->value_len > OCFS2_XATTR_INLINE_SIZE)
+ if (xi->xi_value_len > OCFS2_XATTR_INLINE_SIZE)
credits += new_clusters *
ocfs2_clusters_to_blocks(inode->i_sb, 1);
if (xis->not_found && xbs->not_found) {
credits += ocfs2_blocks_per_xattr_bucket(inode->i_sb);
- if (xi->value_len > OCFS2_XATTR_INLINE_SIZE) {
+ if (xi->xi_value_len > OCFS2_XATTR_INLINE_SIZE) {
clusters_add += new_clusters;
credits += ocfs2_calc_extend_credits(inode->i_sb,
&def_xv.xv.xr_list,
@@ -2484,7 +3083,7 @@
* The credits for removing the value tree will be extended
* by ocfs2_remove_extent itself.
*/
- if (!xi->value) {
+ if (!xi->xi_value) {
if (!ocfs2_xattr_is_local(xe))
credits += ocfs2_remove_extent_credits(inode->i_sb);
@@ -2514,7 +3113,7 @@
}
}
- if (xi->value_len > OCFS2_XATTR_INLINE_SIZE) {
+ if (xi->xi_value_len > OCFS2_XATTR_INLINE_SIZE) {
/* the new values will be stored outside. */
u32 old_clusters = 0;
@@ -2547,9 +3146,10 @@
* value, we don't need any allocation, otherwise we have
* to guess metadata allocation.
*/
- if ((ocfs2_xattr_is_local(xe) && value_size >= xi->value_len) ||
+ if ((ocfs2_xattr_is_local(xe) &&
+ (value_size >= xi->xi_value_len)) ||
(!ocfs2_xattr_is_local(xe) &&
- OCFS2_XATTR_ROOT_SIZE >= xi->value_len))
+ OCFS2_XATTR_ROOT_SIZE >= xi->xi_value_len))
goto out;
}
@@ -2639,7 +3239,7 @@
meta_add += extra_meta;
mlog(0, "Set xattr %s, reserve meta blocks = %d, clusters = %d, "
- "credits = %d\n", xi->name, meta_add, clusters_add, *credits);
+ "credits = %d\n", xi->xi_name, meta_add, clusters_add, *credits);
if (meta_add) {
ret = ocfs2_reserve_new_metadata_blocks(osb, meta_add,
@@ -2679,7 +3279,7 @@
{
int ret = 0, credits, old_found;
- if (!xi->value) {
+ if (!xi->xi_value) {
/* Remove existing extended attribute */
if (!xis->not_found)
ret = ocfs2_xattr_ibody_set(inode, xi, xis, ctxt);
@@ -2693,8 +3293,8 @@
* If succeed and that extended attribute existing in
* external block, then we will remove it.
*/
- xi->value = NULL;
- xi->value_len = 0;
+ xi->xi_value = NULL;
+ xi->xi_value_len = 0;
old_found = xis->not_found;
xis->not_found = -ENODATA;
@@ -2722,8 +3322,8 @@
} else if (ret == -ENOSPC) {
if (di->i_xattr_loc && !xbs->xattr_bh) {
ret = ocfs2_xattr_block_find(inode,
- xi->name_index,
- xi->name, xbs);
+ xi->xi_name_index,
+ xi->xi_name, xbs);
if (ret)
goto out;
@@ -2762,8 +3362,8 @@
* If succeed and that extended attribute
* existing in inode, we will remove it.
*/
- xi->value = NULL;
- xi->value_len = 0;
+ xi->xi_value = NULL;
+ xi->xi_value_len = 0;
xbs->not_found = -ENODATA;
ret = ocfs2_calc_xattr_set_need(inode,
di,
@@ -2829,10 +3429,11 @@
int ret;
struct ocfs2_xattr_info xi = {
- .name_index = name_index,
- .name = name,
- .value = value,
- .value_len = value_len,
+ .xi_name_index = name_index,
+ .xi_name = name,
+ .xi_name_len = strlen(name),
+ .xi_value = value,
+ .xi_value_len = value_len,
};
struct ocfs2_xattr_search xis = {
@@ -2912,10 +3513,11 @@
struct ocfs2_refcount_tree *ref_tree = NULL;
struct ocfs2_xattr_info xi = {
- .name_index = name_index,
- .name = name,
- .value = value,
- .value_len = value_len,
+ .xi_name_index = name_index,
+ .xi_name = name,
+ .xi_name_len = strlen(name),
+ .xi_value = value,
+ .xi_value_len = value_len,
};
struct ocfs2_xattr_search xis = {
@@ -3759,7 +4361,7 @@
struct ocfs2_xattr_bucket *bucket)
{
int ret, i;
- size_t end, offset, len, value_len;
+ size_t end, offset, len;
struct ocfs2_xattr_header *xh;
char *entries, *buf, *bucket_buf = NULL;
u64 blkno = bucket_blkno(bucket);
@@ -3813,12 +4415,7 @@
end = OCFS2_XATTR_BUCKET_SIZE;
for (i = 0; i < le16_to_cpu(xh->xh_count); i++, xe++) {
offset = le16_to_cpu(xe->xe_name_offset);
- if (ocfs2_xattr_is_local(xe))
- value_len = OCFS2_XATTR_SIZE(
- le64_to_cpu(xe->xe_value_size));
- else
- value_len = OCFS2_XATTR_ROOT_SIZE;
- len = OCFS2_XATTR_SIZE(xe->xe_name_len) + value_len;
+ len = namevalue_size_xe(xe);
/*
* We must make sure that the name/value pair
@@ -4007,7 +4604,7 @@
int new_bucket_head)
{
int ret, i;
- int count, start, len, name_value_len = 0, xe_len, name_offset = 0;
+ int count, start, len, name_value_len = 0, name_offset = 0;
struct ocfs2_xattr_bucket *s_bucket = NULL, *t_bucket = NULL;
struct ocfs2_xattr_header *xh;
struct ocfs2_xattr_entry *xe;
@@ -4098,13 +4695,7 @@
name_value_len = 0;
for (i = 0; i < start; i++) {
xe = &xh->xh_entries[i];
- xe_len = OCFS2_XATTR_SIZE(xe->xe_name_len);
- if (ocfs2_xattr_is_local(xe))
- xe_len +=
- OCFS2_XATTR_SIZE(le64_to_cpu(xe->xe_value_size));
- else
- xe_len += OCFS2_XATTR_ROOT_SIZE;
- name_value_len += xe_len;
+ name_value_len += namevalue_size_xe(xe);
if (le16_to_cpu(xe->xe_name_offset) < name_offset)
name_offset = le16_to_cpu(xe->xe_name_offset);
}
@@ -4134,12 +4725,6 @@
xh->xh_free_start = cpu_to_le16(OCFS2_XATTR_BUCKET_SIZE);
for (i = 0; i < le16_to_cpu(xh->xh_count); i++) {
xe = &xh->xh_entries[i];
- xe_len = OCFS2_XATTR_SIZE(xe->xe_name_len);
- if (ocfs2_xattr_is_local(xe))
- xe_len +=
- OCFS2_XATTR_SIZE(le64_to_cpu(xe->xe_value_size));
- else
- xe_len += OCFS2_XATTR_ROOT_SIZE;
if (le16_to_cpu(xe->xe_name_offset) <
le16_to_cpu(xh->xh_free_start))
xh->xh_free_start = xe->xe_name_offset;
@@ -4751,195 +5336,6 @@
}
/*
- * Handle the normal xattr set, including replace, delete and new.
- *
- * Note: "local" indicates the real data's locality. So we can't
- * just its bucket locality by its length.
- */
-static void ocfs2_xattr_set_entry_normal(struct inode *inode,
- struct ocfs2_xattr_info *xi,
- struct ocfs2_xattr_search *xs,
- u32 name_hash,
- int local)
-{
- struct ocfs2_xattr_entry *last, *xe;
- int name_len = strlen(xi->name);
- struct ocfs2_xattr_header *xh = xs->header;
- u16 count = le16_to_cpu(xh->xh_count), start;
- size_t blocksize = inode->i_sb->s_blocksize;
- char *val;
- size_t offs, size, new_size;
-
- last = &xh->xh_entries[count];
- if (!xs->not_found) {
- xe = xs->here;
- offs = le16_to_cpu(xe->xe_name_offset);
- if (ocfs2_xattr_is_local(xe))
- size = OCFS2_XATTR_SIZE(name_len) +
- OCFS2_XATTR_SIZE(le64_to_cpu(xe->xe_value_size));
- else
- size = OCFS2_XATTR_SIZE(name_len) +
- OCFS2_XATTR_SIZE(OCFS2_XATTR_ROOT_SIZE);
-
- /*
- * If the new value will be stored outside, xi->value has been
- * initalized as an empty ocfs2_xattr_value_root, and the same
- * goes with xi->value_len, so we can set new_size safely here.
- * See ocfs2_xattr_set_in_bucket.
- */
- new_size = OCFS2_XATTR_SIZE(name_len) +
- OCFS2_XATTR_SIZE(xi->value_len);
-
- le16_add_cpu(&xh->xh_name_value_len, -size);
- if (xi->value) {
- if (new_size > size)
- goto set_new_name_value;
-
- /* Now replace the old value with new one. */
- if (local)
- xe->xe_value_size = cpu_to_le64(xi->value_len);
- else
- xe->xe_value_size = 0;
-
- val = ocfs2_xattr_bucket_get_val(inode,
- xs->bucket, offs);
- memset(val + OCFS2_XATTR_SIZE(name_len), 0,
- size - OCFS2_XATTR_SIZE(name_len));
- if (OCFS2_XATTR_SIZE(xi->value_len) > 0)
- memcpy(val + OCFS2_XATTR_SIZE(name_len),
- xi->value, xi->value_len);
-
- le16_add_cpu(&xh->xh_name_value_len, new_size);
- ocfs2_xattr_set_local(xe, local);
- return;
- } else {
- /*
- * Remove the old entry if there is more than one.
- * We don't remove the last entry so that we can
- * use it to indicate the hash value of the empty
- * bucket.
- */
- last -= 1;
- le16_add_cpu(&xh->xh_count, -1);
- if (xh->xh_count) {
- memmove(xe, xe + 1,
- (void *)last - (void *)xe);
- memset(last, 0,
- sizeof(struct ocfs2_xattr_entry));
- } else
- xh->xh_free_start =
- cpu_to_le16(OCFS2_XATTR_BUCKET_SIZE);
-
- return;
- }
- } else {
- /* find a new entry for insert. */
- int low = 0, high = count - 1, tmp;
- struct ocfs2_xattr_entry *tmp_xe;
-
- while (low <= high && count) {
- tmp = (low + high) / 2;
- tmp_xe = &xh->xh_entries[tmp];
-
- if (name_hash > le32_to_cpu(tmp_xe->xe_name_hash))
- low = tmp + 1;
- else if (name_hash <
- le32_to_cpu(tmp_xe->xe_name_hash))
- high = tmp - 1;
- else {
- low = tmp;
- break;
- }
- }
-
- xe = &xh->xh_entries[low];
- if (low != count)
- memmove(xe + 1, xe, (void *)last - (void *)xe);
-
- le16_add_cpu(&xh->xh_count, 1);
- memset(xe, 0, sizeof(struct ocfs2_xattr_entry));
- xe->xe_name_hash = cpu_to_le32(name_hash);
- xe->xe_name_len = name_len;
- ocfs2_xattr_set_type(xe, xi->name_index);
- }
-
-set_new_name_value:
- /* Insert the new name+value. */
- size = OCFS2_XATTR_SIZE(name_len) + OCFS2_XATTR_SIZE(xi->value_len);
-
- /*
- * We must make sure that the name/value pair
- * exists in the same block.
- */
- offs = le16_to_cpu(xh->xh_free_start);
- start = offs - size;
-
- if (start >> inode->i_sb->s_blocksize_bits !=
- (offs - 1) >> inode->i_sb->s_blocksize_bits) {
- offs = offs - offs % blocksize;
- xh->xh_free_start = cpu_to_le16(offs);
- }
-
- val = ocfs2_xattr_bucket_get_val(inode, xs->bucket, offs - size);
- xe->xe_name_offset = cpu_to_le16(offs - size);
-
- memset(val, 0, size);
- memcpy(val, xi->name, name_len);
- memcpy(val + OCFS2_XATTR_SIZE(name_len), xi->value, xi->value_len);
-
- xe->xe_value_size = cpu_to_le64(xi->value_len);
- ocfs2_xattr_set_local(xe, local);
- xs->here = xe;
- le16_add_cpu(&xh->xh_free_start, -size);
- le16_add_cpu(&xh->xh_name_value_len, size);
-
- return;
-}
-
-/*
- * Set the xattr entry in the specified bucket.
- * The bucket is indicated by xs->bucket and it should have the enough
- * space for the xattr insertion.
- */
-static int ocfs2_xattr_set_entry_in_bucket(struct inode *inode,
- handle_t *handle,
- struct ocfs2_xattr_info *xi,
- struct ocfs2_xattr_search *xs,
- u32 name_hash,
- int local)
-{
- int ret;
- u64 blkno;
-
- mlog(0, "Set xattr entry len = %lu index = %d in bucket %llu\n",
- (unsigned long)xi->value_len, xi->name_index,
- (unsigned long long)bucket_blkno(xs->bucket));
-
- if (!xs->bucket->bu_bhs[1]) {
- blkno = bucket_blkno(xs->bucket);
- ocfs2_xattr_bucket_relse(xs->bucket);
- ret = ocfs2_read_xattr_bucket(xs->bucket, blkno);
- if (ret) {
- mlog_errno(ret);
- goto out;
- }
- }
-
- ret = ocfs2_xattr_bucket_journal_access(handle, xs->bucket,
- OCFS2_JOURNAL_ACCESS_WRITE);
- if (ret < 0) {
- mlog_errno(ret);
- goto out;
- }
-
- ocfs2_xattr_set_entry_normal(inode, xi, xs, name_hash, local);
- ocfs2_xattr_bucket_journal_dirty(handle, xs->bucket);
-
-out:
- return ret;
-}
-
-/*
* Truncate the specified xe_off entry in xattr bucket.
* bucket is indicated by header_bh and len is the new length.
* Both the ocfs2_xattr_value_root and the entry will be updated here.
@@ -5009,66 +5405,6 @@
return ret;
}
-static int ocfs2_xattr_bucket_value_truncate_xs(struct inode *inode,
- struct ocfs2_xattr_search *xs,
- int len,
- struct ocfs2_xattr_set_ctxt *ctxt)
-{
- int ret, offset;
- struct ocfs2_xattr_entry *xe = xs->here;
- struct ocfs2_xattr_header *xh = (struct ocfs2_xattr_header *)xs->base;
-
- BUG_ON(!xs->bucket->bu_bhs[0] || !xe || ocfs2_xattr_is_local(xe));
-
- offset = xe - xh->xh_entries;
- ret = ocfs2_xattr_bucket_value_truncate(inode, xs->bucket,
- offset, len, ctxt);
- if (ret)
- mlog_errno(ret);
-
- return ret;
-}
-
-static int ocfs2_xattr_bucket_set_value_outside(struct inode *inode,
- handle_t *handle,
- struct ocfs2_xattr_search *xs,
- char *val,
- int value_len)
-{
- int ret, offset, block_off;
- struct ocfs2_xattr_value_root *xv;
- struct ocfs2_xattr_entry *xe = xs->here;
- struct ocfs2_xattr_header *xh = bucket_xh(xs->bucket);
- void *base;
- struct ocfs2_xattr_value_buf vb = {
- .vb_access = ocfs2_journal_access,
- };
-
- BUG_ON(!xs->base || !xe || ocfs2_xattr_is_local(xe));
-
- ret = ocfs2_xattr_bucket_get_name_value(inode->i_sb, xh,
- xe - xh->xh_entries,
- &block_off,
- &offset);
- if (ret) {
- mlog_errno(ret);
- goto out;
- }
-
- base = bucket_block(xs->bucket, block_off);
- xv = (struct ocfs2_xattr_value_root *)(base + offset +
- OCFS2_XATTR_SIZE(xe->xe_name_len));
-
- vb.vb_xv = xv;
- vb.vb_bh = xs->bucket->bu_bhs[block_off];
- ret = __ocfs2_xattr_set_value_outside(inode, handle,
- &vb, val, value_len);
- if (ret)
- mlog_errno(ret);
-out:
- return ret;
-}
-
static int ocfs2_rm_xattr_cluster(struct inode *inode,
struct buffer_head *root_bh,
u64 blkno,
@@ -5167,128 +5503,6 @@
return ret;
}
-static void ocfs2_xattr_bucket_remove_xs(struct inode *inode,
- handle_t *handle,
- struct ocfs2_xattr_search *xs)
-{
- struct ocfs2_xattr_header *xh = bucket_xh(xs->bucket);
- struct ocfs2_xattr_entry *last = &xh->xh_entries[
- le16_to_cpu(xh->xh_count) - 1];
- int ret = 0;
-
- ret = ocfs2_xattr_bucket_journal_access(handle, xs->bucket,
- OCFS2_JOURNAL_ACCESS_WRITE);
- if (ret) {
- mlog_errno(ret);
- return;
- }
-
- /* Remove the old entry. */
- memmove(xs->here, xs->here + 1,
- (void *)last - (void *)xs->here);
- memset(last, 0, sizeof(struct ocfs2_xattr_entry));
- le16_add_cpu(&xh->xh_count, -1);
-
- ocfs2_xattr_bucket_journal_dirty(handle, xs->bucket);
-}
-
-/*
- * Set the xattr name/value in the bucket specified in xs.
- *
- * As the new value in xi may be stored in the bucket or in an outside cluster,
- * we divide the whole process into 3 steps:
- * 1. insert name/value in the bucket(ocfs2_xattr_set_entry_in_bucket)
- * 2. truncate of the outside cluster(ocfs2_xattr_bucket_value_truncate_xs)
- * 3. Set the value to the outside cluster(ocfs2_xattr_bucket_set_value_outside)
- * 4. If the clusters for the new outside value can't be allocated, we need
- * to free the xattr we allocated in set.
- */
-static int ocfs2_xattr_set_in_bucket(struct inode *inode,
- struct ocfs2_xattr_info *xi,
- struct ocfs2_xattr_search *xs,
- struct ocfs2_xattr_set_ctxt *ctxt)
-{
- int ret, local = 1;
- size_t value_len;
- char *val = (char *)xi->value;
- struct ocfs2_xattr_entry *xe = xs->here;
- u32 name_hash = ocfs2_xattr_name_hash(inode, xi->name,
- strlen(xi->name));
-
- if (!xs->not_found && !ocfs2_xattr_is_local(xe)) {
- /*
- * We need to truncate the xattr storage first.
- *
- * If both the old and new value are stored to
- * outside block, we only need to truncate
- * the storage and then set the value outside.
- *
- * If the new value should be stored within block,
- * we should free all the outside block first and
- * the modification to the xattr block will be done
- * by following steps.
- */
- if (xi->value_len > OCFS2_XATTR_INLINE_SIZE)
- value_len = xi->value_len;
- else
- value_len = 0;
-
- ret = ocfs2_xattr_bucket_value_truncate_xs(inode, xs,
- value_len,
- ctxt);
- if (ret)
- goto out;
-
- if (value_len)
- goto set_value_outside;
- }
-
- value_len = xi->value_len;
- /* So we have to handle the inside block change now. */
- if (value_len > OCFS2_XATTR_INLINE_SIZE) {
- /*
- * If the new value will be stored outside of block,
- * initalize a new empty value root and insert it first.
- */
- local = 0;
- xi->value = &def_xv;
- xi->value_len = OCFS2_XATTR_ROOT_SIZE;
- }
-
- ret = ocfs2_xattr_set_entry_in_bucket(inode, ctxt->handle, xi, xs,
- name_hash, local);
- if (ret) {
- mlog_errno(ret);
- goto out;
- }
-
- if (value_len <= OCFS2_XATTR_INLINE_SIZE)
- goto out;
-
- /* allocate the space now for the outside block storage. */
- ret = ocfs2_xattr_bucket_value_truncate_xs(inode, xs,
- value_len, ctxt);
- if (ret) {
- mlog_errno(ret);
-
- if (xs->not_found) {
- /*
- * We can't allocate enough clusters for outside
- * storage and we have allocated xattr already,
- * so need to remove it.
- */
- ocfs2_xattr_bucket_remove_xs(inode, ctxt->handle, xs);
- }
- goto out;
- }
-
-set_value_outside:
- ret = ocfs2_xattr_bucket_set_value_outside(inode, ctxt->handle,
- xs, val, value_len);
-out:
- return ret;
-}
-
/*
* check whether the xattr bucket is filled up with the same hash value.
* If we want to insert the xattr with the same hash, return -ENOSPC.
@@ -5317,156 +5531,116 @@
return 0;
}
+/*
+ * Try to set the entry in the current bucket. If we fail, the caller
+ * will handle getting us another bucket.
+ */
+static int ocfs2_xattr_set_entry_bucket(struct inode *inode,
+ struct ocfs2_xattr_info *xi,
+ struct ocfs2_xattr_search *xs,
+ struct ocfs2_xattr_set_ctxt *ctxt)
+{
+ int ret;
+ struct ocfs2_xa_loc loc;
+
+ mlog_entry("Set xattr %s in xattr bucket\n", xi->xi_name);
+
+ ocfs2_init_xattr_bucket_xa_loc(&loc, xs->bucket,
+ xs->not_found ? NULL : xs->here);
+ ret = ocfs2_xa_set(&loc, xi, ctxt);
+ if (!ret) {
+ xs->here = loc.xl_entry;
+ goto out;
+ }
+ if (ret != -ENOSPC) {
+ mlog_errno(ret);
+ goto out;
+ }
+
+ /* Ok, we need space. Let's try defragmenting the bucket. */
+ ret = ocfs2_defrag_xattr_bucket(inode, ctxt->handle,
+ xs->bucket);
+ if (ret) {
+ mlog_errno(ret);
+ goto out;
+ }
+
+ ret = ocfs2_xa_set(&loc, xi, ctxt);
+ if (!ret) {
+ xs->here = loc.xl_entry;
+ goto out;
+ }
+ if (ret != -ENOSPC)
+ mlog_errno(ret);
+
+out:
+ mlog_exit(ret);
+ return ret;
+}
+
static int ocfs2_xattr_set_entry_index_block(struct inode *inode,
struct ocfs2_xattr_info *xi,
struct ocfs2_xattr_search *xs,
struct ocfs2_xattr_set_ctxt *ctxt)
{
- struct ocfs2_xattr_header *xh;
- struct ocfs2_xattr_entry *xe;
- u16 count, header_size, xh_free_start;
- int free, max_free, need, old;
- size_t value_size = 0, name_len = strlen(xi->name);
- size_t blocksize = inode->i_sb->s_blocksize;
- int ret, allocation = 0;
+ int ret;
- mlog_entry("Set xattr %s in xattr index block\n", xi->name);
+ mlog_entry("Set xattr %s in xattr index block\n", xi->xi_name);
-try_again:
- xh = xs->header;
- count = le16_to_cpu(xh->xh_count);
- xh_free_start = le16_to_cpu(xh->xh_free_start);
- header_size = sizeof(struct ocfs2_xattr_header) +
- count * sizeof(struct ocfs2_xattr_entry);
- max_free = OCFS2_XATTR_BUCKET_SIZE - header_size -
- le16_to_cpu(xh->xh_name_value_len) - OCFS2_XATTR_HEADER_GAP;
-
- mlog_bug_on_msg(header_size > blocksize, "bucket %llu has header size "
- "of %u which exceed block size\n",
- (unsigned long long)bucket_blkno(xs->bucket),
- header_size);
-
- if (xi->value && xi->value_len > OCFS2_XATTR_INLINE_SIZE)
- value_size = OCFS2_XATTR_ROOT_SIZE;
- else if (xi->value)
- value_size = OCFS2_XATTR_SIZE(xi->value_len);
-
- if (xs->not_found)
- need = sizeof(struct ocfs2_xattr_entry) +
- OCFS2_XATTR_SIZE(name_len) + value_size;
- else {
- need = value_size + OCFS2_XATTR_SIZE(name_len);
-
- /*
- * We only replace the old value if the new length is smaller
- * than the old one. Otherwise we will allocate new space in the
- * bucket to store it.
- */
- xe = xs->here;
- if (ocfs2_xattr_is_local(xe))
- old = OCFS2_XATTR_SIZE(le64_to_cpu(xe->xe_value_size));
- else
- old = OCFS2_XATTR_SIZE(OCFS2_XATTR_ROOT_SIZE);
-
- if (old >= value_size)
- need = 0;
+ ret = ocfs2_xattr_set_entry_bucket(inode, xi, xs, ctxt);
+ if (!ret)
+ goto out;
+ if (ret != -ENOSPC) {
+ mlog_errno(ret);
+ goto out;
}
- free = xh_free_start - header_size - OCFS2_XATTR_HEADER_GAP;
+ /* Ack, need more space. Let's try to get another bucket! */
+
/*
- * We need to make sure the new name/value pair
- * can exist in the same block.
+ * We do not allow for overlapping ranges between buckets. And
+ * the maximum number of collisions we will allow for then is
+ * one bucket's worth, so check it here whether we need to
+ * add a new bucket for the insert.
*/
- if (xh_free_start % blocksize < need)
- free -= xh_free_start % blocksize;
-
- mlog(0, "xs->not_found = %d, in xattr bucket %llu: free = %d, "
- "need = %d, max_free = %d, xh_free_start = %u, xh_name_value_len ="
- " %u\n", xs->not_found,
- (unsigned long long)bucket_blkno(xs->bucket),
- free, need, max_free, le16_to_cpu(xh->xh_free_start),
- le16_to_cpu(xh->xh_name_value_len));
-
- if (free < need ||
- (xs->not_found &&
- count == ocfs2_xattr_max_xe_in_bucket(inode->i_sb))) {
- if (need <= max_free &&
- count < ocfs2_xattr_max_xe_in_bucket(inode->i_sb)) {
- /*
- * We can create the space by defragment. Since only the
- * name/value will be moved, the xe shouldn't be changed
- * in xs.
- */
- ret = ocfs2_defrag_xattr_bucket(inode, ctxt->handle,
- xs->bucket);
- if (ret) {
- mlog_errno(ret);
- goto out;
- }
-
- xh_free_start = le16_to_cpu(xh->xh_free_start);
- free = xh_free_start - header_size
- - OCFS2_XATTR_HEADER_GAP;
- if (xh_free_start % blocksize < need)
- free -= xh_free_start % blocksize;
-
- if (free >= need)
- goto xattr_set;
-
- mlog(0, "Can't get enough space for xattr insert by "
- "defragment. Need %u bytes, but we have %d, so "
- "allocate new bucket for it.\n", need, free);
- }
-
- /*
- * We have to add new buckets or clusters and one
- * allocation should leave us enough space for insert.
- */
- BUG_ON(allocation);
-
- /*
- * We do not allow for overlapping ranges between buckets. And
- * the maximum number of collisions we will allow for then is
- * one bucket's worth, so check it here whether we need to
- * add a new bucket for the insert.
- */
- ret = ocfs2_check_xattr_bucket_collision(inode,
- xs->bucket,
- xi->name);
- if (ret) {
- mlog_errno(ret);
- goto out;
- }
-
- ret = ocfs2_add_new_xattr_bucket(inode,
- xs->xattr_bh,
+ ret = ocfs2_check_xattr_bucket_collision(inode,
xs->bucket,
- ctxt);
- if (ret) {
- mlog_errno(ret);
- goto out;
- }
-
- /*
- * ocfs2_add_new_xattr_bucket() will have updated
- * xs->bucket if it moved, but it will not have updated
- * any of the other search fields. Thus, we drop it and
- * re-search. Everything should be cached, so it'll be
- * quick.
- */
- ocfs2_xattr_bucket_relse(xs->bucket);
- ret = ocfs2_xattr_index_block_find(inode, xs->xattr_bh,
- xi->name_index,
- xi->name, xs);
- if (ret && ret != -ENODATA)
- goto out;
- xs->not_found = ret;
- allocation = 1;
- goto try_again;
+ xi->xi_name);
+ if (ret) {
+ mlog_errno(ret);
+ goto out;
}
-xattr_set:
- ret = ocfs2_xattr_set_in_bucket(inode, xi, xs, ctxt);
+ ret = ocfs2_add_new_xattr_bucket(inode,
+ xs->xattr_bh,
+ xs->bucket,
+ ctxt);
+ if (ret) {
+ mlog_errno(ret);
+ goto out;
+ }
+
+ /*
+ * ocfs2_add_new_xattr_bucket() will have updated
+ * xs->bucket if it moved, but it will not have updated
+ * any of the other search fields. Thus, we drop it and
+ * re-search. Everything should be cached, so it'll be
+ * quick.
+ */
+ ocfs2_xattr_bucket_relse(xs->bucket);
+ ret = ocfs2_xattr_index_block_find(inode, xs->xattr_bh,
+ xi->xi_name_index,
+ xi->xi_name, xs);
+ if (ret && ret != -ENODATA)
+ goto out;
+ xs->not_found = ret;
+
+ /* Ok, we have a new bucket, let's try again */
+ ret = ocfs2_xattr_set_entry_bucket(inode, xi, xs, ctxt);
+ if (ret && (ret != -ENOSPC))
+ mlog_errno(ret);
+
out:
mlog_exit(ret);
return ret;
@@ -5678,7 +5852,7 @@
* refcount tree, and make the original extent become 3. So we will need
* 2 * cluster more extent recs at most.
*/
- if (!xi->value || xi->value_len <= OCFS2_XATTR_INLINE_SIZE) {
+ if (!xi->xi_value || xi->xi_value_len <= OCFS2_XATTR_INLINE_SIZE) {
ret = ocfs2_refcounted_xattr_delete_need(inode,
&(*ref_tree)->rf_ci,
@@ -6354,9 +6528,11 @@
int indexed)
{
int ret;
- handle_t *handle;
struct ocfs2_alloc_context *meta_ac;
struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
+ struct ocfs2_xattr_set_ctxt ctxt;
+
+ memset(&ctxt, 0, sizeof(ctxt));
ret = ocfs2_reserve_new_metadata_blocks(osb, 1, &meta_ac);
if (ret < 0) {
@@ -6364,21 +6540,21 @@
return ret;
}
+
+ ctxt.meta_ac = meta_ac;
- handle = ocfs2_start_trans(osb, OCFS2_XATTR_BLOCK_CREATE_CREDITS);
- if (IS_ERR(handle)) {
- ret = PTR_ERR(handle);
+ ctxt.handle = ocfs2_start_trans(osb, OCFS2_XATTR_BLOCK_CREATE_CREDITS);
+ if (IS_ERR(ctxt.handle)) {
+ ret = PTR_ERR(ctxt.handle);
mlog_errno(ret);
goto out;
}
mlog(0, "create new xattr block for inode %llu, index = %d\n",
(unsigned long long)fe_bh->b_blocknr, indexed);
- ret = ocfs2_create_xattr_block(handle, inode, fe_bh,
- meta_ac, ret_bh, indexed);
+ ret = ocfs2_create_xattr_block(inode, fe_bh, &ctxt, indexed,
+ ret_bh);
if (ret)
mlog_errno(ret);
- ocfs2_commit_trans(osb, handle);
+ ocfs2_commit_trans(osb, ctxt.handle);
out:
ocfs2_free_alloc_context(meta_ac);
return ret;
diff --git a/fs/proc/kmsg.c b/fs/proc/kmsg.c
index 7ca7834..cfe90a4 100644
--- a/fs/proc/kmsg.c
+++ b/fs/proc/kmsg.c
@@ -12,37 +12,37 @@
#include <linux/poll.h>
#include <linux/proc_fs.h>
#include <linux/fs.h>
+#include <linux/syslog.h>
#include <asm/uaccess.h>
#include <asm/io.h>
extern wait_queue_head_t log_wait;
-extern int do_syslog(int type, char __user *bug, int count);
-
static int kmsg_open(struct inode * inode, struct file * file)
{
- return do_syslog(1,NULL,0);
+ return do_syslog(SYSLOG_ACTION_OPEN, NULL, 0, SYSLOG_FROM_FILE);
}
static int kmsg_release(struct inode * inode, struct file * file)
{
- (void) do_syslog(0,NULL,0);
+ (void) do_syslog(SYSLOG_ACTION_CLOSE, NULL, 0, SYSLOG_FROM_FILE);
return 0;
}
static ssize_t kmsg_read(struct file *file, char __user *buf,
size_t count, loff_t *ppos)
{
- if ((file->f_flags & O_NONBLOCK) && !do_syslog(9, NULL, 0))
+ if ((file->f_flags & O_NONBLOCK) &&
+ !do_syslog(SYSLOG_ACTION_SIZE_UNREAD, NULL, 0, SYSLOG_FROM_FILE))
return -EAGAIN;
- return do_syslog(2, buf, count);
+ return do_syslog(SYSLOG_ACTION_READ, buf, count, SYSLOG_FROM_FILE);
}
static unsigned int kmsg_poll(struct file *file, poll_table *wait)
{
poll_wait(file, &log_wait, wait);
- if (do_syslog(9, NULL, 0))
+ if (do_syslog(SYSLOG_ACTION_SIZE_UNREAD, NULL, 0, SYSLOG_FROM_FILE))
return POLLIN | POLLRDNORM;
return 0;
}
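
The kmsg hooks above now pass named actions instead of bare numbers, plus a SYSLOG_FROM_FILE origin marking calls that arrive via /proc/kmsg. The numeric values below follow directly from the do_syslog() calls being replaced; treat the block as a reading aid rather than a copy of linux/syslog.h:

    /* Values inferred from the replaced calls above. */
    #define SYSLOG_ACTION_CLOSE        0   /* kmsg_release(): was do_syslog(0, ...) */
    #define SYSLOG_ACTION_OPEN         1   /* kmsg_open():    was do_syslog(1, ...) */
    #define SYSLOG_ACTION_READ         2   /* kmsg_read():    was do_syslog(2, ...) */
    #define SYSLOG_ACTION_SIZE_UNREAD  9   /* kmsg_read()/kmsg_poll(): was do_syslog(9, ...) */
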
diff --git a/fs/xfs/xfs_mount.h b/fs/xfs/xfs_mount.h
index 70504fc..14dafd6 100644
--- a/fs/xfs/xfs_mount.h
+++ b/fs/xfs/xfs_mount.h
@@ -245,7 +245,7 @@
struct xfs_qmops *m_qm_ops; /* vector of XQM ops */
atomic_t m_active_trans; /* number trans frozen */
#ifdef HAVE_PERCPU_SB
- xfs_icsb_cnts_t *m_sb_cnts; /* per-cpu superblock counters */
+ xfs_icsb_cnts_t __percpu *m_sb_cnts; /* per-cpu superblock counters */
unsigned long m_icsb_counters; /* disabled per-cpu counters */
struct notifier_block m_icsb_notifier; /* hotplug cpu notifier */
struct mutex m_icsb_mutex; /* balancer sync lock */
diff --git a/include/acpi/processor.h b/include/acpi/processor.h
index 2983176..1172c27 100644
--- a/include/acpi/processor.h
+++ b/include/acpi/processor.h
@@ -238,7 +238,7 @@
extern int acpi_processor_preregister_performance(struct
acpi_processor_performance
- *performance);
+ __percpu *performance);
extern int acpi_processor_register_performance(struct acpi_processor_performance
*performance, unsigned int cpu);
diff --git a/include/asm-generic/local.h b/include/asm-generic/local.h
index fc21844..c8a5d68 100644
--- a/include/asm-generic/local.h
+++ b/include/asm-generic/local.h
@@ -52,23 +52,4 @@
#define __local_add(i,l) local_set((l), local_read(l) + (i))
#define __local_sub(i,l) local_set((l), local_read(l) - (i))
-/* Use these for per-cpu local_t variables: on some archs they are
- * much more efficient than these naive implementations. Note they take
- * a variable (eg. mystruct.foo), not an address.
- */
-#define cpu_local_read(l) local_read(&__get_cpu_var(l))
-#define cpu_local_set(l, i) local_set(&__get_cpu_var(l), (i))
-#define cpu_local_inc(l) local_inc(&__get_cpu_var(l))
-#define cpu_local_dec(l) local_dec(&__get_cpu_var(l))
-#define cpu_local_add(i, l) local_add((i), &__get_cpu_var(l))
-#define cpu_local_sub(i, l) local_sub((i), &__get_cpu_var(l))
-
-/* Non-atomic increments, ie. preemption disabled and won't be touched
- * in interrupt, etc. Some archs can optimize this case well.
- */
-#define __cpu_local_inc(l) __local_inc(&__get_cpu_var(l))
-#define __cpu_local_dec(l) __local_dec(&__get_cpu_var(l))
-#define __cpu_local_add(i, l) __local_add((i), &__get_cpu_var(l))
-#define __cpu_local_sub(i, l) __local_sub((i), &__get_cpu_var(l))
-
#endif /* _ASM_GENERIC_LOCAL_H */
diff --git a/include/asm-generic/percpu.h b/include/asm-generic/percpu.h
index 8087b90..04f91c2 100644
--- a/include/asm-generic/percpu.h
+++ b/include/asm-generic/percpu.h
@@ -41,7 +41,11 @@
* Only S390 provides its own means of moving the pointer.
*/
#ifndef SHIFT_PERCPU_PTR
-#define SHIFT_PERCPU_PTR(__p, __offset) RELOC_HIDE((__p), (__offset))
+/* Weird cast keeps both GCC and sparse happy. */
+#define SHIFT_PERCPU_PTR(__p, __offset) ({ \
+ __verify_pcpu_ptr((__p)); \
+ RELOC_HIDE((typeof(*(__p)) __kernel __force *)(__p), (__offset)); \
+})
#endif
/*
@@ -50,11 +54,11 @@
* offset.
*/
#define per_cpu(var, cpu) \
- (*SHIFT_PERCPU_PTR(&per_cpu_var(var), per_cpu_offset(cpu)))
+ (*SHIFT_PERCPU_PTR(&(var), per_cpu_offset(cpu)))
#define __get_cpu_var(var) \
- (*SHIFT_PERCPU_PTR(&per_cpu_var(var), my_cpu_offset))
+ (*SHIFT_PERCPU_PTR(&(var), my_cpu_offset))
#define __raw_get_cpu_var(var) \
- (*SHIFT_PERCPU_PTR(&per_cpu_var(var), __my_cpu_offset))
+ (*SHIFT_PERCPU_PTR(&(var), __my_cpu_offset))
#define this_cpu_ptr(ptr) SHIFT_PERCPU_PTR(ptr, my_cpu_offset)
#define __this_cpu_ptr(ptr) SHIFT_PERCPU_PTR(ptr, __my_cpu_offset)
@@ -66,9 +70,9 @@
#else /* ! SMP */
-#define per_cpu(var, cpu) (*((void)(cpu), &per_cpu_var(var)))
-#define __get_cpu_var(var) per_cpu_var(var)
-#define __raw_get_cpu_var(var) per_cpu_var(var)
+#define per_cpu(var, cpu) (*((void)(cpu), &(var)))
+#define __get_cpu_var(var) (var)
+#define __raw_get_cpu_var(var) (var)
#define this_cpu_ptr(ptr) per_cpu_ptr(ptr, 0)
#define __this_cpu_ptr(ptr) this_cpu_ptr(ptr)
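
With the per_cpu_var() name mangling gone, these accessors take the per-cpu variable itself. A minimal kernel-style sketch of the resulting usage; the foo_ names are hypothetical and not part of this patch:

    #include <linux/percpu.h>

    static DEFINE_PER_CPU(unsigned long, foo_hits);     /* hypothetical counter */

    static void foo_note_hit(void)
    {
            get_cpu_var(foo_hits)++;        /* pins this CPU while touching its copy */
            put_cpu_var(foo_hits);
    }

    static unsigned long foo_total_hits(void)
    {
            unsigned long sum = 0;
            int cpu;

            for_each_possible_cpu(cpu)
                    sum += per_cpu(foo_hits, cpu);       /* a specific CPU's copy */
            return sum;
    }
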
diff --git a/include/linux/blktrace_api.h b/include/linux/blktrace_api.h
index 3b73b99..416bf62 100644
--- a/include/linux/blktrace_api.h
+++ b/include/linux/blktrace_api.h
@@ -150,8 +150,8 @@
struct blk_trace {
int trace_state;
struct rchan *rchan;
- unsigned long *sequence;
- unsigned char *msg_data;
+ unsigned long __percpu *sequence;
+ unsigned char __percpu *msg_data;
u16 act_mask;
u64 start_lba;
u64 end_lba;
diff --git a/include/linux/bootmem.h b/include/linux/bootmem.h
index b10ec49..266ab92 100644
--- a/include/linux/bootmem.h
+++ b/include/linux/bootmem.h
@@ -23,6 +23,7 @@
extern unsigned long saved_max_pfn;
#endif
+#ifndef CONFIG_NO_BOOTMEM
/*
* node_bootmem_map is a map pointer - the bits represent all physical
* memory pages (including holes) on the node.
@@ -37,6 +38,7 @@
} bootmem_data_t;
extern bootmem_data_t bootmem_node_data[];
+#endif
extern unsigned long bootmem_bootmap_pages(unsigned long);
@@ -46,6 +48,7 @@
unsigned long endpfn);
extern unsigned long init_bootmem(unsigned long addr, unsigned long memend);
+unsigned long free_all_memory_core_early(int nodeid);
extern unsigned long free_all_bootmem_node(pg_data_t *pgdat);
extern unsigned long free_all_bootmem(void);
@@ -84,6 +87,10 @@
unsigned long size,
unsigned long align,
unsigned long goal);
+void *__alloc_bootmem_node_high(pg_data_t *pgdat,
+ unsigned long size,
+ unsigned long align,
+ unsigned long goal);
extern void *__alloc_bootmem_node_nopanic(pg_data_t *pgdat,
unsigned long size,
unsigned long align,
diff --git a/include/linux/compiler.h b/include/linux/compiler.h
index 188fcae..a5a472b 100644
--- a/include/linux/compiler.h
+++ b/include/linux/compiler.h
@@ -5,7 +5,7 @@
#ifdef __CHECKER__
# define __user __attribute__((noderef, address_space(1)))
-# define __kernel /* default address space */
+# define __kernel __attribute__((address_space(0)))
# define __safe __attribute__((safe))
# define __force __attribute__((force))
# define __nocast __attribute__((nocast))
diff --git a/include/linux/dmaengine.h b/include/linux/dmaengine.h
index 7878498..21fd9b7 100644
--- a/include/linux/dmaengine.h
+++ b/include/linux/dmaengine.h
@@ -162,7 +162,7 @@
struct dma_chan_dev *dev;
struct list_head device_node;
- struct dma_chan_percpu *local;
+ struct dma_chan_percpu __percpu *local;
int client_count;
int table_count;
void *private;
diff --git a/include/linux/early_res.h b/include/linux/early_res.h
new file mode 100644
index 0000000..29c09f5
--- /dev/null
+++ b/include/linux/early_res.h
@@ -0,0 +1,23 @@
+#ifndef _LINUX_EARLY_RES_H
+#define _LINUX_EARLY_RES_H
+#ifdef __KERNEL__
+
+extern void reserve_early(u64 start, u64 end, char *name);
+extern void reserve_early_overlap_ok(u64 start, u64 end, char *name);
+extern void free_early(u64 start, u64 end);
+void free_early_partial(u64 start, u64 end);
+extern void early_res_to_bootmem(u64 start, u64 end);
+
+void reserve_early_without_check(u64 start, u64 end, char *name);
+u64 find_early_area(u64 ei_start, u64 ei_last, u64 start, u64 end,
+ u64 size, u64 align);
+u64 find_early_area_size(u64 ei_start, u64 ei_last, u64 start,
+ u64 *sizep, u64 align);
+u64 find_fw_memmap_area(u64 start, u64 end, u64 size, u64 align);
+u64 get_max_mapped(void);
+#include <linux/range.h>
+int get_free_all_memory_range(struct range **rangep, int nodeid);
+
+#endif /* __KERNEL__ */
+
+#endif /* _LINUX_EARLY_RES_H */
diff --git a/include/linux/firewire-cdev.h b/include/linux/firewire-cdev.h
index 520ecf8..40b1101 100644
--- a/include/linux/firewire-cdev.h
+++ b/include/linux/firewire-cdev.h
@@ -248,13 +248,20 @@
#define FW_CDEV_IOC_SEND_BROADCAST_REQUEST _IOW('#', 0x12, struct fw_cdev_send_request)
#define FW_CDEV_IOC_SEND_STREAM_PACKET _IOW('#', 0x13, struct fw_cdev_send_stream_packet)
+/* available since kernel version 2.6.34 */
+#define FW_CDEV_IOC_GET_CYCLE_TIMER2 _IOWR('#', 0x14, struct fw_cdev_get_cycle_timer2)
+
/*
* FW_CDEV_VERSION History
* 1 (2.6.22) - initial version
* 2 (2.6.30) - changed &fw_cdev_event_iso_interrupt.header if
* &fw_cdev_create_iso_context.header_size is 8 or more
+ * (2.6.32) - added time stamp to xmit &fw_cdev_event_iso_interrupt
+ * (2.6.33) - IR has always packet-per-buffer semantics now, not one of
+ * dual-buffer or packet-per-buffer depending on hardware
+ * 3 (2.6.34) - made &fw_cdev_get_cycle_timer reliable
*/
-#define FW_CDEV_VERSION 2
+#define FW_CDEV_VERSION 3
/**
* struct fw_cdev_get_info - General purpose information ioctl
@@ -544,14 +551,18 @@
/**
* struct fw_cdev_get_cycle_timer - read cycle timer register
* @local_time: system time, in microseconds since the Epoch
- * @cycle_timer: isochronous cycle timer, as per OHCI 1.1 clause 5.13
+ * @cycle_timer: Cycle Time register contents
*
* The %FW_CDEV_IOC_GET_CYCLE_TIMER ioctl reads the isochronous cycle timer
- * and also the system clock. This allows to express the receive time of an
- * isochronous packet as a system time with microsecond accuracy.
+ * and also the system clock (%CLOCK_REALTIME). This allows the receive
+ * time of an isochronous packet to be expressed as a system time.
*
* @cycle_timer consists of 7 bits cycleSeconds, 13 bits cycleCount, and
- * 12 bits cycleOffset, in host byte order.
+ * 12 bits cycleOffset, in host byte order. Cf. the Cycle Time register
+ * per IEEE 1394 or Isochronous Cycle Timer register per OHCI-1394.
+ *
+ * In versions 1 and 2 of the ABI, this ioctl returned unreliable (non-
+ * monotonic) @cycle_timer values on certain controllers.
*/
struct fw_cdev_get_cycle_timer {
__u64 local_time;
@@ -559,6 +570,25 @@
};
/**
+ * struct fw_cdev_get_cycle_timer2 - read cycle timer register
+ * @tv_sec: system time, seconds
+ * @tv_nsec: system time, sub-seconds part in nanoseconds
+ * @clk_id: input parameter, clock from which to get the system time
+ * @cycle_timer: Cycle Time register contents
+ *
+ * The %FW_CDEV_IOC_GET_CYCLE_TIMER2 works like
+ * %FW_CDEV_IOC_GET_CYCLE_TIMER but lets you choose a clock like with POSIX'
+ * clock_gettime function. Supported @clk_id values are POSIX' %CLOCK_REALTIME
+ * and %CLOCK_MONOTONIC and Linux' %CLOCK_MONOTONIC_RAW.
+ */
+struct fw_cdev_get_cycle_timer2 {
+ __s64 tv_sec;
+ __s32 tv_nsec;
+ __s32 clk_id;
+ __u32 cycle_timer;
+};
+
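
A hypothetical user-space sketch of the new ioctl; the /dev/fw0 path, the choice of CLOCK_MONOTONIC, and the bare-bones error handling are assumptions for illustration, not part of the ABI:

    #include <fcntl.h>
    #include <stdio.h>
    #include <sys/ioctl.h>
    #include <time.h>
    #include <linux/firewire-cdev.h>

    int main(void)
    {
            struct fw_cdev_get_cycle_timer2 ctr = { .clk_id = CLOCK_MONOTONIC };
            int fd = open("/dev/fw0", O_RDWR);

            if (fd < 0 || ioctl(fd, FW_CDEV_IOC_GET_CYCLE_TIMER2, &ctr) < 0)
                    return 1;
            printf("system time %lld.%09d s, cycle timer 0x%08x\n",
                   (long long)ctr.tv_sec, (int)ctr.tv_nsec, ctr.cycle_timer);
            return 0;
    }

Unlike FW_CDEV_IOC_GET_CYCLE_TIMER, the clock is an input here, so callers can pair the register read with a monotonic clock.
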
+/**
* struct fw_cdev_allocate_iso_resource - (De)allocate a channel or bandwidth
* @closure: Passed back to userspace in correponding iso resource events
* @channels: Isochronous channels of which one is to be (de)allocated
diff --git a/include/linux/firewire.h b/include/linux/firewire.h
index a0e6715..4bd94bf 100644
--- a/include/linux/firewire.h
+++ b/include/linux/firewire.h
@@ -65,12 +65,13 @@
#define CSR_DIRECTORY_ID 0x20
struct fw_csr_iterator {
- u32 *p;
- u32 *end;
+ const u32 *p;
+ const u32 *end;
};
-void fw_csr_iterator_init(struct fw_csr_iterator *ci, u32 *p);
+void fw_csr_iterator_init(struct fw_csr_iterator *ci, const u32 *p);
int fw_csr_iterator_next(struct fw_csr_iterator *ci, int *key, int *value);
+int fw_csr_string(const u32 *directory, int key, char *buf, size_t size);
extern struct bus_type fw_bus_type;
@@ -162,7 +163,7 @@
struct mutex client_list_mutex;
struct list_head client_list;
- u32 *config_rom;
+ const u32 *config_rom;
size_t config_rom_length;
int config_rom_retries;
unsigned is_local:1;
@@ -204,7 +205,7 @@
*/
struct fw_unit {
struct device device;
- u32 *directory;
+ const u32 *directory;
struct fw_attribute_group attribute_group;
};
diff --git a/include/linux/genhd.h b/include/linux/genhd.h
index 9717081..56b5051 100644
--- a/include/linux/genhd.h
+++ b/include/linux/genhd.h
@@ -101,7 +101,7 @@
unsigned long stamp;
int in_flight[2];
#ifdef CONFIG_SMP
- struct disk_stats *dkstats;
+ struct disk_stats __percpu *dkstats;
#else
struct disk_stats dkstats;
#endif
diff --git a/include/linux/gfs2_ondisk.h b/include/linux/gfs2_ondisk.h
index 81f90a5..4f44629 100644
--- a/include/linux/gfs2_ondisk.h
+++ b/include/linux/gfs2_ondisk.h
@@ -180,33 +180,6 @@
};
/*
- * quota linked list: user quotas and group quotas form two separate
- * singly linked lists. ll_next stores uids or gids of next quotas in the
- * linked list.
-
-Given the uid/gid, how to calculate the quota file offsets for the corresponding
-gfs2_quota structures on disk:
-
-for user quotas, given uid,
-offset = uid * sizeof(struct gfs2_quota);
-
-for group quotas, given gid,
-offset = (gid * sizeof(struct gfs2_quota)) + sizeof(struct gfs2_quota);
-
-
- uid:0 gid:0 uid:12 gid:12 uid:17 gid:17 uid:5142 gid:5142
-+-------+-------+ +-------+-------+ +-------+- - - -+ +- - - -+-------+
-| valid | valid | :: | valid | valid | :: | valid | inval | :: | inval | valid |
-+-------+-------+ +-------+-------+ +-------+- - - -+ +- - - -+-------+
-next:12 next:12 next:17 next:5142 next:NULL next:NULL
- | | | | |<-- user quota list |
- \______|___________/ \______|___________/ group quota list -->|
- | | |
- \__________________/ \_______________________________________/
-
-*/
-
-/*
* quota structure
*/
@@ -214,8 +187,7 @@
__be64 qu_limit;
__be64 qu_warn;
__be64 qu_value;
- __be32 qu_ll_next; /* location of next quota in list */
- __u8 qu_reserved[60];
+ __u8 qu_reserved[64];
};
/*
diff --git a/include/linux/i2c-smbus.h b/include/linux/i2c-smbus.h
new file mode 100644
index 0000000..63f57a8
--- /dev/null
+++ b/include/linux/i2c-smbus.h
@@ -0,0 +1,50 @@
+/*
+ * i2c-smbus.h - SMBus extensions to the I2C protocol
+ *
+ * Copyright (C) 2010 Jean Delvare <khali@linux-fr.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#ifndef _LINUX_I2C_SMBUS_H
+#define _LINUX_I2C_SMBUS_H
+
+#include <linux/i2c.h>
+
+
+/**
+ * i2c_smbus_alert_setup - platform data for the smbus_alert i2c client
+ * @alert_edge_triggered: whether the alert interrupt is edge (1) or level (0)
+ * triggered
+ * @irq: IRQ number, if the smbus_alert driver should take care of interrupt
+ * handling
+ *
+ * If irq is not specified, the smbus_alert driver doesn't take care of
+ * interrupt handling. In that case it is up to the I2C bus driver to either
+ * handle the interrupts or to poll for alerts.
+ *
+ * If irq is specified then it is crucial that alert_edge_triggered is
+ * properly set.
+ */
+struct i2c_smbus_alert_setup {
+ unsigned int alert_edge_triggered:1;
+ int irq;
+};
+
+struct i2c_client *i2c_setup_smbus_alert(struct i2c_adapter *adapter,
+ struct i2c_smbus_alert_setup *setup);
+int i2c_handle_smbus_alert(struct i2c_client *ara);
+
+#endif /* _LINUX_I2C_SMBUS_H */
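
A sketch of how an I2C bus driver might use these hooks; the foo_ names are invented, and leaving @irq unset (so the bus driver forwards alerts itself) is just one of the two modes described in the comment above:

    #include <linux/errno.h>
    #include <linux/i2c.h>
    #include <linux/i2c-smbus.h>

    static struct i2c_client *foo_ara;      /* Alert Response Address client */

    static int foo_register_alert(struct i2c_adapter *adap)
    {
            static struct i2c_smbus_alert_setup setup = {
                    .alert_edge_triggered = 1,
                    /* .irq left at 0: the bus driver reports alerts itself */
            };

            foo_ara = i2c_setup_smbus_alert(adap, &setup);
            return foo_ara ? 0 : -ENODEV;
    }

    /* Called from the bus driver's own SMBALERT# handling or polling loop. */
    static void foo_smbalert(void)
    {
            i2c_handle_smbus_alert(foo_ara);
    }
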
diff --git a/include/linux/i2c.h b/include/linux/i2c.h
index 02fc617..0a5da63 100644
--- a/include/linux/i2c.h
+++ b/include/linux/i2c.h
@@ -53,6 +53,7 @@
* on a bus (or read from them). Apart from two basic transfer functions to
* transmit one message at a time, a more complex version can be used to
* transmit an arbitrary number of messages without interruption.
+ * @count must be less than 64k since msg.len is u16.
*/
extern int i2c_master_send(struct i2c_client *client, const char *buf,
int count);
@@ -152,6 +153,13 @@
int (*suspend)(struct i2c_client *, pm_message_t mesg);
int (*resume)(struct i2c_client *);
+ /* Alert callback, for example for the SMBus alert protocol.
+ * The format and meaning of the data value depends on the protocol.
+ * For the SMBus alert protocol, there is a single bit of data passed
+ * as the alert response's low bit ("event flag").
+ */
+ void (*alert)(struct i2c_client *, unsigned int data);
+
/* a ioctl like command that can be used to perform specific functions
* with the device.
*/
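
On the client side, the new callback slots into struct i2c_driver like any other hook. A hypothetical sketch with invented foo_ names (probe/remove omitted for brevity):

    #include <linux/i2c.h>

    static void foo_alert(struct i2c_client *client, unsigned int data)
    {
            /* For SMBus alerts, data carries the alert response's low bit. */
            dev_warn(&client->dev, "SMBus alert, event flag = %u\n", data & 1);
    }

    static struct i2c_driver foo_driver = {
            .driver = { .name = "foo" },
            .alert  = foo_alert,
    };
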
diff --git a/include/linux/irq.h b/include/linux/irq.h
index d13492d..707ab12 100644
--- a/include/linux/irq.h
+++ b/include/linux/irq.h
@@ -400,7 +400,9 @@
/* Dynamic irq helper functions */
extern void dynamic_irq_init(unsigned int irq);
+void dynamic_irq_init_keep_chip_data(unsigned int irq);
extern void dynamic_irq_cleanup(unsigned int irq);
+void dynamic_irq_cleanup_keep_chip_data(unsigned int irq);
/* Set/get chip/data for an IRQ: */
extern int set_irq_chip(unsigned int irq, struct irq_chip *chip);
diff --git a/include/linux/kernel.h b/include/linux/kernel.h
index 1221d23..7f07074 100644
--- a/include/linux/kernel.h
+++ b/include/linux/kernel.h
@@ -44,6 +44,16 @@
#define ARRAY_SIZE(arr) (sizeof(arr) / sizeof((arr)[0]) + __must_be_array(arr))
+/*
+ * This looks more complex than it should be. But we need to
+ * get the type for the ~ right in round_down (it needs to be
+ * as wide as the result!), and we want to evaluate the macro
+ * arguments just once each.
+ */
+#define __round_mask(x, y) ((__typeof__(x))((y)-1))
+#define round_up(x, y) ((((x)-1) | __round_mask(x, y))+1)
+#define round_down(x, y) ((x) & ~__round_mask(x, y))
+
#define FIELD_SIZEOF(t, f) (sizeof(((t*)0)->f))
#define DIV_ROUND_UP(n,d) (((n) + (d) - 1) / (d))
#define roundup(x, y) ((((x) + ((y) - 1)) / (y)) * (y))
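
A stand-alone illustration of the new helpers; unlike roundup() just below them, they require y to be a power of two, and the __typeof__ cast keeps the mask as wide as x. The definitions are copied here so the example compiles on its own:

    #include <assert.h>
    #include <stdint.h>

    #define __round_mask(x, y) ((__typeof__(x))((y)-1))
    #define round_up(x, y) ((((x)-1) | __round_mask(x, y))+1)
    #define round_down(x, y) ((x) & ~__round_mask(x, y))

    int main(void)
    {
            assert(round_up(10, 8) == 16);
            assert(round_up(16, 8) == 16);              /* already aligned stays put */
            assert(round_down(10, 8) == 8);
            assert(round_down((uint64_t)10, 8) == 8);   /* mask widened to x's type */
            return 0;
    }
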
diff --git a/include/linux/kexec.h b/include/linux/kexec.h
index c356b69..03e8e8d 100644
--- a/include/linux/kexec.h
+++ b/include/linux/kexec.h
@@ -199,7 +199,7 @@
*/
extern struct resource crashk_res;
typedef u32 note_buf_t[KEXEC_NOTE_BYTES/4];
-extern note_buf_t *crash_notes;
+extern note_buf_t __percpu *crash_notes;
extern u32 vmcoreinfo_note[VMCOREINFO_NOTE_SIZE/4];
extern size_t vmcoreinfo_size;
extern size_t vmcoreinfo_max_size;
diff --git a/include/linux/mm.h b/include/linux/mm.h
index 8b2fa85..90957f1 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -12,6 +12,7 @@
#include <linux/prio_tree.h>
#include <linux/debug_locks.h>
#include <linux/mm_types.h>
+#include <linux/range.h>
struct mempolicy;
struct anon_vma;
@@ -1049,6 +1050,10 @@
extern unsigned long find_min_pfn_with_active_regions(void);
extern void free_bootmem_with_active_regions(int nid,
unsigned long max_low_pfn);
+int add_from_early_node_map(struct range *range, int az,
+ int nr_range, int nid);
+void *__alloc_memory_core_early(int nodeid, u64 size, u64 align,
+ u64 goal, u64 limit);
typedef int (*work_fn_t)(unsigned long, unsigned long, void *);
extern void work_with_active_regions(int nid, work_fn_t work_fn, void *data);
extern void sparse_memory_present_with_active_regions(int nid);
@@ -1081,11 +1086,7 @@
extern void si_meminfo_node(struct sysinfo *val, int nid);
extern int after_bootmem;
-#ifdef CONFIG_NUMA
extern void setup_per_cpu_pageset(void);
-#else
-static inline void setup_per_cpu_pageset(void) {}
-#endif
extern void zone_pcp_update(struct zone *zone);
@@ -1321,12 +1322,19 @@
const char * arch_vma_name(struct vm_area_struct *vma);
void print_vma_addr(char *prefix, unsigned long rip);
+void sparse_mem_maps_populate_node(struct page **map_map,
+ unsigned long pnum_begin,
+ unsigned long pnum_end,
+ unsigned long map_count,
+ int nodeid);
+
struct page *sparse_mem_map_populate(unsigned long pnum, int nid);
pgd_t *vmemmap_pgd_populate(unsigned long addr, int node);
pud_t *vmemmap_pud_populate(pgd_t *pgd, unsigned long addr, int node);
pmd_t *vmemmap_pmd_populate(pud_t *pud, unsigned long addr, int node);
pte_t *vmemmap_pte_populate(pmd_t *pmd, unsigned long addr, int node);
void *vmemmap_alloc_block(unsigned long size, int node);
+void *vmemmap_alloc_block_buf(unsigned long size, int node);
void vmemmap_verify(pte_t *, int, unsigned long, unsigned long);
int vmemmap_populate_basepages(struct page *start_page,
unsigned long pages, int node);
diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
index 30fe668..a01a103 100644
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -184,13 +184,7 @@
s8 stat_threshold;
s8 vm_stat_diff[NR_VM_ZONE_STAT_ITEMS];
#endif
-} ____cacheline_aligned_in_smp;
-
-#ifdef CONFIG_NUMA
-#define zone_pcp(__z, __cpu) ((__z)->pageset[(__cpu)])
-#else
-#define zone_pcp(__z, __cpu) (&(__z)->pageset[(__cpu)])
-#endif
+};
#endif /* !__GENERATING_BOUNDS.H */
@@ -306,10 +300,8 @@
*/
unsigned long min_unmapped_pages;
unsigned long min_slab_pages;
- struct per_cpu_pageset *pageset[NR_CPUS];
-#else
- struct per_cpu_pageset pageset[NR_CPUS];
#endif
+ struct per_cpu_pageset __percpu *pageset;
/*
* free areas of different sizes
*/
@@ -620,7 +612,9 @@
struct page_cgroup *node_page_cgroup;
#endif
#endif
+#ifndef CONFIG_NO_BOOTMEM
struct bootmem_data *bdata;
+#endif
#ifdef CONFIG_MEMORY_HOTPLUG
/*
* Must be held any time you expect node_start_pfn, node_present_pages
diff --git a/include/linux/module.h b/include/linux/module.h
index 6cb1a3c..dd618eb 100644
--- a/include/linux/module.h
+++ b/include/linux/module.h
@@ -17,7 +17,7 @@
#include <linux/moduleparam.h>
#include <linux/tracepoint.h>
-#include <asm/local.h>
+#include <linux/percpu.h>
#include <asm/module.h>
#include <trace/events/module.h>
@@ -363,11 +363,9 @@
/* Destruction function. */
void (*exit)(void);
-#ifdef CONFIG_SMP
- char *refptr;
-#else
- local_t ref;
-#endif
+ struct module_ref {
+ int count;
+ } __percpu *refptr;
#endif
#ifdef CONFIG_CONSTRUCTORS
@@ -454,25 +452,16 @@
#define symbol_put(x) __symbol_put(MODULE_SYMBOL_PREFIX #x)
void symbol_put_addr(void *addr);
-static inline local_t *__module_ref_addr(struct module *mod, int cpu)
-{
-#ifdef CONFIG_SMP
- return (local_t *) (mod->refptr + per_cpu_offset(cpu));
-#else
- return &mod->ref;
-#endif
-}
-
/* Sometimes we know we already have a refcount, and it's easier not
to handle the error case (which only happens with rmmod --wait). */
static inline void __module_get(struct module *module)
{
if (module) {
- unsigned int cpu = get_cpu();
- local_inc(__module_ref_addr(module, cpu));
+ preempt_disable();
+ __this_cpu_inc(module->refptr->count);
trace_module_get(module, _THIS_IP_,
- local_read(__module_ref_addr(module, cpu)));
- put_cpu();
+ __this_cpu_read(module->refptr->count));
+ preempt_enable();
}
}
@@ -481,15 +470,17 @@
int ret = 1;
if (module) {
- unsigned int cpu = get_cpu();
+ preempt_disable();
+
if (likely(module_is_live(module))) {
- local_inc(__module_ref_addr(module, cpu));
+ __this_cpu_inc(module->refptr->count);
trace_module_get(module, _THIS_IP_,
- local_read(__module_ref_addr(module, cpu)));
+ __this_cpu_read(module->refptr->count));
}
else
ret = 0;
- put_cpu();
+
+ preempt_enable();
}
return ret;
}
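With the refcount now a per-cpu counter bumped under preempt_disable(), callers of __module_get()/try_module_get() are unaffected. A sketch of the usual caller pattern (struct foo_ops and its do_something() hook are hypothetical):

    #include <linux/errno.h>
    #include <linux/module.h>

    struct foo_ops {
            struct module *owner;
            int (*do_something)(void);
    };

    /* Pin the provider module for the duration of the call. */
    static int foo_call_provider(struct foo_ops *ops)
    {
            int ret;

            if (!try_module_get(ops->owner))
                    return -ENODEV;
            ret = ops->do_something();
            module_put(ops->owner);
            return ret;
    }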
diff --git a/include/linux/mount.h b/include/linux/mount.h
index 5d52753..b5f43a3 100644
--- a/include/linux/mount.h
+++ b/include/linux/mount.h
@@ -66,7 +66,7 @@
int mnt_pinned;
int mnt_ghosts;
#ifdef CONFIG_SMP
- int *mnt_writers;
+ int __percpu *mnt_writers;
#else
int mnt_writers;
#endif
diff --git a/include/linux/nfs_fs_sb.h b/include/linux/nfs_fs_sb.h
index 34fc6be..6a2e44f 100644
--- a/include/linux/nfs_fs_sb.h
+++ b/include/linux/nfs_fs_sb.h
@@ -105,7 +105,7 @@
struct rpc_clnt * client; /* RPC client handle */
struct rpc_clnt * client_acl; /* ACL RPC client handle */
struct nlm_host *nlm_host; /* NLM client handle */
- struct nfs_iostats * io_stats; /* I/O statistics */
+ struct nfs_iostats __percpu *io_stats; /* I/O statistics */
struct backing_dev_info backing_dev_info;
atomic_long_t writeback; /* number of writeback pages */
int flags; /* various flags */
diff --git a/include/linux/nilfs2_fs.h b/include/linux/nilfs2_fs.h
index 3fe02cf..640702e 100644
--- a/include/linux/nilfs2_fs.h
+++ b/include/linux/nilfs2_fs.h
@@ -153,6 +153,7 @@
semantics also for data */
#define NILFS_MOUNT_NORECOVERY 0x4000 /* Disable write access during
mount-time recovery */
+#define NILFS_MOUNT_DISCARD 0x8000 /* Issue DISCARD requests */
/**
diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h
index 0be8243..9f688d2 100644
--- a/include/linux/pci_ids.h
+++ b/include/linux/pci_ids.h
@@ -770,7 +770,6 @@
#define PCI_VENDOR_ID_TI 0x104c
#define PCI_DEVICE_ID_TI_TVP4020 0x3d07
#define PCI_DEVICE_ID_TI_4450 0x8011
-#define PCI_DEVICE_ID_TI_TSB43AB22 0x8023
#define PCI_DEVICE_ID_TI_XX21_XX11 0x8031
#define PCI_DEVICE_ID_TI_XX21_XX11_FM 0x8033
#define PCI_DEVICE_ID_TI_XX21_XX11_SD 0x8034
@@ -2333,6 +2332,8 @@
#define PCI_VENDOR_ID_KORENIX 0x1982
#define PCI_DEVICE_ID_KORENIX_JETCARDF0 0x1600
#define PCI_DEVICE_ID_KORENIX_JETCARDF1 0x16ff
+#define PCI_DEVICE_ID_KORENIX_JETCARDF2 0x1700
+#define PCI_DEVICE_ID_KORENIX_JETCARDF3 0x17ff
#define PCI_VENDOR_ID_QMI 0x1a32
@@ -2697,6 +2698,7 @@
#define PCI_DEVICE_ID_NETMOS_9835 0x9835
#define PCI_DEVICE_ID_NETMOS_9845 0x9845
#define PCI_DEVICE_ID_NETMOS_9855 0x9855
+#define PCI_DEVICE_ID_NETMOS_9865 0x9865
#define PCI_DEVICE_ID_NETMOS_9901 0x9901
#define PCI_VENDOR_ID_3COM_2 0xa727
diff --git a/include/linux/percpu-defs.h b/include/linux/percpu-defs.h
index 5a5d6ce..68567c0 100644
--- a/include/linux/percpu-defs.h
+++ b/include/linux/percpu-defs.h
@@ -2,12 +2,6 @@
#define _LINUX_PERCPU_DEFS_H
/*
- * Determine the real variable name from the name visible in the
- * kernel sources.
- */
-#define per_cpu_var(var) per_cpu__##var
-
-/*
* Base implementations of per-CPU variable declarations and definitions, where
* the section in which the variable is to be placed is provided by the
* 'sec' argument. This may be used to affect the parameters governing the
@@ -18,13 +12,23 @@
* that section.
*/
#define __PCPU_ATTRS(sec) \
- __attribute__((section(PER_CPU_BASE_SECTION sec))) \
+ __percpu __attribute__((section(PER_CPU_BASE_SECTION sec))) \
PER_CPU_ATTRIBUTES
#define __PCPU_DUMMY_ATTRS \
__attribute__((section(".discard"), unused))
/*
+ * Macro which verifies @ptr is a percpu pointer without evaluating
+ * @ptr. This is to be used in percpu accessors to verify that the
+ * input parameter is a percpu pointer.
+ */
+#define __verify_pcpu_ptr(ptr) do { \
+ const void __percpu *__vpp_verify = (typeof(ptr))NULL; \
+ (void)__vpp_verify; \
+} while (0)
+
+/*
* s390 and alpha modules require percpu variables to be defined as
* weak to force the compiler to generate GOT based external
* references for them. This is necessary because percpu sections
@@ -56,24 +60,24 @@
*/
#define DECLARE_PER_CPU_SECTION(type, name, sec) \
extern __PCPU_DUMMY_ATTRS char __pcpu_scope_##name; \
- extern __PCPU_ATTRS(sec) __typeof__(type) per_cpu__##name
+ extern __PCPU_ATTRS(sec) __typeof__(type) name
#define DEFINE_PER_CPU_SECTION(type, name, sec) \
__PCPU_DUMMY_ATTRS char __pcpu_scope_##name; \
extern __PCPU_DUMMY_ATTRS char __pcpu_unique_##name; \
__PCPU_DUMMY_ATTRS char __pcpu_unique_##name; \
__PCPU_ATTRS(sec) PER_CPU_DEF_ATTRIBUTES __weak \
- __typeof__(type) per_cpu__##name
+ __typeof__(type) name
#else
/*
* Normal declaration and definition macros.
*/
#define DECLARE_PER_CPU_SECTION(type, name, sec) \
- extern __PCPU_ATTRS(sec) __typeof__(type) per_cpu__##name
+ extern __PCPU_ATTRS(sec) __typeof__(type) name
#define DEFINE_PER_CPU_SECTION(type, name, sec) \
__PCPU_ATTRS(sec) PER_CPU_DEF_ATTRIBUTES \
- __typeof__(type) per_cpu__##name
+ __typeof__(type) name
#endif
/*
@@ -135,10 +139,16 @@
__aligned(PAGE_SIZE)
/*
- * Intermodule exports for per-CPU variables.
+ * Intermodule exports for per-CPU variables. sparse forgets about the
+ * address space across EXPORT_SYMBOL(), so turn EXPORT_SYMBOL() into a
+ * noop when __CHECKER__ is defined.
*/
-#define EXPORT_PER_CPU_SYMBOL(var) EXPORT_SYMBOL(per_cpu__##var)
-#define EXPORT_PER_CPU_SYMBOL_GPL(var) EXPORT_SYMBOL_GPL(per_cpu__##var)
-
+#ifndef __CHECKER__
+#define EXPORT_PER_CPU_SYMBOL(var) EXPORT_SYMBOL(var)
+#define EXPORT_PER_CPU_SYMBOL_GPL(var) EXPORT_SYMBOL_GPL(var)
+#else
+#define EXPORT_PER_CPU_SYMBOL(var)
+#define EXPORT_PER_CPU_SYMBOL_GPL(var)
+#endif
#endif /* _LINUX_PERCPU_DEFS_H */
diff --git a/include/linux/percpu.h b/include/linux/percpu.h
index cf5efbc..a93e5bf 100644
--- a/include/linux/percpu.h
+++ b/include/linux/percpu.h
@@ -27,10 +27,17 @@
* we force a syntax error here if it isn't.
*/
#define get_cpu_var(var) (*({ \
- extern int simple_identifier_##var(void); \
preempt_disable(); \
&__get_cpu_var(var); }))
-#define put_cpu_var(var) preempt_enable()
+
+/*
+ * The weird & is necessary because sparse considers (void)(var) to be
+ * a direct dereference of percpu variable (var).
+ */
+#define put_cpu_var(var) do { \
+ (void)&(var); \
+ preempt_enable(); \
+} while (0)
#ifdef CONFIG_SMP
@@ -127,9 +134,9 @@
*/
#define per_cpu_ptr(ptr, cpu) SHIFT_PERCPU_PTR((ptr), per_cpu_offset((cpu)))
-extern void *__alloc_reserved_percpu(size_t size, size_t align);
-extern void *__alloc_percpu(size_t size, size_t align);
-extern void free_percpu(void *__pdata);
+extern void __percpu *__alloc_reserved_percpu(size_t size, size_t align);
+extern void __percpu *__alloc_percpu(size_t size, size_t align);
+extern void free_percpu(void __percpu *__pdata);
extern phys_addr_t per_cpu_ptr_to_phys(void *addr);
#ifndef CONFIG_HAVE_SETUP_PER_CPU_AREA
@@ -140,7 +147,7 @@
#define per_cpu_ptr(ptr, cpu) ({ (void)(cpu); (ptr); })
-static inline void *__alloc_percpu(size_t size, size_t align)
+static inline void __percpu *__alloc_percpu(size_t size, size_t align)
{
/*
* Can't easily make larger alignment work with kmalloc. WARN
@@ -151,7 +158,7 @@
return kzalloc(size, GFP_KERNEL);
}
-static inline void free_percpu(void *p)
+static inline void free_percpu(void __percpu *p)
{
kfree(p);
}
@@ -171,7 +178,7 @@
#endif /* CONFIG_SMP */
#define alloc_percpu(type) \
- (typeof(type) *)__alloc_percpu(sizeof(type), __alignof__(type))
+ (typeof(type) __percpu *)__alloc_percpu(sizeof(type), __alignof__(type))
/*
* Optional methods for optimized non-lvalue per-cpu variable access.
@@ -188,17 +195,19 @@
#ifndef percpu_read
# define percpu_read(var) \
({ \
- typeof(per_cpu_var(var)) __tmp_var__; \
- __tmp_var__ = get_cpu_var(var); \
- put_cpu_var(var); \
- __tmp_var__; \
+ typeof(var) *pr_ptr__ = &(var); \
+ typeof(var) pr_ret__; \
+ pr_ret__ = get_cpu_var(*pr_ptr__); \
+ put_cpu_var(*pr_ptr__); \
+ pr_ret__; \
})
#endif
#define __percpu_generic_to_op(var, val, op) \
do { \
- get_cpu_var(var) op val; \
- put_cpu_var(var); \
+ typeof(var) *pgto_ptr__ = &(var); \
+ get_cpu_var(*pgto_ptr__) op val; \
+ put_cpu_var(*pgto_ptr__); \
} while (0)
#ifndef percpu_write
@@ -234,6 +243,7 @@
#define __pcpu_size_call_return(stem, variable) \
({ typeof(variable) pscr_ret__; \
+ __verify_pcpu_ptr(&(variable)); \
switch(sizeof(variable)) { \
case 1: pscr_ret__ = stem##1(variable);break; \
case 2: pscr_ret__ = stem##2(variable);break; \
@@ -247,6 +257,7 @@
#define __pcpu_size_call(stem, variable, ...) \
do { \
+ __verify_pcpu_ptr(&(variable)); \
switch(sizeof(variable)) { \
case 1: stem##1(variable, __VA_ARGS__);break; \
case 2: stem##2(variable, __VA_ARGS__);break; \
@@ -259,8 +270,7 @@
/*
* Optimized manipulation for memory allocated through the per cpu
- * allocator or for addresses of per cpu variables (can be determined
- * using per_cpu_var(xx).
+ * allocator or for addresses of per cpu variables.
*
* These operation guarantee exclusivity of access for other operations
* on the *same* processor. The assumption is that per cpu data is only
@@ -311,7 +321,7 @@
#define _this_cpu_generic_to_op(pcp, val, op) \
do { \
preempt_disable(); \
- *__this_cpu_ptr(&pcp) op val; \
+ *__this_cpu_ptr(&(pcp)) op val; \
preempt_enable(); \
} while (0)
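With __percpu annotations on the allocator, sparse can now catch code that treats a per-cpu pointer as a normal one. A minimal sketch of dynamic per-cpu allocation under the annotated API (struct foo_stats and the helpers are hypothetical):

    #include <linux/errno.h>
    #include <linux/percpu.h>

    struct foo_stats {
            unsigned long events;
    };

    static struct foo_stats __percpu *foo_stats;

    static int foo_stats_init(void)
    {
            foo_stats = alloc_percpu(struct foo_stats);  /* typed __percpu * */
            return foo_stats ? 0 : -ENOMEM;
    }

    static void foo_count_event(void)
    {
            this_cpu_inc(foo_stats->events);     /* operates on this CPU's copy */
    }

    static void foo_stats_exit(void)
    {
            free_percpu(foo_stats);              /* now takes void __percpu * */
    }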
diff --git a/include/linux/percpu_counter.h b/include/linux/percpu_counter.h
index 794662b..c88d67b 100644
--- a/include/linux/percpu_counter.h
+++ b/include/linux/percpu_counter.h
@@ -21,7 +21,7 @@
#ifdef CONFIG_HOTPLUG_CPU
struct list_head list; /* All percpu_counters are on a list */
#endif
- s32 *counters;
+ s32 __percpu *counters;
};
extern int percpu_counter_batch;
diff --git a/include/linux/range.h b/include/linux/range.h
new file mode 100644
index 0000000..bd184a5
--- /dev/null
+++ b/include/linux/range.h
@@ -0,0 +1,30 @@
+#ifndef _LINUX_RANGE_H
+#define _LINUX_RANGE_H
+
+struct range {
+ u64 start;
+ u64 end;
+};
+
+int add_range(struct range *range, int az, int nr_range,
+ u64 start, u64 end);
+
+
+int add_range_with_merge(struct range *range, int az, int nr_range,
+ u64 start, u64 end);
+
+void subtract_range(struct range *range, int az, u64 start, u64 end);
+
+int clean_sort_range(struct range *range, int az);
+
+void sort_range(struct range *range, int nr_range);
+
+#define MAX_RESOURCE ((resource_size_t)~0)
+static inline resource_size_t cap_resource(u64 val)
+{
+ if (val > MAX_RESOURCE)
+ return MAX_RESOURCE;
+
+ return val;
+}
+#endif
diff --git a/include/linux/security.h b/include/linux/security.h
index 2c627d3..233d20b 100644
--- a/include/linux/security.h
+++ b/include/linux/security.h
@@ -76,7 +76,7 @@
extern int cap_task_setscheduler(struct task_struct *p, int policy, struct sched_param *lp);
extern int cap_task_setioprio(struct task_struct *p, int ioprio);
extern int cap_task_setnice(struct task_struct *p, int nice);
-extern int cap_syslog(int type);
+extern int cap_syslog(int type, bool from_file);
extern int cap_vm_enough_memory(struct mm_struct *mm, long pages);
struct msghdr;
@@ -95,6 +95,8 @@
extern int cap_netlink_send(struct sock *sk, struct sk_buff *skb);
extern int cap_netlink_recv(struct sk_buff *skb, int cap);
+void reset_security_ops(void);
+
#ifdef CONFIG_MMU
extern unsigned long mmap_min_addr;
extern unsigned long dac_mmap_min_addr;
@@ -985,6 +987,7 @@
* Check permissions on incoming network packets. This hook is distinct
* from Netfilter's IP input hooks since it is the first time that the
* incoming sk_buff @skb has been associated with a particular socket, @sk.
+ * Must not sleep inside this hook because some callers hold spinlocks.
* @sk contains the sock (not socket) associated with the incoming sk_buff.
* @skb contains the incoming network data.
* @socket_getpeersec_stream:
@@ -1348,6 +1351,7 @@
* logging to the console.
* See the syslog(2) manual page for an explanation of the @type values.
* @type contains the type of action.
+ * @from_file indicates the context of action (if it came from /proc).
* Return 0 if permission is granted.
* @settime:
* Check permission to change the system time.
@@ -1462,7 +1466,7 @@
int (*sysctl) (struct ctl_table *table, int op);
int (*quotactl) (int cmds, int type, int id, struct super_block *sb);
int (*quota_on) (struct dentry *dentry);
- int (*syslog) (int type);
+ int (*syslog) (int type, bool from_file);
int (*settime) (struct timespec *ts, struct timezone *tz);
int (*vm_enough_memory) (struct mm_struct *mm, long pages);
@@ -1761,7 +1765,7 @@
int security_sysctl(struct ctl_table *table, int op);
int security_quotactl(int cmds, int type, int id, struct super_block *sb);
int security_quota_on(struct dentry *dentry);
-int security_syslog(int type);
+int security_syslog(int type, bool from_file);
int security_settime(struct timespec *ts, struct timezone *tz);
int security_vm_enough_memory(long pages);
int security_vm_enough_memory_mm(struct mm_struct *mm, long pages);
@@ -2007,9 +2011,9 @@
return 0;
}
-static inline int security_syslog(int type)
+static inline int security_syslog(int type, bool from_file)
{
- return cap_syslog(type);
+ return cap_syslog(type, from_file);
}
static inline int security_settime(struct timespec *ts, struct timezone *tz)
diff --git a/include/linux/srcu.h b/include/linux/srcu.h
index 3084f80..4d5ecb2 100644
--- a/include/linux/srcu.h
+++ b/include/linux/srcu.h
@@ -33,7 +33,7 @@
struct srcu_struct {
int completed;
- struct srcu_struct_array *per_cpu_ref;
+ struct srcu_struct_array __percpu *per_cpu_ref;
struct mutex mutex;
#ifdef CONFIG_DEBUG_LOCK_ALLOC
struct lockdep_map dep_map;
diff --git a/include/linux/syslog.h b/include/linux/syslog.h
new file mode 100644
index 0000000..3891139
--- /dev/null
+++ b/include/linux/syslog.h
@@ -0,0 +1,52 @@
+/* Syslog internals
+ *
+ * Copyright 2010 Canonical, Ltd.
+ * Author: Kees Cook <kees.cook@canonical.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; see the file COPYING. If not, write to
+ * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#ifndef _LINUX_SYSLOG_H
+#define _LINUX_SYSLOG_H
+
+/* Close the log. Currently a NOP. */
+#define SYSLOG_ACTION_CLOSE 0
+/* Open the log. Currently a NOP. */
+#define SYSLOG_ACTION_OPEN 1
+/* Read from the log. */
+#define SYSLOG_ACTION_READ 2
+/* Read all messages remaining in the ring buffer. */
+#define SYSLOG_ACTION_READ_ALL 3
+/* Read and clear all messages remaining in the ring buffer */
+#define SYSLOG_ACTION_READ_CLEAR 4
+/* Clear ring buffer. */
+#define SYSLOG_ACTION_CLEAR 5
+/* Disable printk's to console */
+#define SYSLOG_ACTION_CONSOLE_OFF 6
+/* Enable printk's to console */
+#define SYSLOG_ACTION_CONSOLE_ON 7
+/* Set level of messages printed to console */
+#define SYSLOG_ACTION_CONSOLE_LEVEL 8
+/* Return number of unread characters in the log buffer */
+#define SYSLOG_ACTION_SIZE_UNREAD 9
+/* Return size of the log buffer */
+#define SYSLOG_ACTION_SIZE_BUFFER 10
+
+#define SYSLOG_FROM_CALL 0
+#define SYSLOG_FROM_FILE 1
+
+int do_syslog(int type, char __user *buf, int count, bool from_file);
+
+#endif /* _LINUX_SYSLOG_H */
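The new header centralizes the syslog(2) action codes so the syscall path and /proc/kmsg can share one do_syslog() implementation, with from_file recording which entry point was used. A sketch of a file-backed caller (the wrapper name is hypothetical; the call itself mirrors what a /proc/kmsg read would do):

    #include <linux/syslog.h>

    /* Drain kernel log messages on behalf of a /proc-style reader. */
    static int foo_kmsg_read(char __user *buf, int count)
    {
            /* May block until messages arrive; interruptible by signals. */
            return do_syslog(SYSLOG_ACTION_READ, buf, count, SYSLOG_FROM_FILE);
    }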
diff --git a/include/linux/tty.h b/include/linux/tty.h
index 6abfcf5..d96e588 100644
--- a/include/linux/tty.h
+++ b/include/linux/tty.h
@@ -68,6 +68,16 @@
unsigned long data[0];
};
+/*
+ * We default to dicing tty buffer allocations to this many characters
+ * in order to avoid multiple page allocations. We assume tty_buffer itself
+ * is under 256 bytes. See tty_buffer_find for the allocation logic this
+ * must match
+ */
+
+#define TTY_BUFFER_PAGE ((PAGE_SIZE - 256) / 2)
+
+
struct tty_bufhead {
struct delayed_work work;
spinlock_t lock;
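Worked out for the common 4 KiB page size, TTY_BUFFER_PAGE is (4096 - 256) / 2 = 1920: one page then holds the tty_buffer header (assumed to stay under 256 bytes) plus 1920 data bytes and 1920 matching flag bytes, which is why the headroom is split in two.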
diff --git a/include/linux/tty_flip.h b/include/linux/tty_flip.h
index eb677cf..9239d03 100644
--- a/include/linux/tty_flip.h
+++ b/include/linux/tty_flip.h
@@ -2,8 +2,8 @@
#define _LINUX_TTY_FLIP_H
extern int tty_buffer_request_room(struct tty_struct *tty, size_t size);
-extern int tty_insert_flip_string(struct tty_struct *tty, const unsigned char *chars, size_t size);
extern int tty_insert_flip_string_flags(struct tty_struct *tty, const unsigned char *chars, const char *flags, size_t size);
+extern int tty_insert_flip_string_fixed_flag(struct tty_struct *tty, const unsigned char *chars, char flag, size_t size);
extern int tty_prepare_flip_string(struct tty_struct *tty, unsigned char **chars, size_t size);
extern int tty_prepare_flip_string_flags(struct tty_struct *tty, unsigned char **chars, char **flags, size_t size);
void tty_schedule_flip(struct tty_struct *tty);
@@ -20,4 +20,9 @@
return tty_insert_flip_string_flags(tty, &ch, &flag, 1);
}
+static inline int tty_insert_flip_string(struct tty_struct *tty, const unsigned char *chars, size_t size)
+{
+ return tty_insert_flip_string_fixed_flag(tty, chars, TTY_NORMAL, size);
+}
+
#endif /* _LINUX_TTY_FLIP_H */
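tty_insert_flip_string() is now a thin inline over the new fixed-flag helper, stamping every character TTY_NORMAL, so the usual receive path is unchanged. A sketch (the foo_rx() wrapper and its arguments stand in for a real driver's receive handler):

    #include <linux/tty.h>
    #include <linux/tty_flip.h>

    static void foo_rx(struct tty_struct *tty, const unsigned char *buf, int len)
    {
            tty_insert_flip_string(tty, buf, len);  /* all bytes get TTY_NORMAL */
            tty_flip_buffer_push(tty);
    }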
diff --git a/include/linux/usb.h b/include/linux/usb.h
index 332eaea..3492abf 100644
--- a/include/linux/usb.h
+++ b/include/linux/usb.h
@@ -122,7 +122,6 @@
* number from the USB core by calling usb_register_dev().
* @condition: binding state of the interface: not bound, binding
* (in probe()), bound to a driver, or unbinding (in disconnect())
- * @is_active: flag set when the interface is bound and not suspended.
* @sysfs_files_created: sysfs attributes exist
* @ep_devs_created: endpoint child pseudo-devices exist
* @unregistering: flag set when the interface is being unregistered
@@ -135,8 +134,7 @@
* @dev: driver model's view of this device
* @usb_dev: if an interface is bound to the USB major, this will point
* to the sysfs representation for that device.
- * @pm_usage_cnt: PM usage counter for this interface; autosuspend is not
- * allowed unless the counter is 0.
+ * @pm_usage_cnt: PM usage counter for this interface
* @reset_ws: Used for scheduling resets from atomic context.
* @reset_running: set to 1 if the interface is currently running a
* queued reset so that usb_cancel_queued_reset() doesn't try to
@@ -184,7 +182,6 @@
int minor; /* minor number this interface is
* bound to */
enum usb_interface_condition condition; /* state of binding */
- unsigned is_active:1; /* the interface is not suspended */
unsigned sysfs_files_created:1; /* the sysfs attributes exist */
unsigned ep_devs_created:1; /* endpoint "devices" exist */
unsigned unregistering:1; /* unregistration is in progress */
@@ -401,7 +398,6 @@
* @portnum: parent port number (origin 1)
* @level: number of USB hub ancestors
* @can_submit: URBs may be submitted
- * @discon_suspended: disconnected while suspended
* @persist_enabled: USB_PERSIST enabled for this device
* @have_langid: whether string_langid is valid
* @authorized: policy has said we can use it;
@@ -421,20 +417,15 @@
* @usbfs_dentry: usbfs dentry entry for the device
* @maxchild: number of ports if hub
* @children: child devices - USB devices that are attached to this hub
- * @pm_usage_cnt: usage counter for autosuspend
* @quirks: quirks of the whole device
* @urbnum: number of URBs submitted for the whole device
* @active_duration: total time device is not suspended
- * @autosuspend: for delayed autosuspends
- * @autoresume: for autoresumes requested while in_interrupt
- * @pm_mutex: protects PM operations
* @last_busy: time of last use
* @autosuspend_delay: in jiffies
* @connect_time: time device was first connected
* @do_remote_wakeup: remote wakeup should be enabled
* @reset_resume: needs reset instead of resume
* @autosuspend_disabled: autosuspend disabled by the user
- * @skip_sys_resume: skip the next system resume
* @wusb_dev: if this is a Wireless USB device, link to the WUSB
* specific data for the device.
* @slot_id: Slot ID assigned by xHCI
@@ -475,7 +466,6 @@
u8 level;
unsigned can_submit:1;
- unsigned discon_suspended:1;
unsigned persist_enabled:1;
unsigned have_langid:1;
unsigned authorized:1;
@@ -499,17 +489,12 @@
int maxchild;
struct usb_device *children[USB_MAXCHILDREN];
- int pm_usage_cnt;
u32 quirks;
atomic_t urbnum;
unsigned long active_duration;
#ifdef CONFIG_PM
- struct delayed_work autosuspend;
- struct work_struct autoresume;
- struct mutex pm_mutex;
-
unsigned long last_busy;
int autosuspend_delay;
unsigned long connect_time;
@@ -517,7 +502,6 @@
unsigned do_remote_wakeup:1;
unsigned reset_resume:1;
unsigned autosuspend_disabled:1;
- unsigned skip_sys_resume:1;
#endif
struct wusb_dev *wusb_dev;
int slot_id;
@@ -542,21 +526,15 @@
/* USB autosuspend and autoresume */
#ifdef CONFIG_USB_SUSPEND
+extern int usb_enable_autosuspend(struct usb_device *udev);
+extern int usb_disable_autosuspend(struct usb_device *udev);
+
extern int usb_autopm_get_interface(struct usb_interface *intf);
extern void usb_autopm_put_interface(struct usb_interface *intf);
extern int usb_autopm_get_interface_async(struct usb_interface *intf);
extern void usb_autopm_put_interface_async(struct usb_interface *intf);
-
-static inline void usb_autopm_get_interface_no_resume(
- struct usb_interface *intf)
-{
- atomic_inc(&intf->pm_usage_cnt);
-}
-static inline void usb_autopm_put_interface_no_suspend(
- struct usb_interface *intf)
-{
- atomic_dec(&intf->pm_usage_cnt);
-}
+extern void usb_autopm_get_interface_no_resume(struct usb_interface *intf);
+extern void usb_autopm_put_interface_no_suspend(struct usb_interface *intf);
static inline void usb_mark_last_busy(struct usb_device *udev)
{
@@ -565,6 +543,11 @@
#else
+static inline int usb_enable_autosuspend(struct usb_device *udev)
+{ return 0; }
+static inline int usb_disable_autosuspend(struct usb_device *udev)
+{ return 0; }
+
static inline int usb_autopm_get_interface(struct usb_interface *intf)
{ return 0; }
static inline int usb_autopm_get_interface_async(struct usb_interface *intf)
@@ -1583,14 +1566,18 @@
extern void usb_unregister_notify(struct notifier_block *nb);
#ifdef DEBUG
-#define dbg(format, arg...) printk(KERN_DEBUG "%s: " format "\n" , \
- __FILE__ , ## arg)
+#define dbg(format, arg...) \
+ printk(KERN_DEBUG "%s: " format "\n", __FILE__, ##arg)
#else
-#define dbg(format, arg...) do {} while (0)
+#define dbg(format, arg...) \
+do { \
+ if (0) \
+ printk(KERN_DEBUG "%s: " format "\n", __FILE__, ##arg); \
+} while (0)
#endif
-#define err(format, arg...) printk(KERN_ERR KBUILD_MODNAME ": " \
- format "\n" , ## arg)
+#define err(format, arg...) \
+ printk(KERN_ERR KBUILD_MODNAME ": " format "\n", ##arg)
/* debugfs stuff */
extern struct dentry *usb_debug_root;
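Autosuspend policy is now toggled through usb_enable_autosuspend()/usb_disable_autosuspend() (with stubs when CONFIG_USB_SUSPEND is off) rather than drivers poking pm_usage_cnt directly. A sketch of a probe() routine for a device that cannot tolerate autosuspend (the driver and its policy are hypothetical):

    #include <linux/usb.h>

    static int foo_probe(struct usb_interface *intf,
                         const struct usb_device_id *id)
    {
            struct usb_device *udev = interface_to_usbdev(intf);

            usb_disable_autosuspend(udev);  /* keep this device powered */
            return 0;
    }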
diff --git a/include/linux/usb/Kbuild b/include/linux/usb/Kbuild
index 54c4463..29fd73b 100644
--- a/include/linux/usb/Kbuild
+++ b/include/linux/usb/Kbuild
@@ -5,4 +5,3 @@
header-y += midi.h
header-y += g_printer.h
header-y += tmc.h
-header-y += vstusb.h
diff --git a/include/linux/usb/atmel_usba_udc.h b/include/linux/usb/atmel_usba_udc.h
index 6311fa2..baf41c8 100644
--- a/include/linux/usb/atmel_usba_udc.h
+++ b/include/linux/usb/atmel_usba_udc.h
@@ -15,6 +15,7 @@
struct usba_platform_data {
int vbus_pin;
+ int vbus_pin_inverted;
int num_ep;
struct usba_ep_data ep[0];
};
diff --git a/include/linux/usb/ch9.h b/include/linux/usb/ch9.h
index 94012e6..e58369f 100644
--- a/include/linux/usb/ch9.h
+++ b/include/linux/usb/ch9.h
@@ -775,7 +775,7 @@
USB_SPEED_UNKNOWN = 0, /* enumerating */
USB_SPEED_LOW, USB_SPEED_FULL, /* usb 1.1 */
USB_SPEED_HIGH, /* usb 2.0 */
- USB_SPEED_VARIABLE, /* wireless (usb 2.5) */
+ USB_SPEED_WIRELESS, /* wireless (usb 2.5) */
USB_SPEED_SUPER, /* usb 3.0 */
};
diff --git a/include/linux/usb/musb.h b/include/linux/usb/musb.h
index 5dc2f22..7acef02 100644
--- a/include/linux/usb/musb.h
+++ b/include/linux/usb/musb.h
@@ -30,26 +30,26 @@
struct musb_hdrc_config {
/* MUSB configuration-specific details */
unsigned multipoint:1; /* multipoint device */
- unsigned dyn_fifo:1; /* supports dynamic fifo sizing */
- unsigned soft_con:1; /* soft connect required */
- unsigned utm_16:1; /* utm data witdh is 16 bits */
+ unsigned dyn_fifo:1 __deprecated; /* supports dynamic fifo sizing */
+ unsigned soft_con:1 __deprecated; /* soft connect required */
+ unsigned utm_16:1 __deprecated; /* utm data width is 16 bits */
unsigned big_endian:1; /* true if CPU uses big-endian */
unsigned mult_bulk_tx:1; /* Tx ep required for multbulk pkts */
unsigned mult_bulk_rx:1; /* Rx ep required for multbulk pkts */
unsigned high_iso_tx:1; /* Tx ep required for HB iso */
unsigned high_iso_rx:1; /* Rx ep required for HD iso */
- unsigned dma:1; /* supports DMA */
- unsigned vendor_req:1; /* vendor registers required */
+ unsigned dma:1 __deprecated; /* supports DMA */
+ unsigned vendor_req:1 __deprecated; /* vendor registers required */
u8 num_eps; /* number of endpoints _with_ ep0 */
- u8 dma_channels; /* number of dma channels */
+ u8 dma_channels __deprecated; /* number of dma channels */
u8 dyn_fifo_size; /* dynamic size in bytes */
- u8 vendor_ctrl; /* vendor control reg width */
- u8 vendor_stat; /* vendor status reg witdh */
- u8 dma_req_chan; /* bitmask for required dma channels */
+ u8 vendor_ctrl __deprecated; /* vendor control reg width */
+ u8 vendor_stat __deprecated; /* vendor status reg width */
+ u8 dma_req_chan __deprecated; /* bitmask for required dma channels */
u8 ram_bits; /* ram address size */
- struct musb_hdrc_eps_bits *eps_bits;
+ struct musb_hdrc_eps_bits *eps_bits __deprecated;
#ifdef CONFIG_BLACKFIN
/* A GPIO controlling VRSEL in Blackfin */
unsigned int gpio_vrsel;
@@ -76,6 +76,9 @@
/* (HOST or OTG) msec/2 after VBUS on till power good */
u8 potpgt;
+ /* (HOST or OTG) program PHY for external Vbus */
+ unsigned extvbus:1;
+
/* Power the device on or off */
int (*set_power)(int state);
diff --git a/include/linux/usb/otg.h b/include/linux/usb/otg.h
index fef0972..f8302d0 100644
--- a/include/linux/usb/otg.h
+++ b/include/linux/usb/otg.h
@@ -9,6 +9,8 @@
#ifndef __LINUX_USB_OTG_H
#define __LINUX_USB_OTG_H
+#include <linux/notifier.h>
+
/* OTG defines lots of enumeration states before device reset */
enum usb_otg_state {
OTG_STATE_UNDEFINED = 0,
@@ -33,6 +35,14 @@
OTG_STATE_A_VBUS_ERR,
};
+enum usb_xceiv_events {
+ USB_EVENT_NONE, /* no events or cable disconnected */
+ USB_EVENT_VBUS, /* vbus valid event */
+ USB_EVENT_ID, /* id was grounded */
+ USB_EVENT_CHARGER, /* usb dedicated charger */
+ USB_EVENT_ENUMERATED, /* gadget driver enumerated */
+};
+
#define USB_OTG_PULLUP_ID (1 << 0)
#define USB_OTG_PULLDOWN_DP (1 << 1)
#define USB_OTG_PULLDOWN_DM (1 << 2)
@@ -70,6 +80,9 @@
struct otg_io_access_ops *io_ops;
void __iomem *io_priv;
+ /* for notification of usb_xceiv_events */
+ struct blocking_notifier_head notifier;
+
/* to pass extra port status to the root hub */
u16 port_status;
u16 port_change;
@@ -213,6 +226,18 @@
return otg->start_srp(otg);
}
+/* notifiers */
+static inline int
+otg_register_notifier(struct otg_transceiver *otg, struct notifier_block *nb)
+{
+ return blocking_notifier_chain_register(&otg->notifier, nb);
+}
+
+static inline void
+otg_unregister_notifier(struct otg_transceiver *otg, struct notifier_block *nb)
+{
+ blocking_notifier_chain_unregister(&otg->notifier, nb);
+}
/* for OTG controller drivers (and maybe other stuff) */
extern int usb_bus_start_enum(struct usb_bus *bus, unsigned port_num);
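Each transceiver now carries a blocking notifier head, so consumers such as charger drivers can subscribe to the usb_xceiv_events above. A sketch of a listener (the handler body is hypothetical; registration would typically happen at probe time against a transceiver obtained from otg_get_transceiver()):

    #include <linux/kernel.h>
    #include <linux/notifier.h>
    #include <linux/usb/otg.h>

    static int foo_vbus_notify(struct notifier_block *nb, unsigned long event,
                               void *unused)
    {
            if (event == USB_EVENT_VBUS)
                    pr_info("VBUS valid, start charging\n");
            else if (event == USB_EVENT_NONE)
                    pr_info("cable removed, stop charging\n");
            return NOTIFY_OK;
    }

    static struct notifier_block foo_vbus_nb = {
            .notifier_call = foo_vbus_notify,
    };

    /* at probe time: otg_register_notifier(xceiv, &foo_vbus_nb); */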
diff --git a/include/linux/usb/quirks.h b/include/linux/usb/quirks.h
index 2526f3b..0a555dd 100644
--- a/include/linux/usb/quirks.h
+++ b/include/linux/usb/quirks.h
@@ -19,4 +19,7 @@
/* device can't handle its Configuration or Interface strings */
#define USB_QUIRK_CONFIG_INTF_STRINGS 0x00000008
+/* device will morph if reset, don't use reset for handling errors */
+#define USB_QUIRK_RESET_MORPHS 0x00000010
+
#endif /* __LINUX_USB_QUIRKS_H */
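The new quirk is meant to be set in a USB core quirk table for devices that switch personality on reset. A sketch of such a table entry (the 0x1234/0x5678 IDs are placeholders, not a real device):

    /* hypothetical quirk-table entry */
    { USB_DEVICE(0x1234, 0x5678), .driver_info = USB_QUIRK_RESET_MORPHS },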
diff --git a/include/linux/usb/serial.h b/include/linux/usb/serial.h
index 1819396..0a458b8 100644
--- a/include/linux/usb/serial.h
+++ b/include/linux/usb/serial.h
@@ -351,14 +351,11 @@
/* Use our own dbg macro */
#undef dbg
-#define dbg(format, arg...) \
- do { \
- if (debug) \
- printk(KERN_DEBUG "%s: " format "\n" , __FILE__ , \
- ## arg); \
- } while (0)
-
-
+#define dbg(format, arg...) \
+do { \
+ if (debug) \
+ printk(KERN_DEBUG "%s: " format "\n", __FILE__, ##arg); \
+} while (0)
#endif /* __LINUX_USB_SERIAL_H */
diff --git a/include/linux/usb/vstusb.h b/include/linux/usb/vstusb.h
deleted file mode 100644
index 1cfac67..0000000
--- a/include/linux/usb/vstusb.h
+++ /dev/null
@@ -1,71 +0,0 @@
-/*****************************************************************************
- * File: drivers/usb/misc/vstusb.h
- *
- * Purpose: Support for the bulk USB Vernier Spectrophotometers
- *
- * Author: EQware Engineering, Inc.
- * Oregon City, OR, USA 97045
- *
- * Copyright: 2007, 2008
- * Vernier Software & Technology
- * Beaverton, OR, USA 97005
- *
- * Web: www.vernier.com
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- *****************************************************************************/
-/*****************************************************************************
- *
- * The vstusb module is a standard usb 'client' driver running on top of the
- * standard usb host controller stack.
- *
- * In general, vstusb supports standard bulk usb pipes. It supports multiple
- * devices and multiple pipes per device.
- *
- * The vstusb driver supports two interfaces:
- * 1 - ioctl SEND_PIPE/RECV_PIPE - a general bulk write/read msg
- * interface to any pipe with timeout support;
- * 2 - standard read/write with ioctl config - offers standard read/write
- * interface with ioctl configured pipes and timeouts.
- *
- * Both interfaces can be signal from other process and will abort its i/o
- * operation.
- *
- * A timeout of 0 means NO timeout. The user can still terminate the read via
- * signal.
- *
- * If using multiple threads with this driver, the user should ensure that
- * any reads, writes, or ioctls are complete before closing the device.
- * Changing read/write timeouts or pipes takes effect on next read/write.
- *
- *****************************************************************************/
-
-struct vstusb_args {
- union {
- /* this struct is used for IOCTL_VSTUSB_SEND_PIPE, *
- * IOCTL_VSTUSB_RECV_PIPE, and read()/write() fops */
- struct {
- void __user *buffer;
- size_t count;
- unsigned int timeout_ms;
- int pipe;
- };
-
- /* this one is used for IOCTL_VSTUSB_CONFIG_RW */
- struct {
- int rd_pipe;
- int rd_timeout_ms;
- int wr_pipe;
- int wr_timeout_ms;
- };
- };
-};
-
-#define VST_IOC_MAGIC 'L'
-#define VST_IOC_FIRST 0x20
-#define IOCTL_VSTUSB_SEND_PIPE _IO(VST_IOC_MAGIC, VST_IOC_FIRST)
-#define IOCTL_VSTUSB_RECV_PIPE _IO(VST_IOC_MAGIC, VST_IOC_FIRST + 1)
-#define IOCTL_VSTUSB_CONFIG_RW _IO(VST_IOC_MAGIC, VST_IOC_FIRST + 2)
diff --git a/include/linux/vmstat.h b/include/linux/vmstat.h
index ee03bba..117f0dd 100644
--- a/include/linux/vmstat.h
+++ b/include/linux/vmstat.h
@@ -78,22 +78,22 @@
static inline void __count_vm_event(enum vm_event_item item)
{
- __this_cpu_inc(per_cpu_var(vm_event_states).event[item]);
+ __this_cpu_inc(vm_event_states.event[item]);
}
static inline void count_vm_event(enum vm_event_item item)
{
- this_cpu_inc(per_cpu_var(vm_event_states).event[item]);
+ this_cpu_inc(vm_event_states.event[item]);
}
static inline void __count_vm_events(enum vm_event_item item, long delta)
{
- __this_cpu_add(per_cpu_var(vm_event_states).event[item], delta);
+ __this_cpu_add(vm_event_states.event[item], delta);
}
static inline void count_vm_events(enum vm_event_item item, long delta)
{
- this_cpu_add(per_cpu_var(vm_event_states).event[item], delta);
+ this_cpu_add(vm_event_states.event[item], delta);
}
extern void all_vm_events(unsigned long *);
diff --git a/include/linux/vt.h b/include/linux/vt.h
index d5dd0bc..778b7b2 100644
--- a/include/linux/vt.h
+++ b/include/linux/vt.h
@@ -27,7 +27,7 @@
#define VT_SETMODE 0x5602 /* set mode of active vt */
#define VT_AUTO 0x00 /* auto vt switching */
#define VT_PROCESS 0x01 /* process controls switching */
-#define VT_ACKACQ 0x02 /* acknowledge switch */
+#define VT_PROCESS_AUTO 0x02 /* process is notified of switching */
struct vt_stat {
unsigned short v_active; /* active vt */
@@ -38,6 +38,7 @@
#define VT_SENDSIG 0x5604 /* signal to send to bitmask of vts */
#define VT_RELDISP 0x5605 /* release display */
+#define VT_ACKACQ 0x02 /* acknowledge switch */
#define VT_ACTIVATE 0x5606 /* make vt active */
#define VT_WAITACTIVE 0x5607 /* wait for vt active */
diff --git a/include/rdma/ib_pack.h b/include/rdma/ib_pack.h
index d7fc45c..cbb50f4 100644
--- a/include/rdma/ib_pack.h
+++ b/include/rdma/ib_pack.h
@@ -232,6 +232,7 @@
void ib_ud_header_init(int payload_bytes,
int grh_present,
+ int immediate_present,
struct ib_ud_header *header);
int ib_ud_header_pack(struct ib_ud_header *header,
diff --git a/include/rdma/ib_verbs.h b/include/rdma/ib_verbs.h
index 09509ed..a585e0f 100644
--- a/include/rdma/ib_verbs.h
+++ b/include/rdma/ib_verbs.h
@@ -984,9 +984,9 @@
struct list_head event_handler_list;
spinlock_t event_handler_lock;
+ spinlock_t client_data_lock;
struct list_head core_list;
struct list_head client_data_list;
- spinlock_t client_data_lock;
struct ib_cache cache;
int *pkey_tbl_len;
@@ -1144,8 +1144,8 @@
IB_DEV_UNREGISTERED
} reg_state;
- u64 uverbs_cmd_mask;
int uverbs_abi_ver;
+ u64 uverbs_cmd_mask;
char node_desc[64];
__be64 node_guid;
diff --git a/include/rdma/rdma_cm.h b/include/rdma/rdma_cm.h
index c6b2962..4fae903 100644
--- a/include/rdma/rdma_cm.h
+++ b/include/rdma/rdma_cm.h
@@ -67,7 +67,6 @@
RDMA_PS_IPOIB = 0x0002,
RDMA_PS_TCP = 0x0106,
RDMA_PS_UDP = 0x0111,
- RDMA_PS_SCTP = 0x0183
};
struct rdma_addr {
diff --git a/init/main.c b/init/main.c
index c75dcd6..1809815 100644
--- a/init/main.c
+++ b/init/main.c
@@ -149,6 +149,20 @@
early_param("nosmp", nosmp);
+/* this is a hard limit */
+static int __init nrcpus(char *str)
+{
+ int nr_cpus;
+
+ get_option(&str, &nr_cpus);
+ if (nr_cpus > 0 && nr_cpus < nr_cpu_ids)
+ nr_cpu_ids = nr_cpus;
+
+ return 0;
+}
+
+early_param("nr_cpus", nrcpus);
+
static int __init maxcpus(char *str)
{
get_option(&str, &setup_max_cpus);
@@ -586,6 +600,7 @@
local_irq_disable();
}
rcu_init();
+ radix_tree_init();
/* init some links before init_ISA_irqs() */
early_irq_init();
init_IRQ();
@@ -659,7 +674,6 @@
proc_caches_init();
buffer_init();
key_init();
- radix_tree_init();
security_init();
vfs_caches_init(totalram_pages);
signals_init();
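The new nr_cpus= parameter is a hard cap on nr_cpu_ids (and hence on per-cpu resources sized from it), whereas the existing maxcpus= only limits how many CPUs are brought up at boot. For example, passing the following on the kernel command line sizes the system for at most four CPUs:

    nr_cpus=4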
diff --git a/kernel/Makefile b/kernel/Makefile
index 6aebdeb..7b97469 100644
--- a/kernel/Makefile
+++ b/kernel/Makefile
@@ -10,7 +10,8 @@
kthread.o wait.o kfifo.o sys_ni.o posix-cpu-timers.o mutex.o \
hrtimer.o rwsem.o nsproxy.o srcu.o semaphore.o \
notifier.o ksysfs.o pm_qos_params.o sched_clock.o cred.o \
- async.o
+ async.o range.o
+obj-$(CONFIG_HAVE_EARLY_RES) += early_res.o
obj-y += groups.o
ifdef CONFIG_FUNCTION_TRACER
diff --git a/kernel/capability.c b/kernel/capability.c
index 7f876e6..9e4697e 100644
--- a/kernel/capability.c
+++ b/kernel/capability.c
@@ -135,7 +135,7 @@
if (pid && (pid != task_pid_vnr(current))) {
struct task_struct *target;
- read_lock(&tasklist_lock);
+ rcu_read_lock();
target = find_task_by_vpid(pid);
if (!target)
@@ -143,7 +143,7 @@
else
ret = security_capget(target, pEp, pIp, pPp);
- read_unlock(&tasklist_lock);
+ rcu_read_unlock();
} else
ret = security_capget(current, pEp, pIp, pPp);
diff --git a/kernel/early_res.c b/kernel/early_res.c
new file mode 100644
index 0000000..3cb2c66
--- /dev/null
+++ b/kernel/early_res.c
@@ -0,0 +1,578 @@
+/*
+ * early_res, could be used to replace bootmem
+ */
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/init.h>
+#include <linux/bootmem.h>
+#include <linux/mm.h>
+#include <linux/early_res.h>
+
+/*
+ * Early reserved memory areas.
+ */
+/*
+ * Need to make sure this one is big enough before
+ * find_fw_memmap_area() can be used.
+ */
+#define MAX_EARLY_RES_X 32
+
+struct early_res {
+ u64 start, end;
+ char name[15];
+ char overlap_ok;
+};
+static struct early_res early_res_x[MAX_EARLY_RES_X] __initdata;
+
+static int max_early_res __initdata = MAX_EARLY_RES_X;
+static struct early_res *early_res __initdata = &early_res_x[0];
+static int early_res_count __initdata;
+
+static int __init find_overlapped_early(u64 start, u64 end)
+{
+ int i;
+ struct early_res *r;
+
+ for (i = 0; i < max_early_res && early_res[i].end; i++) {
+ r = &early_res[i];
+ if (end > r->start && start < r->end)
+ break;
+ }
+
+ return i;
+}
+
+/*
+ * Drop the i-th range from the early reservation map,
+ * by copying any higher ranges down one over it, and
+ * clearing what had been the last slot.
+ */
+static void __init drop_range(int i)
+{
+ int j;
+
+ for (j = i + 1; j < max_early_res && early_res[j].end; j++)
+ ;
+
+ memmove(&early_res[i], &early_res[i + 1],
+ (j - 1 - i) * sizeof(struct early_res));
+
+ early_res[j - 1].end = 0;
+ early_res_count--;
+}
+
+static void __init drop_range_partial(int i, u64 start, u64 end)
+{
+ u64 common_start, common_end;
+ u64 old_start, old_end;
+
+ old_start = early_res[i].start;
+ old_end = early_res[i].end;
+ common_start = max(old_start, start);
+ common_end = min(old_end, end);
+
+ /* no overlap ? */
+ if (common_start >= common_end)
+ return;
+
+ if (old_start < common_start) {
+ /* make head segment */
+ early_res[i].end = common_start;
+ if (old_end > common_end) {
+ char name[15];
+
+ /*
+ * Save a local copy of the name, since the
+ * early_res array could get resized inside
+ * reserve_early_without_check() ->
+ * __check_and_double_early_res(), which would
+ * make the current name pointer invalid.
+ */
+ strncpy(name, early_res[i].name,
+ sizeof(early_res[i].name) - 1);
+ /* add another for left over on tail */
+ reserve_early_without_check(common_end, old_end, name);
+ }
+ return;
+ } else {
+ if (old_end > common_end) {
+ /* reuse the entry for tail left */
+ early_res[i].start = common_end;
+ return;
+ }
+ /* all covered */
+ drop_range(i);
+ }
+}
+
+/*
+ * Split any existing ranges that:
+ * 1) are marked 'overlap_ok', and
+ * 2) overlap with the stated range [start, end)
+ * into whatever portion (if any) of the existing range is entirely
+ * below or entirely above the stated range. Drop the portion
+ * of the existing range that overlaps with the stated range,
+ * which will allow the caller of this routine to then add that
+ * stated range without conflicting with any existing range.
+ */
+static void __init drop_overlaps_that_are_ok(u64 start, u64 end)
+{
+ int i;
+ struct early_res *r;
+ u64 lower_start, lower_end;
+ u64 upper_start, upper_end;
+ char name[15];
+
+ for (i = 0; i < max_early_res && early_res[i].end; i++) {
+ r = &early_res[i];
+
+ /* Continue past non-overlapping ranges */
+ if (end <= r->start || start >= r->end)
+ continue;
+
+ /*
+ * Leave non-ok overlaps as is; let caller
+ * panic "Overlapping early reservations"
+ * when it hits this overlap.
+ */
+ if (!r->overlap_ok)
+ return;
+
+ /*
+ * We have an ok overlap. We will drop it from the early
+ * reservation map, and add back in any non-overlapping
+ * portions (lower or upper) as separate, overlap_ok,
+ * non-overlapping ranges.
+ */
+
+ /* 1. Note any non-overlapping (lower or upper) ranges. */
+ strncpy(name, r->name, sizeof(name) - 1);
+
+ lower_start = lower_end = 0;
+ upper_start = upper_end = 0;
+ if (r->start < start) {
+ lower_start = r->start;
+ lower_end = start;
+ }
+ if (r->end > end) {
+ upper_start = end;
+ upper_end = r->end;
+ }
+
+ /* 2. Drop the original ok overlapping range */
+ drop_range(i);
+
+ i--; /* resume for-loop on copied down entry */
+
+ /* 3. Add back in any non-overlapping ranges. */
+ if (lower_end)
+ reserve_early_overlap_ok(lower_start, lower_end, name);
+ if (upper_end)
+ reserve_early_overlap_ok(upper_start, upper_end, name);
+ }
+}
+
+static void __init __reserve_early(u64 start, u64 end, char *name,
+ int overlap_ok)
+{
+ int i;
+ struct early_res *r;
+
+ i = find_overlapped_early(start, end);
+ if (i >= max_early_res)
+ panic("Too many early reservations");
+ r = &early_res[i];
+ if (r->end)
+ panic("Overlapping early reservations "
+ "%llx-%llx %s to %llx-%llx %s\n",
+ start, end - 1, name ? name : "", r->start,
+ r->end - 1, r->name);
+ r->start = start;
+ r->end = end;
+ r->overlap_ok = overlap_ok;
+ if (name)
+ strncpy(r->name, name, sizeof(r->name) - 1);
+ early_res_count++;
+}
+
+/*
+ * A few early reservations come here.
+ *
+ * The 'overlap_ok' in the name of this routine does -not- mean it
+ * is ok for these reservations to overlap an earlier reservation.
+ * Rather it means that it is ok for subsequent reservations to
+ * overlap this one.
+ *
+ * Use this entry point to reserve early ranges when you are doing
+ * so out of "Paranoia", reserving perhaps more memory than you need,
+ * just in case, and don't mind a subsequent overlapping reservation
+ * that is known to be needed.
+ *
+ * The drop_overlaps_that_are_ok() call here isn't really needed.
+ * It would be needed if we had two colliding 'overlap_ok'
+ * reservations, so that the second such would not panic on the
+ * overlap with the first. We don't have any such as of this
+ * writing, but might as well tolerate such if it happens in
+ * the future.
+ */
+void __init reserve_early_overlap_ok(u64 start, u64 end, char *name)
+{
+ drop_overlaps_that_are_ok(start, end);
+ __reserve_early(start, end, name, 1);
+}
+
+static void __init __check_and_double_early_res(u64 ex_start, u64 ex_end)
+{
+ u64 start, end, size, mem;
+ struct early_res *new;
+
+ /* do we have enough slots left ? */
+ if ((max_early_res - early_res_count) > max(max_early_res/8, 2))
+ return;
+
+ /* double it */
+ mem = -1ULL;
+ size = sizeof(struct early_res) * max_early_res * 2;
+ if (early_res == early_res_x)
+ start = 0;
+ else
+ start = early_res[0].end;
+ end = ex_start;
+ if (start + size < end)
+ mem = find_fw_memmap_area(start, end, size,
+ sizeof(struct early_res));
+ if (mem == -1ULL) {
+ start = ex_end;
+ end = get_max_mapped();
+ if (start + size < end)
+ mem = find_fw_memmap_area(start, end, size,
+ sizeof(struct early_res));
+ }
+ if (mem == -1ULL)
+ panic("can not find more space for early_res array");
+
+ new = __va(mem);
+ /* save the first one for own */
+ new[0].start = mem;
+ new[0].end = mem + size;
+ new[0].overlap_ok = 0;
+ /* copy old to new */
+ if (early_res == early_res_x) {
+ memcpy(&new[1], &early_res[0],
+ sizeof(struct early_res) * max_early_res);
+ memset(&new[max_early_res+1], 0,
+ sizeof(struct early_res) * (max_early_res - 1));
+ early_res_count++;
+ } else {
+ memcpy(&new[1], &early_res[1],
+ sizeof(struct early_res) * (max_early_res - 1));
+ memset(&new[max_early_res], 0,
+ sizeof(struct early_res) * max_early_res);
+ }
+ memset(&early_res[0], 0, sizeof(struct early_res) * max_early_res);
+ early_res = new;
+ max_early_res *= 2;
+ printk(KERN_DEBUG "early_res array is doubled to %d at [%llx - %llx]\n",
+ max_early_res, mem, mem + size - 1);
+}
+
+/*
+ * Most early reservations come here.
+ *
+ * We first have drop_overlaps_that_are_ok() drop any pre-existing
+ * 'overlap_ok' ranges, so that we can then reserve this memory
+ * range without risk of panic'ing on an overlapping overlap_ok
+ * early reservation.
+ */
+void __init reserve_early(u64 start, u64 end, char *name)
+{
+ if (start >= end)
+ return;
+
+ __check_and_double_early_res(start, end);
+
+ drop_overlaps_that_are_ok(start, end);
+ __reserve_early(start, end, name, 0);
+}
+
+void __init reserve_early_without_check(u64 start, u64 end, char *name)
+{
+ struct early_res *r;
+
+ if (start >= end)
+ return;
+
+ __check_and_double_early_res(start, end);
+
+ r = &early_res[early_res_count];
+
+ r->start = start;
+ r->end = end;
+ r->overlap_ok = 0;
+ if (name)
+ strncpy(r->name, name, sizeof(r->name) - 1);
+ early_res_count++;
+}
+
+void __init free_early(u64 start, u64 end)
+{
+ struct early_res *r;
+ int i;
+
+ i = find_overlapped_early(start, end);
+ r = &early_res[i];
+ if (i >= max_early_res || r->end != end || r->start != start)
+ panic("free_early on not reserved area: %llx-%llx!",
+ start, end - 1);
+
+ drop_range(i);
+}
+
+void __init free_early_partial(u64 start, u64 end)
+{
+ struct early_res *r;
+ int i;
+
+try_next:
+ i = find_overlapped_early(start, end);
+ if (i >= max_early_res)
+ return;
+
+ r = &early_res[i];
+ /* hole ? */
+ if (r->end >= end && r->start <= start) {
+ drop_range_partial(i, start, end);
+ return;
+ }
+
+ drop_range_partial(i, start, end);
+ goto try_next;
+}
+
+#ifdef CONFIG_NO_BOOTMEM
+static void __init subtract_early_res(struct range *range, int az)
+{
+ int i, count;
+ u64 final_start, final_end;
+ int idx = 0;
+
+ count = 0;
+ for (i = 0; i < max_early_res && early_res[i].end; i++)
+ count++;
+
+ /* need to skip first one? */
+ if (early_res != early_res_x)
+ idx = 1;
+
+#define DEBUG_PRINT_EARLY_RES 1
+
+#if DEBUG_PRINT_EARLY_RES
+ printk(KERN_INFO "Subtract (%d early reservations)\n", count);
+#endif
+ for (i = idx; i < count; i++) {
+ struct early_res *r = &early_res[i];
+#if DEBUG_PRINT_EARLY_RES
+ printk(KERN_INFO " #%d [%010llx - %010llx] %15s\n", i,
+ r->start, r->end, r->name);
+#endif
+ final_start = PFN_DOWN(r->start);
+ final_end = PFN_UP(r->end);
+ if (final_start >= final_end)
+ continue;
+ subtract_range(range, az, final_start, final_end);
+ }
+
+}
+
+int __init get_free_all_memory_range(struct range **rangep, int nodeid)
+{
+ int i, count;
+ u64 start = 0, end;
+ u64 size;
+ u64 mem;
+ struct range *range;
+ int nr_range;
+
+ count = 0;
+ for (i = 0; i < max_early_res && early_res[i].end; i++)
+ count++;
+
+ count *= 2;
+
+ size = sizeof(struct range) * count;
+ end = get_max_mapped();
+#ifdef MAX_DMA32_PFN
+ if (end > (MAX_DMA32_PFN << PAGE_SHIFT))
+ start = MAX_DMA32_PFN << PAGE_SHIFT;
+#endif
+ mem = find_fw_memmap_area(start, end, size, sizeof(struct range));
+ if (mem == -1ULL)
+ panic("can not find more space for range free");
+
+ range = __va(mem);
+ /* use early_node_map[] and early_res to get range array at first */
+ memset(range, 0, size);
+ nr_range = 0;
+
+ /* need to go over early_node_map to find out good range for node */
+ nr_range = add_from_early_node_map(range, count, nr_range, nodeid);
+#ifdef CONFIG_X86_32
+ subtract_range(range, count, max_low_pfn, -1ULL);
+#endif
+ subtract_early_res(range, count);
+ nr_range = clean_sort_range(range, count);
+
+ /* need to clear it ? */
+ if (nodeid == MAX_NUMNODES) {
+ memset(&early_res[0], 0,
+ sizeof(struct early_res) * max_early_res);
+ early_res = NULL;
+ max_early_res = 0;
+ }
+
+ *rangep = range;
+ return nr_range;
+}
+#else
+void __init early_res_to_bootmem(u64 start, u64 end)
+{
+ int i, count;
+ u64 final_start, final_end;
+ int idx = 0;
+
+ count = 0;
+ for (i = 0; i < max_early_res && early_res[i].end; i++)
+ count++;
+
+ /* need to skip first one? */
+ if (early_res != early_res_x)
+ idx = 1;
+
+ printk(KERN_INFO "(%d/%d early reservations) ==> bootmem [%010llx - %010llx]\n",
+ count - idx, max_early_res, start, end);
+ for (i = idx; i < count; i++) {
+ struct early_res *r = &early_res[i];
+ printk(KERN_INFO " #%d [%010llx - %010llx] %16s", i,
+ r->start, r->end, r->name);
+ final_start = max(start, r->start);
+ final_end = min(end, r->end);
+ if (final_start >= final_end) {
+ printk(KERN_CONT "\n");
+ continue;
+ }
+ printk(KERN_CONT " ==> [%010llx - %010llx]\n",
+ final_start, final_end);
+ reserve_bootmem_generic(final_start, final_end - final_start,
+ BOOTMEM_DEFAULT);
+ }
+ /* clear them */
+ memset(&early_res[0], 0, sizeof(struct early_res) * max_early_res);
+ early_res = NULL;
+ max_early_res = 0;
+ early_res_count = 0;
+}
+#endif
+
+/* Check for already reserved areas */
+static inline int __init bad_addr(u64 *addrp, u64 size, u64 align)
+{
+ int i;
+ u64 addr = *addrp;
+ int changed = 0;
+ struct early_res *r;
+again:
+ i = find_overlapped_early(addr, addr + size);
+ r = &early_res[i];
+ if (i < max_early_res && r->end) {
+ *addrp = addr = round_up(r->end, align);
+ changed = 1;
+ goto again;
+ }
+ return changed;
+}
+
+/* Check for already reserved areas */
+static inline int __init bad_addr_size(u64 *addrp, u64 *sizep, u64 align)
+{
+ int i;
+ u64 addr = *addrp, last;
+ u64 size = *sizep;
+ int changed = 0;
+again:
+ last = addr + size;
+ for (i = 0; i < max_early_res && early_res[i].end; i++) {
+ struct early_res *r = &early_res[i];
+ if (last > r->start && addr < r->start) {
+ size = r->start - addr;
+ changed = 1;
+ goto again;
+ }
+ if (last > r->end && addr < r->end) {
+ addr = round_up(r->end, align);
+ size = last - addr;
+ changed = 1;
+ goto again;
+ }
+ if (last <= r->end && addr >= r->start) {
+ (*sizep)++;
+ return 0;
+ }
+ }
+ if (changed) {
+ *addrp = addr;
+ *sizep = size;
+ }
+ return changed;
+}
+
+/*
+ * Find a free area with specified alignment in a specific range.
+ * Only the area between start and end is an active range taken from
+ * early_node_map, so it is known to be usable RAM.
+ */
+u64 __init find_early_area(u64 ei_start, u64 ei_last, u64 start, u64 end,
+ u64 size, u64 align)
+{
+ u64 addr, last;
+
+ addr = round_up(ei_start, align);
+ if (addr < start)
+ addr = round_up(start, align);
+ if (addr >= ei_last)
+ goto out;
+ while (bad_addr(&addr, size, align) && addr+size <= ei_last)
+ ;
+ last = addr + size;
+ if (last > ei_last)
+ goto out;
+ if (last > end)
+ goto out;
+
+ return addr;
+
+out:
+ return -1ULL;
+}
+
+u64 __init find_early_area_size(u64 ei_start, u64 ei_last, u64 start,
+ u64 *sizep, u64 align)
+{
+ u64 addr, last;
+
+ addr = round_up(ei_start, align);
+ if (addr < start)
+ addr = round_up(start, align);
+ if (addr >= ei_last)
+ goto out;
+ *sizep = ei_last - addr;
+ while (bad_addr_size(&addr, sizep, align) && addr + *sizep <= ei_last)
+ ;
+ last = addr + *sizep;
+ if (last > ei_last)
+ goto out;
+
+ return addr;
+
+out:
+ return -1ULL;
+}
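Typical usage of the reservation API above from architecture setup code looks roughly like this (a sketch; the addresses and the name are placeholders):

    /* claim a firmware-owned range before the page allocator is up */
    reserve_early(0x100000, 0x200000, "FW TABLE");

    /* release it once it is no longer needed (range must match exactly) */
    free_early(0x100000, 0x200000);

    /* or give back only the tail of a reservation */
    free_early_partial(0x180000, 0x200000);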
diff --git a/kernel/irq/chip.c b/kernel/irq/chip.c
index ecc3fa2..d70394f 100644
--- a/kernel/irq/chip.c
+++ b/kernel/irq/chip.c
@@ -18,11 +18,7 @@
#include "internals.h"
-/**
- * dynamic_irq_init - initialize a dynamically allocated irq
- * @irq: irq number to initialize
- */
-void dynamic_irq_init(unsigned int irq)
+static void dynamic_irq_init_x(unsigned int irq, bool keep_chip_data)
{
struct irq_desc *desc;
unsigned long flags;
@@ -41,7 +37,8 @@
desc->depth = 1;
desc->msi_desc = NULL;
desc->handler_data = NULL;
- desc->chip_data = NULL;
+ if (!keep_chip_data)
+ desc->chip_data = NULL;
desc->action = NULL;
desc->irq_count = 0;
desc->irqs_unhandled = 0;
@@ -55,10 +52,26 @@
}
/**
- * dynamic_irq_cleanup - cleanup a dynamically allocated irq
+ * dynamic_irq_init - initialize a dynamically allocated irq
* @irq: irq number to initialize
*/
-void dynamic_irq_cleanup(unsigned int irq)
+void dynamic_irq_init(unsigned int irq)
+{
+ dynamic_irq_init_x(irq, false);
+}
+
+/**
+ * dynamic_irq_init_keep_chip_data - initialize a dynamically allocated irq
+ * @irq: irq number to initialize
+ *
+ * does not set irq_to_desc(irq)->chip_data to NULL
+ */
+void dynamic_irq_init_keep_chip_data(unsigned int irq)
+{
+ dynamic_irq_init_x(irq, true);
+}
+
+static void dynamic_irq_cleanup_x(unsigned int irq, bool keep_chip_data)
{
struct irq_desc *desc = irq_to_desc(irq);
unsigned long flags;
@@ -77,7 +90,8 @@
}
desc->msi_desc = NULL;
desc->handler_data = NULL;
- desc->chip_data = NULL;
+ if (!keep_chip_data)
+ desc->chip_data = NULL;
desc->handle_irq = handle_bad_irq;
desc->chip = &no_irq_chip;
desc->name = NULL;
@@ -85,6 +99,26 @@
raw_spin_unlock_irqrestore(&desc->lock, flags);
}
+/**
+ * dynamic_irq_cleanup - cleanup a dynamically allocated irq
+ * @irq: irq number to initialize
+ */
+void dynamic_irq_cleanup(unsigned int irq)
+{
+ dynamic_irq_cleanup_x(irq, false);
+}
+
+/**
+ * dynamic_irq_cleanup_keep_chip_data - cleanup a dynamically allocated irq
+ * @irq: irq number to initialize
+ *
+ * does not set irq_to_desc(irq)->chip_data to NULL
+ */
+void dynamic_irq_cleanup_keep_chip_data(unsigned int irq)
+{
+ dynamic_irq_cleanup_x(irq, true);
+}
+
/**
* set_irq_chip - set the irq chip for an irq
diff --git a/kernel/irq/handle.c b/kernel/irq/handle.c
index 814940e..76d5a67 100644
--- a/kernel/irq/handle.c
+++ b/kernel/irq/handle.c
@@ -19,7 +19,7 @@
#include <linux/kernel_stat.h>
#include <linux/rculist.h>
#include <linux/hash.h>
-#include <linux/bootmem.h>
+#include <linux/radix-tree.h>
#include <trace/events/irq.h>
#include "internals.h"
@@ -87,12 +87,8 @@
{
void *ptr;
- if (slab_is_available())
- ptr = kzalloc_node(nr * sizeof(*desc->kstat_irqs),
- GFP_ATOMIC, node);
- else
- ptr = alloc_bootmem_node(NODE_DATA(node),
- nr * sizeof(*desc->kstat_irqs));
+ ptr = kzalloc_node(nr * sizeof(*desc->kstat_irqs),
+ GFP_ATOMIC, node);
/*
* don't overwite if can not get new one
@@ -132,7 +128,26 @@
*/
DEFINE_RAW_SPINLOCK(sparse_irq_lock);
-struct irq_desc **irq_desc_ptrs __read_mostly;
+static RADIX_TREE(irq_desc_tree, GFP_ATOMIC);
+
+static void set_irq_desc(unsigned int irq, struct irq_desc *desc)
+{
+ radix_tree_insert(&irq_desc_tree, irq, desc);
+}
+
+struct irq_desc *irq_to_desc(unsigned int irq)
+{
+ return radix_tree_lookup(&irq_desc_tree, irq);
+}
+
+void replace_irq_desc(unsigned int irq, struct irq_desc *desc)
+{
+ void **ptr;
+
+ ptr = radix_tree_lookup_slot(&irq_desc_tree, irq);
+ if (ptr)
+ radix_tree_replace_slot(ptr, desc);
+}
static struct irq_desc irq_desc_legacy[NR_IRQS_LEGACY] __cacheline_aligned_in_smp = {
[0 ... NR_IRQS_LEGACY-1] = {
@@ -164,9 +179,6 @@
legacy_count = ARRAY_SIZE(irq_desc_legacy);
node = first_online_node;
- /* allocate irq_desc_ptrs array based on nr_irqs */
- irq_desc_ptrs = kcalloc(nr_irqs, sizeof(void *), GFP_NOWAIT);
-
/* allocate based on nr_cpu_ids */
kstat_irqs_legacy = kzalloc_node(NR_IRQS_LEGACY * nr_cpu_ids *
sizeof(int), GFP_NOWAIT, node);
@@ -180,23 +192,12 @@
lockdep_set_class(&desc[i].lock, &irq_desc_lock_class);
alloc_desc_masks(&desc[i], node, true);
init_desc_masks(&desc[i]);
- irq_desc_ptrs[i] = desc + i;
+ set_irq_desc(i, &desc[i]);
}
- for (i = legacy_count; i < nr_irqs; i++)
- irq_desc_ptrs[i] = NULL;
-
return arch_early_irq_init();
}
-struct irq_desc *irq_to_desc(unsigned int irq)
-{
- if (irq_desc_ptrs && irq < nr_irqs)
- return irq_desc_ptrs[irq];
-
- return NULL;
-}
-
struct irq_desc * __ref irq_to_desc_alloc_node(unsigned int irq, int node)
{
struct irq_desc *desc;
@@ -208,21 +209,18 @@
return NULL;
}
- desc = irq_desc_ptrs[irq];
+ desc = irq_to_desc(irq);
if (desc)
return desc;
raw_spin_lock_irqsave(&sparse_irq_lock, flags);
/* We have to check it to avoid races with another CPU */
- desc = irq_desc_ptrs[irq];
+ desc = irq_to_desc(irq);
if (desc)
goto out_unlock;
- if (slab_is_available())
- desc = kzalloc_node(sizeof(*desc), GFP_ATOMIC, node);
- else
- desc = alloc_bootmem_node(NODE_DATA(node), sizeof(*desc));
+ desc = kzalloc_node(sizeof(*desc), GFP_ATOMIC, node);
printk(KERN_DEBUG " alloc irq_desc for %d on node %d\n", irq, node);
if (!desc) {
@@ -231,7 +229,7 @@
}
init_one_irq_desc(irq, desc, node);
- irq_desc_ptrs[irq] = desc;
+ set_irq_desc(irq, desc);
out_unlock:
raw_spin_unlock_irqrestore(&sparse_irq_lock, flags);
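
The kernel/irq/handle.c hunk above replaces the flat irq_desc_ptrs array with a radix tree keyed by irq number: set_irq_desc() inserts, irq_to_desc() looks up, and replace_irq_desc() swaps an existing slot (used by the NUMA migration path further down). The sketch below is a minimal userspace model of that insert/lookup/replace-slot interface; a fixed-size table stands in for the radix tree, and MAX_IRQ is an illustrative bound only.

/*
 * Userspace sketch (not kernel code): models the insert/lookup/replace
 * interface the patch builds on top of the kernel radix tree.  A plain
 * fixed-size table stands in for radix_tree_insert()/_lookup()/
 * _replace_slot(); only the calling pattern matters here.
 */
#include <stdio.h>

#define MAX_IRQ 256			/* toy bound; the radix tree has none */

struct irq_desc { unsigned int irq; };

static struct irq_desc *desc_map[MAX_IRQ];	/* stand-in for the tree */

static void set_irq_desc(unsigned int irq, struct irq_desc *desc)
{
	if (irq < MAX_IRQ)
		desc_map[irq] = desc;		/* radix_tree_insert() */
}

static struct irq_desc *irq_to_desc(unsigned int irq)
{
	return irq < MAX_IRQ ? desc_map[irq] : NULL;	/* radix_tree_lookup() */
}

static void replace_irq_desc(unsigned int irq, struct irq_desc *desc)
{
	if (irq < MAX_IRQ && desc_map[irq])
		desc_map[irq] = desc;		/* radix_tree_replace_slot() */
}

int main(void)
{
	struct irq_desc a = { .irq = 16 }, b = { .irq = 16 };

	set_irq_desc(16, &a);
	printf("lookup 16 -> %p\n", (void *)irq_to_desc(16));
	replace_irq_desc(16, &b);		/* e.g. after NUMA migration */
	printf("lookup 16 -> %p\n", (void *)irq_to_desc(16));
	printf("lookup 17 -> %p\n", (void *)irq_to_desc(17));
	return 0;
}
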
diff --git a/kernel/irq/internals.h b/kernel/irq/internals.h
index b2821f0..c63f3bc 100644
--- a/kernel/irq/internals.h
+++ b/kernel/irq/internals.h
@@ -21,11 +21,7 @@
extern raw_spinlock_t sparse_irq_lock;
#ifdef CONFIG_SPARSE_IRQ
-/* irq_desc_ptrs allocated at boot time */
-extern struct irq_desc **irq_desc_ptrs;
-#else
-/* irq_desc_ptrs is a fixed size array */
-extern struct irq_desc *irq_desc_ptrs[NR_IRQS];
+void replace_irq_desc(unsigned int irq, struct irq_desc *desc);
#endif
#ifdef CONFIG_PROC_FS
diff --git a/kernel/irq/numa_migrate.c b/kernel/irq/numa_migrate.c
index 26bac9d..963559d 100644
--- a/kernel/irq/numa_migrate.c
+++ b/kernel/irq/numa_migrate.c
@@ -70,7 +70,7 @@
raw_spin_lock_irqsave(&sparse_irq_lock, flags);
/* We have to check it to avoid races with another CPU */
- desc = irq_desc_ptrs[irq];
+ desc = irq_to_desc(irq);
if (desc && old_desc != desc)
goto out_unlock;
@@ -90,7 +90,7 @@
goto out_unlock;
}
- irq_desc_ptrs[irq] = desc;
+ replace_irq_desc(irq, desc);
raw_spin_unlock_irqrestore(&sparse_irq_lock, flags);
/* free the old one */
diff --git a/kernel/kexec.c b/kernel/kexec.c
index ef077fb..87ebe8a 100644
--- a/kernel/kexec.c
+++ b/kernel/kexec.c
@@ -41,7 +41,7 @@
#include <asm/sections.h>
/* Per cpu memory for storing cpu states in case of system crash. */
-note_buf_t* crash_notes;
+note_buf_t __percpu *crash_notes;
/* vmcoreinfo stuff */
static unsigned char vmcoreinfo_data[VMCOREINFO_BYTES];
diff --git a/kernel/module.c b/kernel/module.c
index f82386b..e5538d5 100644
--- a/kernel/module.c
+++ b/kernel/module.c
@@ -474,9 +474,10 @@
INIT_LIST_HEAD(&mod->modules_which_use_me);
for_each_possible_cpu(cpu)
- local_set(__module_ref_addr(mod, cpu), 0);
+ per_cpu_ptr(mod->refptr, cpu)->count = 0;
+
/* Hold reference count during initialization. */
- local_set(__module_ref_addr(mod, raw_smp_processor_id()), 1);
+ __this_cpu_write(mod->refptr->count, 1);
/* Backwards compatibility macros put refcount during init. */
mod->waiter = current;
}
@@ -619,7 +620,7 @@
int cpu;
for_each_possible_cpu(cpu)
- total += local_read(__module_ref_addr(mod, cpu));
+ total += per_cpu_ptr(mod->refptr, cpu)->count;
return total;
}
EXPORT_SYMBOL(module_refcount);
@@ -796,14 +797,15 @@
void module_put(struct module *module)
{
if (module) {
- unsigned int cpu = get_cpu();
- local_dec(__module_ref_addr(module, cpu));
+ preempt_disable();
+ __this_cpu_dec(module->refptr->count);
+
trace_module_put(module, _RET_IP_,
- local_read(__module_ref_addr(module, cpu)));
+ __this_cpu_read(module->refptr->count));
/* Maybe they're waiting for us to drop reference? */
if (unlikely(!module_is_live(module)))
wake_up_process(module->waiter);
- put_cpu();
+ preempt_enable();
}
}
EXPORT_SYMBOL(module_put);
@@ -1397,9 +1399,9 @@
kfree(mod->args);
if (mod->percpu)
percpu_modfree(mod->percpu);
-#if defined(CONFIG_MODULE_UNLOAD) && defined(CONFIG_SMP)
+#if defined(CONFIG_MODULE_UNLOAD)
if (mod->refptr)
- percpu_modfree(mod->refptr);
+ free_percpu(mod->refptr);
#endif
/* Free lock-classes: */
lockdep_free_key_range(mod->module_core, mod->core_size);
@@ -2162,9 +2164,8 @@
mod = (void *)sechdrs[modindex].sh_addr;
kmemleak_load_module(mod, hdr, sechdrs, secstrings);
-#if defined(CONFIG_MODULE_UNLOAD) && defined(CONFIG_SMP)
- mod->refptr = percpu_modalloc(sizeof(local_t), __alignof__(local_t),
- mod->name);
+#if defined(CONFIG_MODULE_UNLOAD)
+ mod->refptr = alloc_percpu(struct module_ref);
if (!mod->refptr) {
err = -ENOMEM;
goto free_init;
@@ -2396,8 +2397,8 @@
kobject_put(&mod->mkobj.kobj);
free_unload:
module_unload_free(mod);
-#if defined(CONFIG_MODULE_UNLOAD) && defined(CONFIG_SMP)
- percpu_modfree(mod->refptr);
+#if defined(CONFIG_MODULE_UNLOAD)
+ free_percpu(mod->refptr);
free_init:
#endif
module_free(mod, mod->module_init);
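
The kernel/module.c hunk above moves the module reference count from the old __module_ref_addr()/local_t scheme to a plain per-CPU counter allocated with alloc_percpu(struct module_ref): each CPU only touches its own slot, and module_refcount() sums all slots. Below is a minimal userspace sketch of that pattern, with a made-up NR_CPUS and explicit cpu arguments standing in for __this_cpu_inc()/__this_cpu_dec().

/*
 * Userspace sketch (hypothetical names): the per-CPU reference count
 * pattern module.c switches to.  Each "CPU" only touches its own slot;
 * the total is the sum over all slots, as in module_refcount().
 */
#include <stdio.h>

#define NR_CPUS 4

struct module_ref { long count; };

static struct module_ref refptr[NR_CPUS];	/* stand-in for alloc_percpu() */

static void module_get(int cpu)  { refptr[cpu].count++; }
static void module_put(int cpu)  { refptr[cpu].count--; }

static long module_refcount(void)
{
	long total = 0;
	int cpu;

	for (cpu = 0; cpu < NR_CPUS; cpu++)	/* for_each_possible_cpu() */
		total += refptr[cpu].count;
	return total;
}

int main(void)
{
	module_get(0);			/* taken on CPU 0 ... */
	module_get(2);
	module_put(3);			/* ... may be dropped on another CPU */
	printf("refcount = %ld\n", module_refcount());	/* prints 1 */
	return 0;
}
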
diff --git a/kernel/printk.c b/kernel/printk.c
index 1751c45..4067412 100644
--- a/kernel/printk.c
+++ b/kernel/printk.c
@@ -35,6 +35,7 @@
#include <linux/kexec.h>
#include <linux/ratelimit.h>
#include <linux/kmsg_dump.h>
+#include <linux/syslog.h>
#include <asm/uaccess.h>
@@ -258,38 +259,23 @@
}
#endif
-/*
- * Commands to do_syslog:
- *
- * 0 -- Close the log. Currently a NOP.
- * 1 -- Open the log. Currently a NOP.
- * 2 -- Read from the log.
- * 3 -- Read all messages remaining in the ring buffer.
- * 4 -- Read and clear all messages remaining in the ring buffer
- * 5 -- Clear ring buffer.
- * 6 -- Disable printk's to console
- * 7 -- Enable printk's to console
- * 8 -- Set level of messages printed to console
- * 9 -- Return number of unread characters in the log buffer
- * 10 -- Return size of the log buffer
- */
-int do_syslog(int type, char __user *buf, int len)
+int do_syslog(int type, char __user *buf, int len, bool from_file)
{
unsigned i, j, limit, count;
int do_clear = 0;
char c;
int error = 0;
- error = security_syslog(type);
+ error = security_syslog(type, from_file);
if (error)
return error;
switch (type) {
- case 0: /* Close log */
+ case SYSLOG_ACTION_CLOSE: /* Close log */
break;
- case 1: /* Open log */
+ case SYSLOG_ACTION_OPEN: /* Open log */
break;
- case 2: /* Read from log */
+ case SYSLOG_ACTION_READ: /* Read from log */
error = -EINVAL;
if (!buf || len < 0)
goto out;
@@ -320,10 +306,12 @@
if (!error)
error = i;
break;
- case 4: /* Read/clear last kernel messages */
+ /* Read/clear last kernel messages */
+ case SYSLOG_ACTION_READ_CLEAR:
do_clear = 1;
/* FALL THRU */
- case 3: /* Read last kernel messages */
+ /* Read last kernel messages */
+ case SYSLOG_ACTION_READ_ALL:
error = -EINVAL;
if (!buf || len < 0)
goto out;
@@ -376,21 +364,25 @@
}
}
break;
- case 5: /* Clear ring buffer */
+ /* Clear ring buffer */
+ case SYSLOG_ACTION_CLEAR:
logged_chars = 0;
break;
- case 6: /* Disable logging to console */
+ /* Disable logging to console */
+ case SYSLOG_ACTION_CONSOLE_OFF:
if (saved_console_loglevel == -1)
saved_console_loglevel = console_loglevel;
console_loglevel = minimum_console_loglevel;
break;
- case 7: /* Enable logging to console */
+ /* Enable logging to console */
+ case SYSLOG_ACTION_CONSOLE_ON:
if (saved_console_loglevel != -1) {
console_loglevel = saved_console_loglevel;
saved_console_loglevel = -1;
}
break;
- case 8: /* Set level of messages printed to console */
+ /* Set level of messages printed to console */
+ case SYSLOG_ACTION_CONSOLE_LEVEL:
error = -EINVAL;
if (len < 1 || len > 8)
goto out;
@@ -401,10 +393,12 @@
saved_console_loglevel = -1;
error = 0;
break;
- case 9: /* Number of chars in the log buffer */
+ /* Number of chars in the log buffer */
+ case SYSLOG_ACTION_SIZE_UNREAD:
error = log_end - log_start;
break;
- case 10: /* Size of the log buffer */
+ /* Size of the log buffer */
+ case SYSLOG_ACTION_SIZE_BUFFER:
error = log_buf_len;
break;
default:
@@ -417,7 +411,7 @@
SYSCALL_DEFINE3(syslog, int, type, char __user *, buf, int, len)
{
- return do_syslog(type, buf, len);
+ return do_syslog(type, buf, len, SYSLOG_FROM_CALL);
}
/*
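
With the printk.c change above, do_syslog() dispatches on named SYSLOG_ACTION_* values rather than bare numbers, and cap_syslog() (further down in security/commoncap.c) leaves SYSLOG_ACTION_READ_ALL and SYSLOG_ACTION_SIZE_BUFFER usable without CAP_SYS_ADMIN. A small userspace sketch of the same command numbers via glibc's klogctl() wrapper; the SYSLOG_ACTION_* macros are spelled out by hand because sys/klog.h does not define them.

/*
 * Userspace view of the syslog command numbers: size the log buffer
 * (action 10), then read it without clearing (action 3).
 */
#include <stdio.h>
#include <stdlib.h>
#include <sys/klog.h>

#define SYSLOG_ACTION_READ_ALL    3
#define SYSLOG_ACTION_SIZE_BUFFER 10

int main(void)
{
	int len = klogctl(SYSLOG_ACTION_SIZE_BUFFER, NULL, 0);
	char *buf;

	if (len <= 0)
		return 1;
	buf = malloc(len);
	if (!buf)
		return 1;
	len = klogctl(SYSLOG_ACTION_READ_ALL, buf, len);	/* read, don't clear */
	if (len > 0)
		fwrite(buf, 1, len, stdout);
	free(buf);
	return 0;
}
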
diff --git a/kernel/range.c b/kernel/range.c
new file mode 100644
index 0000000..74e2e61
--- /dev/null
+++ b/kernel/range.c
@@ -0,0 +1,163 @@
+/*
+ * Range add and subtract
+ */
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/sort.h>
+
+#include <linux/range.h>
+
+#ifndef ARRAY_SIZE
+#define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0]))
+#endif
+
+int add_range(struct range *range, int az, int nr_range, u64 start, u64 end)
+{
+ if (start >= end)
+ return nr_range;
+
+ /* Out of slots: */
+ if (nr_range >= az)
+ return nr_range;
+
+ range[nr_range].start = start;
+ range[nr_range].end = end;
+
+ nr_range++;
+
+ return nr_range;
+}
+
+int add_range_with_merge(struct range *range, int az, int nr_range,
+ u64 start, u64 end)
+{
+ int i;
+
+ if (start >= end)
+ return nr_range;
+
+ /* Try to merge it with old one: */
+ for (i = 0; i < nr_range; i++) {
+ u64 final_start, final_end;
+ u64 common_start, common_end;
+
+ if (!range[i].end)
+ continue;
+
+ common_start = max(range[i].start, start);
+ common_end = min(range[i].end, end);
+ if (common_start > common_end)
+ continue;
+
+ final_start = min(range[i].start, start);
+ final_end = max(range[i].end, end);
+
+ range[i].start = final_start;
+ range[i].end = final_end;
+ return nr_range;
+ }
+
+ /* Need to add it: */
+ return add_range(range, az, nr_range, start, end);
+}
+
+void subtract_range(struct range *range, int az, u64 start, u64 end)
+{
+ int i, j;
+
+ if (start >= end)
+ return;
+
+ for (j = 0; j < az; j++) {
+ if (!range[j].end)
+ continue;
+
+ if (start <= range[j].start && end >= range[j].end) {
+ range[j].start = 0;
+ range[j].end = 0;
+ continue;
+ }
+
+ if (start <= range[j].start && end < range[j].end &&
+ range[j].start < end) {
+ range[j].start = end;
+ continue;
+ }
+
+
+ if (start > range[j].start && end >= range[j].end &&
+ range[j].end > start) {
+ range[j].end = start;
+ continue;
+ }
+
+ if (start > range[j].start && end < range[j].end) {
+ /* Find the new spare: */
+ for (i = 0; i < az; i++) {
+ if (range[i].end == 0)
+ break;
+ }
+ if (i < az) {
+ range[i].end = range[j].end;
+ range[i].start = end;
+ } else {
+				printk(KERN_ERR "run out of slots in ranges\n");
+ }
+ range[j].end = start;
+ continue;
+ }
+ }
+}
+
+static int cmp_range(const void *x1, const void *x2)
+{
+ const struct range *r1 = x1;
+ const struct range *r2 = x2;
+
+	if (r1->start < r2->start)
+		return -1;
+	if (r1->start > r2->start)
+		return 1;
+	return 0;
+}
+
+int clean_sort_range(struct range *range, int az)
+{
+ int i, j, k = az - 1, nr_range = 0;
+
+ for (i = 0; i < k; i++) {
+ if (range[i].end)
+ continue;
+ for (j = k; j > i; j--) {
+ if (range[j].end) {
+ k = j;
+ break;
+ }
+ }
+ if (j == i)
+ break;
+ range[i].start = range[k].start;
+ range[i].end = range[k].end;
+ range[k].start = 0;
+ range[k].end = 0;
+ k--;
+ }
+ /* count it */
+ for (i = 0; i < az; i++) {
+ if (!range[i].end) {
+ nr_range = i;
+ break;
+ }
+ }
+
+ /* sort them */
+ sort(range, nr_range, sizeof(struct range), cmp_range, NULL);
+
+ return nr_range;
+}
+
+void sort_range(struct range *range, int nr_range)
+{
+ /* sort them */
+ sort(range, nr_range, sizeof(struct range), cmp_range, NULL);
+}
diff --git a/kernel/rcutorture.c b/kernel/rcutorture.c
index 258cdf0..58df55b 100644
--- a/kernel/rcutorture.c
+++ b/kernel/rcutorture.c
@@ -818,13 +818,13 @@
/* Should not happen, but... */
pipe_count = RCU_TORTURE_PIPE_LEN;
}
- __this_cpu_inc(per_cpu_var(rcu_torture_count)[pipe_count]);
+ __this_cpu_inc(rcu_torture_count[pipe_count]);
completed = cur_ops->completed() - completed;
if (completed > RCU_TORTURE_PIPE_LEN) {
/* Should not happen, but... */
completed = RCU_TORTURE_PIPE_LEN;
}
- __this_cpu_inc(per_cpu_var(rcu_torture_batch)[completed]);
+ __this_cpu_inc(rcu_torture_batch[completed]);
preempt_enable();
cur_ops->readunlock(idx);
}
@@ -877,13 +877,13 @@
/* Should not happen, but... */
pipe_count = RCU_TORTURE_PIPE_LEN;
}
- __this_cpu_inc(per_cpu_var(rcu_torture_count)[pipe_count]);
+ __this_cpu_inc(rcu_torture_count[pipe_count]);
completed = cur_ops->completed() - completed;
if (completed > RCU_TORTURE_PIPE_LEN) {
/* Should not happen, but... */
completed = RCU_TORTURE_PIPE_LEN;
}
- __this_cpu_inc(per_cpu_var(rcu_torture_batch)[completed]);
+ __this_cpu_inc(rcu_torture_batch[completed]);
preempt_enable();
cur_ops->readunlock(idx);
schedule();
diff --git a/kernel/resource.c b/kernel/resource.c
index 4e9d87f..2d5be5d 100644
--- a/kernel/resource.c
+++ b/kernel/resource.c
@@ -304,7 +304,7 @@
void *arg, int (*func)(unsigned long, unsigned long, void *))
{
struct resource res;
- unsigned long pfn, len;
+ unsigned long pfn, end_pfn;
u64 orig_end;
int ret = -1;
@@ -314,9 +314,10 @@
orig_end = res.end;
while ((res.start < res.end) &&
(find_next_system_ram(&res, "System RAM") >= 0)) {
- pfn = (unsigned long)(res.start >> PAGE_SHIFT);
- len = (unsigned long)((res.end + 1 - res.start) >> PAGE_SHIFT);
- ret = (*func)(pfn, len, arg);
+ pfn = (res.start + PAGE_SIZE - 1) >> PAGE_SHIFT;
+ end_pfn = (res.end + 1) >> PAGE_SHIFT;
+ if (end_pfn > pfn)
+ ret = (*func)(pfn, end_pfn - pfn, arg);
if (ret)
break;
res.start = res.end + 1;
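
The kernel/resource.c hunk above changes the walk so the callback only ever sees whole pages: the resource start is rounded up to a page boundary, the inclusive end is rounded down, and the call is skipped when no full page fits. A quick standalone check of that arithmetic with made-up addresses and PAGE_SHIFT = 12:

#include <stdio.h>

#define PAGE_SHIFT 12
#define PAGE_SIZE  (1UL << PAGE_SHIFT)

int main(void)
{
	unsigned long long start = 0x1800;	/* not page aligned */
	unsigned long long end   = 0x4fff;	/* inclusive, as in struct resource */
	unsigned long pfn, end_pfn;

	pfn     = (start + PAGE_SIZE - 1) >> PAGE_SHIFT;	/* round start up   -> 2 */
	end_pfn = (end + 1) >> PAGE_SHIFT;			/* round end+1 down -> 5 */

	if (end_pfn > pfn)
		printf("callback gets pfn=%lu, nr_pages=%lu\n", pfn, end_pfn - pfn);
	else
		printf("no whole page in range, callback skipped\n");
	return 0;
}
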
diff --git a/kernel/sched.c b/kernel/sched.c
index 6a212c9..abb36b1 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -1521,7 +1521,7 @@
#ifdef CONFIG_FAIR_GROUP_SCHED
-static __read_mostly unsigned long *update_shares_data;
+static __read_mostly unsigned long __percpu *update_shares_data;
static void __set_se_shares(struct sched_entity *se, unsigned long shares);
@@ -8813,7 +8813,7 @@
struct cpuacct {
struct cgroup_subsys_state css;
/* cpuusage holds pointer to a u64-type object on every cpu */
- u64 *cpuusage;
+ u64 __percpu *cpuusage;
struct percpu_counter cpustat[CPUACCT_STAT_NSTATS];
struct cpuacct *parent;
};
diff --git a/kernel/stop_machine.c b/kernel/stop_machine.c
index 912823e..9bb9fb1 100644
--- a/kernel/stop_machine.c
+++ b/kernel/stop_machine.c
@@ -45,7 +45,7 @@
static struct workqueue_struct *stop_machine_wq;
static struct stop_machine_data active, idle;
static const struct cpumask *active_cpus;
-static void *stop_machine_work;
+static void __percpu *stop_machine_work;
static void set_state(enum stopmachine_state newstate)
{
diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
index 8c1b2d2..0287f9f 100644
--- a/kernel/trace/ring_buffer.c
+++ b/kernel/trace/ring_buffer.c
@@ -20,6 +20,7 @@
#include <linux/cpu.h>
#include <linux/fs.h>
+#include <asm/local.h>
#include "trace.h"
/*
diff --git a/kernel/trace/ring_buffer_benchmark.c b/kernel/trace/ring_buffer_benchmark.c
index b2477ca..df74c79 100644
--- a/kernel/trace/ring_buffer_benchmark.c
+++ b/kernel/trace/ring_buffer_benchmark.c
@@ -8,6 +8,7 @@
#include <linux/kthread.h>
#include <linux/module.h>
#include <linux/time.h>
+#include <asm/local.h>
struct rb_page {
u64 ts;
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index 032c57c..ed01fdb 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -92,12 +92,12 @@
static inline void ftrace_disable_cpu(void)
{
preempt_disable();
- __this_cpu_inc(per_cpu_var(ftrace_cpu_disabled));
+ __this_cpu_inc(ftrace_cpu_disabled);
}
static inline void ftrace_enable_cpu(void)
{
- __this_cpu_dec(per_cpu_var(ftrace_cpu_disabled));
+ __this_cpu_dec(ftrace_cpu_disabled);
preempt_enable();
}
@@ -1166,7 +1166,7 @@
struct ftrace_entry *entry;
/* If we are reading the ring buffer, don't trace */
- if (unlikely(__this_cpu_read(per_cpu_var(ftrace_cpu_disabled))))
+ if (unlikely(__this_cpu_read(ftrace_cpu_disabled)))
return;
event = trace_buffer_lock_reserve(buffer, TRACE_FN, sizeof(*entry),
diff --git a/kernel/trace/trace_functions_graph.c b/kernel/trace/trace_functions_graph.c
index e998a82..3fc2a57 100644
--- a/kernel/trace/trace_functions_graph.c
+++ b/kernel/trace/trace_functions_graph.c
@@ -188,7 +188,7 @@
struct ring_buffer *buffer = tr->buffer;
struct ftrace_graph_ent_entry *entry;
- if (unlikely(__this_cpu_read(per_cpu_var(ftrace_cpu_disabled))))
+ if (unlikely(__this_cpu_read(ftrace_cpu_disabled)))
return 0;
event = trace_buffer_lock_reserve(buffer, TRACE_GRAPH_ENT,
@@ -247,7 +247,7 @@
struct ring_buffer *buffer = tr->buffer;
struct ftrace_graph_ret_entry *entry;
- if (unlikely(__this_cpu_read(per_cpu_var(ftrace_cpu_disabled))))
+ if (unlikely(__this_cpu_read(ftrace_cpu_disabled)))
return;
event = trace_buffer_lock_reserve(buffer, TRACE_GRAPH_RET,
diff --git a/mm/Kconfig b/mm/Kconfig
index d34c2b9..9c61158 100644
--- a/mm/Kconfig
+++ b/mm/Kconfig
@@ -115,6 +115,10 @@
config SPARSEMEM_VMEMMAP_ENABLE
bool
+config SPARSEMEM_ALLOC_MEM_MAP_TOGETHER
+ def_bool y
+ depends on SPARSEMEM && X86_64
+
config SPARSEMEM_VMEMMAP
bool "Sparse Memory virtual memmap"
depends on SPARSEMEM && SPARSEMEM_VMEMMAP_ENABLE
diff --git a/mm/bootmem.c b/mm/bootmem.c
index 7d14868..d7c791e 100644
--- a/mm/bootmem.c
+++ b/mm/bootmem.c
@@ -13,6 +13,7 @@
#include <linux/bootmem.h>
#include <linux/module.h>
#include <linux/kmemleak.h>
+#include <linux/range.h>
#include <asm/bug.h>
#include <asm/io.h>
@@ -32,6 +33,7 @@
unsigned long saved_max_pfn;
#endif
+#ifndef CONFIG_NO_BOOTMEM
bootmem_data_t bootmem_node_data[MAX_NUMNODES] __initdata;
static struct list_head bdata_list __initdata = LIST_HEAD_INIT(bdata_list);
@@ -142,7 +144,7 @@
min_low_pfn = start;
return init_bootmem_core(NODE_DATA(0)->bdata, start, 0, pages);
}
-
+#endif
/*
* free_bootmem_late - free bootmem pages directly to page allocator
* @addr: starting address of the range
@@ -167,6 +169,60 @@
}
}
+#ifdef CONFIG_NO_BOOTMEM
+static void __init __free_pages_memory(unsigned long start, unsigned long end)
+{
+ int i;
+ unsigned long start_aligned, end_aligned;
+ int order = ilog2(BITS_PER_LONG);
+
+ start_aligned = (start + (BITS_PER_LONG - 1)) & ~(BITS_PER_LONG - 1);
+ end_aligned = end & ~(BITS_PER_LONG - 1);
+
+ if (end_aligned <= start_aligned) {
+#if 1
+ printk(KERN_DEBUG " %lx - %lx\n", start, end);
+#endif
+ for (i = start; i < end; i++)
+ __free_pages_bootmem(pfn_to_page(i), 0);
+
+ return;
+ }
+
+#if 1
+ printk(KERN_DEBUG " %lx %lx - %lx %lx\n",
+ start, start_aligned, end_aligned, end);
+#endif
+ for (i = start; i < start_aligned; i++)
+ __free_pages_bootmem(pfn_to_page(i), 0);
+
+ for (i = start_aligned; i < end_aligned; i += BITS_PER_LONG)
+ __free_pages_bootmem(pfn_to_page(i), order);
+
+ for (i = end_aligned; i < end; i++)
+ __free_pages_bootmem(pfn_to_page(i), 0);
+}
+
+unsigned long __init free_all_memory_core_early(int nodeid)
+{
+ int i;
+ u64 start, end;
+ unsigned long count = 0;
+ struct range *range = NULL;
+ int nr_range;
+
+ nr_range = get_free_all_memory_range(&range, nodeid);
+
+ for (i = 0; i < nr_range; i++) {
+ start = range[i].start;
+ end = range[i].end;
+ count += end - start;
+ __free_pages_memory(start, end);
+ }
+
+ return count;
+}
+#else
static unsigned long __init free_all_bootmem_core(bootmem_data_t *bdata)
{
int aligned;
@@ -227,6 +283,7 @@
return count;
}
+#endif
/**
* free_all_bootmem_node - release a node's free pages to the buddy allocator
@@ -237,7 +294,12 @@
unsigned long __init free_all_bootmem_node(pg_data_t *pgdat)
{
register_page_bootmem_info_node(pgdat);
+#ifdef CONFIG_NO_BOOTMEM
+ /* free_all_memory_core_early(MAX_NUMNODES) will be called later */
+ return 0;
+#else
return free_all_bootmem_core(pgdat->bdata);
+#endif
}
/**
@@ -247,9 +309,14 @@
*/
unsigned long __init free_all_bootmem(void)
{
+#ifdef CONFIG_NO_BOOTMEM
+ return free_all_memory_core_early(NODE_DATA(0)->node_id);
+#else
return free_all_bootmem_core(NODE_DATA(0)->bdata);
+#endif
}
+#ifndef CONFIG_NO_BOOTMEM
static void __init __free(bootmem_data_t *bdata,
unsigned long sidx, unsigned long eidx)
{
@@ -344,6 +411,7 @@
}
BUG();
}
+#endif
/**
* free_bootmem_node - mark a page range as usable
@@ -358,6 +426,12 @@
void __init free_bootmem_node(pg_data_t *pgdat, unsigned long physaddr,
unsigned long size)
{
+#ifdef CONFIG_NO_BOOTMEM
+ free_early(physaddr, physaddr + size);
+#if 0
+ printk(KERN_DEBUG "free %lx %lx\n", physaddr, size);
+#endif
+#else
unsigned long start, end;
kmemleak_free_part(__va(physaddr), size);
@@ -366,6 +440,7 @@
end = PFN_DOWN(physaddr + size);
mark_bootmem_node(pgdat->bdata, start, end, 0, 0);
+#endif
}
/**
@@ -379,6 +454,12 @@
*/
void __init free_bootmem(unsigned long addr, unsigned long size)
{
+#ifdef CONFIG_NO_BOOTMEM
+ free_early(addr, addr + size);
+#if 0
+ printk(KERN_DEBUG "free %lx %lx\n", addr, size);
+#endif
+#else
unsigned long start, end;
kmemleak_free_part(__va(addr), size);
@@ -387,6 +468,7 @@
end = PFN_DOWN(addr + size);
mark_bootmem(start, end, 0, 0);
+#endif
}
/**
@@ -403,12 +485,17 @@
int __init reserve_bootmem_node(pg_data_t *pgdat, unsigned long physaddr,
unsigned long size, int flags)
{
+#ifdef CONFIG_NO_BOOTMEM
+ panic("no bootmem");
+ return 0;
+#else
unsigned long start, end;
start = PFN_DOWN(physaddr);
end = PFN_UP(physaddr + size);
return mark_bootmem_node(pgdat->bdata, start, end, 1, flags);
+#endif
}
/**
@@ -424,14 +511,20 @@
int __init reserve_bootmem(unsigned long addr, unsigned long size,
int flags)
{
+#ifdef CONFIG_NO_BOOTMEM
+ panic("no bootmem");
+ return 0;
+#else
unsigned long start, end;
start = PFN_DOWN(addr);
end = PFN_UP(addr + size);
return mark_bootmem(start, end, 1, flags);
+#endif
}
+#ifndef CONFIG_NO_BOOTMEM
static unsigned long __init align_idx(struct bootmem_data *bdata,
unsigned long idx, unsigned long step)
{
@@ -582,12 +675,33 @@
#endif
return NULL;
}
+#endif
static void * __init ___alloc_bootmem_nopanic(unsigned long size,
unsigned long align,
unsigned long goal,
unsigned long limit)
{
+#ifdef CONFIG_NO_BOOTMEM
+ void *ptr;
+
+ if (WARN_ON_ONCE(slab_is_available()))
+ return kzalloc(size, GFP_NOWAIT);
+
+restart:
+
+ ptr = __alloc_memory_core_early(MAX_NUMNODES, size, align, goal, limit);
+
+ if (ptr)
+ return ptr;
+
+ if (goal != 0) {
+ goal = 0;
+ goto restart;
+ }
+
+ return NULL;
+#else
bootmem_data_t *bdata;
void *region;
@@ -613,6 +727,7 @@
}
return NULL;
+#endif
}
/**
@@ -631,7 +746,13 @@
void * __init __alloc_bootmem_nopanic(unsigned long size, unsigned long align,
unsigned long goal)
{
- return ___alloc_bootmem_nopanic(size, align, goal, 0);
+ unsigned long limit = 0;
+
+#ifdef CONFIG_NO_BOOTMEM
+ limit = -1UL;
+#endif
+
+ return ___alloc_bootmem_nopanic(size, align, goal, limit);
}
static void * __init ___alloc_bootmem(unsigned long size, unsigned long align,
@@ -665,9 +786,16 @@
void * __init __alloc_bootmem(unsigned long size, unsigned long align,
unsigned long goal)
{
- return ___alloc_bootmem(size, align, goal, 0);
+ unsigned long limit = 0;
+
+#ifdef CONFIG_NO_BOOTMEM
+ limit = -1UL;
+#endif
+
+ return ___alloc_bootmem(size, align, goal, limit);
}
+#ifndef CONFIG_NO_BOOTMEM
static void * __init ___alloc_bootmem_node(bootmem_data_t *bdata,
unsigned long size, unsigned long align,
unsigned long goal, unsigned long limit)
@@ -684,6 +812,7 @@
return ___alloc_bootmem(size, align, goal, limit);
}
+#endif
/**
* __alloc_bootmem_node - allocate boot memory from a specific node
@@ -706,7 +835,46 @@
if (WARN_ON_ONCE(slab_is_available()))
return kzalloc_node(size, GFP_NOWAIT, pgdat->node_id);
+#ifdef CONFIG_NO_BOOTMEM
+ return __alloc_memory_core_early(pgdat->node_id, size, align,
+ goal, -1ULL);
+#else
return ___alloc_bootmem_node(pgdat->bdata, size, align, goal, 0);
+#endif
+}
+
+void * __init __alloc_bootmem_node_high(pg_data_t *pgdat, unsigned long size,
+ unsigned long align, unsigned long goal)
+{
+#ifdef MAX_DMA32_PFN
+ unsigned long end_pfn;
+
+ if (WARN_ON_ONCE(slab_is_available()))
+ return kzalloc_node(size, GFP_NOWAIT, pgdat->node_id);
+
+	/* update goal according to MAX_DMA32_PFN */
+ end_pfn = pgdat->node_start_pfn + pgdat->node_spanned_pages;
+
+ if (end_pfn > MAX_DMA32_PFN + (128 >> (20 - PAGE_SHIFT)) &&
+ (goal >> PAGE_SHIFT) < MAX_DMA32_PFN) {
+ void *ptr;
+ unsigned long new_goal;
+
+ new_goal = MAX_DMA32_PFN << PAGE_SHIFT;
+#ifdef CONFIG_NO_BOOTMEM
+ ptr = __alloc_memory_core_early(pgdat->node_id, size, align,
+ new_goal, -1ULL);
+#else
+ ptr = alloc_bootmem_core(pgdat->bdata, size, align,
+ new_goal, 0);
+#endif
+ if (ptr)
+ return ptr;
+ }
+#endif
+
+ return __alloc_bootmem_node(pgdat, size, align, goal);
+
}
#ifdef CONFIG_SPARSEMEM
@@ -720,6 +888,16 @@
void * __init alloc_bootmem_section(unsigned long size,
unsigned long section_nr)
{
+#ifdef CONFIG_NO_BOOTMEM
+ unsigned long pfn, goal, limit;
+
+ pfn = section_nr_to_pfn(section_nr);
+ goal = pfn << PAGE_SHIFT;
+ limit = section_nr_to_pfn(section_nr + 1) << PAGE_SHIFT;
+
+ return __alloc_memory_core_early(early_pfn_to_nid(pfn), size,
+ SMP_CACHE_BYTES, goal, limit);
+#else
bootmem_data_t *bdata;
unsigned long pfn, goal, limit;
@@ -729,6 +907,7 @@
bdata = &bootmem_node_data[early_pfn_to_nid(pfn)];
return alloc_bootmem_core(bdata, size, SMP_CACHE_BYTES, goal, limit);
+#endif
}
#endif
@@ -740,11 +919,16 @@
if (WARN_ON_ONCE(slab_is_available()))
return kzalloc_node(size, GFP_NOWAIT, pgdat->node_id);
+#ifdef CONFIG_NO_BOOTMEM
+ ptr = __alloc_memory_core_early(pgdat->node_id, size, align,
+ goal, -1ULL);
+#else
ptr = alloc_arch_preferred_bootmem(pgdat->bdata, size, align, goal, 0);
if (ptr)
return ptr;
ptr = alloc_bootmem_core(pgdat->bdata, size, align, goal, 0);
+#endif
if (ptr)
return ptr;
@@ -795,6 +979,11 @@
if (WARN_ON_ONCE(slab_is_available()))
return kzalloc_node(size, GFP_NOWAIT, pgdat->node_id);
+#ifdef CONFIG_NO_BOOTMEM
+ return __alloc_memory_core_early(pgdat->node_id, size, align,
+ goal, ARCH_LOW_ADDRESS_LIMIT);
+#else
return ___alloc_bootmem_node(pgdat->bdata, size, align,
goal, ARCH_LOW_ADDRESS_LIMIT);
+#endif
}
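
In the CONFIG_NO_BOOTMEM path added to mm/bootmem.c, __free_pages_memory() releases a pfn range to the buddy allocator in three pieces: single pages up to the first BITS_PER_LONG-aligned pfn, order-ilog2(BITS_PER_LONG) blocks across the aligned middle, and single pages for the tail. A userspace walk-through of that chunking, assuming BITS_PER_LONG = 64 (so order-6, 64-page blocks):

#include <stdio.h>

#define BITS_PER_LONG 64

static void free_pages_memory(unsigned long start, unsigned long end)
{
	unsigned long i;
	unsigned long start_aligned = (start + BITS_PER_LONG - 1) & ~(unsigned long)(BITS_PER_LONG - 1);
	unsigned long end_aligned   = end & ~(unsigned long)(BITS_PER_LONG - 1);

	if (end_aligned <= start_aligned) {
		printf("singles: %lu..%lu\n", start, end - 1);
		return;
	}
	if (start < start_aligned)
		printf("head singles: %lu..%lu\n", start, start_aligned - 1);
	for (i = start_aligned; i < end_aligned; i += BITS_PER_LONG)
		printf("order-6 block at pfn %lu\n", i);
	if (end_aligned < end)
		printf("tail singles: %lu..%lu\n", end_aligned, end - 1);
}

int main(void)
{
	/* head 100..127, order-6 blocks at 128 and 192, tail 256..299 */
	free_pages_memory(100, 300);
	return 0;
}
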
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 8deb9d0..a6b17aa 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -1009,10 +1009,10 @@
struct per_cpu_pageset *pset;
struct per_cpu_pages *pcp;
- pset = zone_pcp(zone, cpu);
+ local_irq_save(flags);
+ pset = per_cpu_ptr(zone->pageset, cpu);
pcp = &pset->pcp;
- local_irq_save(flags);
free_pcppages_bulk(zone, pcp->count, pcp);
pcp->count = 0;
local_irq_restore(flags);
@@ -1096,7 +1096,6 @@
arch_free_page(page, 0);
kernel_map_pages(page, 1, 0);
- pcp = &zone_pcp(zone, get_cpu())->pcp;
migratetype = get_pageblock_migratetype(page);
set_page_private(page, migratetype);
local_irq_save(flags);
@@ -1119,6 +1118,7 @@
migratetype = MIGRATE_MOVABLE;
}
+ pcp = &this_cpu_ptr(zone->pageset)->pcp;
if (cold)
list_add_tail(&page->lru, &pcp->lists[migratetype]);
else
@@ -1131,7 +1131,6 @@
out:
local_irq_restore(flags);
- put_cpu();
}
void free_hot_page(struct page *page)
@@ -1181,17 +1180,15 @@
unsigned long flags;
struct page *page;
int cold = !!(gfp_flags & __GFP_COLD);
- int cpu;
again:
- cpu = get_cpu();
if (likely(order == 0)) {
struct per_cpu_pages *pcp;
struct list_head *list;
- pcp = &zone_pcp(zone, cpu)->pcp;
- list = &pcp->lists[migratetype];
local_irq_save(flags);
+ pcp = &this_cpu_ptr(zone->pageset)->pcp;
+ list = &pcp->lists[migratetype];
if (list_empty(list)) {
pcp->count += rmqueue_bulk(zone, 0,
pcp->batch, list,
@@ -1232,7 +1229,6 @@
__count_zone_vm_events(PGALLOC, zone, 1 << order);
zone_statistics(preferred_zone, zone);
local_irq_restore(flags);
- put_cpu();
VM_BUG_ON(bad_range(zone, page));
if (prep_new_page(page, order, gfp_flags))
@@ -1241,7 +1237,6 @@
failed:
local_irq_restore(flags);
- put_cpu();
return NULL;
}
@@ -2180,7 +2175,7 @@
for_each_online_cpu(cpu) {
struct per_cpu_pageset *pageset;
- pageset = zone_pcp(zone, cpu);
+ pageset = per_cpu_ptr(zone->pageset, cpu);
printk("CPU %4d: hi:%5d, btch:%4d usd:%4d\n",
cpu, pageset->pcp.high,
@@ -2745,10 +2740,29 @@
#endif /* CONFIG_NUMA */
+/*
+ * Boot pageset table. One per cpu which is going to be used for all
+ * zones and all nodes. The parameters will be set in such a way
+ * that an item put on a list will immediately be handed over to
+ * the buddy list. This is safe since pageset manipulation is done
+ * with interrupts disabled.
+ *
+ * The boot_pagesets must be kept even after bootup is complete for
+ * unused processors and/or zones. They do play a role for bootstrapping
+ * hotplugged processors.
+ *
+ * zoneinfo_show() and maybe other functions do
+ * not check if the processor is online before following the pageset pointer.
+ * Other parts of the kernel may not check if the zone is available.
+ */
+static void setup_pageset(struct per_cpu_pageset *p, unsigned long batch);
+static DEFINE_PER_CPU(struct per_cpu_pageset, boot_pageset);
+
/* return values int ....just for stop_machine() */
static int __build_all_zonelists(void *dummy)
{
int nid;
+ int cpu;
#ifdef CONFIG_NUMA
memset(node_load, 0, sizeof(node_load));
@@ -2759,6 +2773,23 @@
build_zonelists(pgdat);
build_zonelist_cache(pgdat);
}
+
+ /*
+ * Initialize the boot_pagesets that are going to be used
+ * for bootstrapping processors. The real pagesets for
+ * each zone will be allocated later when the per cpu
+ * allocator is available.
+ *
+ * boot_pagesets are used also for bootstrapping offline
+ * cpus if the system is already booted because the pagesets
+ * are needed to initialize allocators on a specific cpu too.
+ * F.e. the percpu allocator needs the page allocator which
+ * needs the percpu allocator in order to allocate its pagesets
+ * (a chicken-egg dilemma).
+ */
+ for_each_possible_cpu(cpu)
+ setup_pageset(&per_cpu(boot_pageset, cpu), 0);
+
return 0;
}
@@ -3096,121 +3127,33 @@
pcp->batch = PAGE_SHIFT * 8;
}
-
-#ifdef CONFIG_NUMA
/*
- * Boot pageset table. One per cpu which is going to be used for all
- * zones and all nodes. The parameters will be set in such a way
- * that an item put on a list will immediately be handed over to
- * the buddy list. This is safe since pageset manipulation is done
- * with interrupts disabled.
- *
- * Some NUMA counter updates may also be caught by the boot pagesets.
- *
- * The boot_pagesets must be kept even after bootup is complete for
- * unused processors and/or zones. They do play a role for bootstrapping
- * hotplugged processors.
- *
- * zoneinfo_show() and maybe other functions do
- * not check if the processor is online before following the pageset pointer.
- * Other parts of the kernel may not check if the zone is available.
+ * Allocate per cpu pagesets and initialize them.
+ * Before this call only boot pagesets were available.
+ * Boot pagesets will no longer be used by this processor
+ * after setup_per_cpu_pageset().
*/
-static struct per_cpu_pageset boot_pageset[NR_CPUS];
-
-/*
- * Dynamically allocate memory for the
- * per cpu pageset array in struct zone.
- */
-static int __cpuinit process_zones(int cpu)
-{
- struct zone *zone, *dzone;
- int node = cpu_to_node(cpu);
-
- node_set_state(node, N_CPU); /* this node has a cpu */
-
- for_each_populated_zone(zone) {
- zone_pcp(zone, cpu) = kmalloc_node(sizeof(struct per_cpu_pageset),
- GFP_KERNEL, node);
- if (!zone_pcp(zone, cpu))
- goto bad;
-
- setup_pageset(zone_pcp(zone, cpu), zone_batchsize(zone));
-
- if (percpu_pagelist_fraction)
- setup_pagelist_highmark(zone_pcp(zone, cpu),
- (zone->present_pages / percpu_pagelist_fraction));
- }
-
- return 0;
-bad:
- for_each_zone(dzone) {
- if (!populated_zone(dzone))
- continue;
- if (dzone == zone)
- break;
- kfree(zone_pcp(dzone, cpu));
- zone_pcp(dzone, cpu) = &boot_pageset[cpu];
- }
- return -ENOMEM;
-}
-
-static inline void free_zone_pagesets(int cpu)
-{
- struct zone *zone;
-
- for_each_zone(zone) {
- struct per_cpu_pageset *pset = zone_pcp(zone, cpu);
-
- /* Free per_cpu_pageset if it is slab allocated */
- if (pset != &boot_pageset[cpu])
- kfree(pset);
- zone_pcp(zone, cpu) = &boot_pageset[cpu];
- }
-}
-
-static int __cpuinit pageset_cpuup_callback(struct notifier_block *nfb,
- unsigned long action,
- void *hcpu)
-{
- int cpu = (long)hcpu;
- int ret = NOTIFY_OK;
-
- switch (action) {
- case CPU_UP_PREPARE:
- case CPU_UP_PREPARE_FROZEN:
- if (process_zones(cpu))
- ret = NOTIFY_BAD;
- break;
- case CPU_UP_CANCELED:
- case CPU_UP_CANCELED_FROZEN:
- case CPU_DEAD:
- case CPU_DEAD_FROZEN:
- free_zone_pagesets(cpu);
- break;
- default:
- break;
- }
- return ret;
-}
-
-static struct notifier_block __cpuinitdata pageset_notifier =
- { &pageset_cpuup_callback, NULL, 0 };
-
void __init setup_per_cpu_pageset(void)
{
- int err;
+ struct zone *zone;
+ int cpu;
- /* Initialize per_cpu_pageset for cpu 0.
- * A cpuup callback will do this for every cpu
- * as it comes online
- */
- err = process_zones(smp_processor_id());
- BUG_ON(err);
- register_cpu_notifier(&pageset_notifier);
+ for_each_populated_zone(zone) {
+ zone->pageset = alloc_percpu(struct per_cpu_pageset);
+
+ for_each_possible_cpu(cpu) {
+ struct per_cpu_pageset *pcp = per_cpu_ptr(zone->pageset, cpu);
+
+ setup_pageset(pcp, zone_batchsize(zone));
+
+ if (percpu_pagelist_fraction)
+ setup_pagelist_highmark(pcp,
+ (zone->present_pages /
+ percpu_pagelist_fraction));
+ }
+ }
}
-#endif
-
static noinline __init_refok
int zone_wait_table_init(struct zone *zone, unsigned long zone_size_pages)
{
@@ -3264,7 +3207,7 @@
struct per_cpu_pageset *pset;
struct per_cpu_pages *pcp;
- pset = zone_pcp(zone, cpu);
+ pset = per_cpu_ptr(zone->pageset, cpu);
pcp = &pset->pcp;
local_irq_save(flags);
@@ -3282,21 +3225,17 @@
static __meminit void zone_pcp_init(struct zone *zone)
{
- int cpu;
- unsigned long batch = zone_batchsize(zone);
+ /*
+ * per cpu subsystem is not up at this point. The following code
+ * relies on the ability of the linker to provide the
+ * offset of a (static) per cpu variable into the per cpu area.
+ */
+ zone->pageset = &boot_pageset;
- for (cpu = 0; cpu < NR_CPUS; cpu++) {
-#ifdef CONFIG_NUMA
- /* Early boot. Slab allocator not functional yet */
- zone_pcp(zone, cpu) = &boot_pageset[cpu];
- setup_pageset(&boot_pageset[cpu],0);
-#else
- setup_pageset(zone_pcp(zone,cpu), batch);
-#endif
- }
if (zone->present_pages)
- printk(KERN_DEBUG " %s zone: %lu pages, LIFO batch:%lu\n",
- zone->name, zone->present_pages, batch);
+ printk(KERN_DEBUG " %s zone: %lu pages, LIFO batch:%u\n",
+ zone->name, zone->present_pages,
+ zone_batchsize(zone));
}
__meminit int init_currently_empty_zone(struct zone *zone,
@@ -3435,6 +3374,61 @@
}
}
+int __init add_from_early_node_map(struct range *range, int az,
+ int nr_range, int nid)
+{
+ int i;
+ u64 start, end;
+
+ /* need to go over early_node_map to find out good range for node */
+ for_each_active_range_index_in_nid(i, nid) {
+ start = early_node_map[i].start_pfn;
+ end = early_node_map[i].end_pfn;
+ nr_range = add_range(range, az, nr_range, start, end);
+ }
+ return nr_range;
+}
+
+#ifdef CONFIG_NO_BOOTMEM
+void * __init __alloc_memory_core_early(int nid, u64 size, u64 align,
+ u64 goal, u64 limit)
+{
+ int i;
+ void *ptr;
+
+ /* need to go over early_node_map to find out good range for node */
+ for_each_active_range_index_in_nid(i, nid) {
+ u64 addr;
+ u64 ei_start, ei_last;
+
+ ei_last = early_node_map[i].end_pfn;
+ ei_last <<= PAGE_SHIFT;
+ ei_start = early_node_map[i].start_pfn;
+ ei_start <<= PAGE_SHIFT;
+ addr = find_early_area(ei_start, ei_last,
+ goal, limit, size, align);
+
+ if (addr == -1ULL)
+ continue;
+
+#if 0
+ printk(KERN_DEBUG "alloc (nid=%d %llx - %llx) (%llx - %llx) %llx %llx => %llx\n",
+ nid,
+ ei_start, ei_last, goal, limit, size,
+ align, addr);
+#endif
+
+ ptr = phys_to_virt(addr);
+ memset(ptr, 0, size);
+ reserve_early_without_check(addr, addr + size, "BOOTMEM");
+ return ptr;
+ }
+
+ return NULL;
+}
+#endif
+
+
void __init work_with_active_regions(int nid, work_fn_t work_fn, void *data)
{
int i;
@@ -4467,7 +4461,11 @@
}
#ifndef CONFIG_NEED_MULTIPLE_NODES
-struct pglist_data __refdata contig_page_data = { .bdata = &bootmem_node_data[0] };
+struct pglist_data __refdata contig_page_data = {
+#ifndef CONFIG_NO_BOOTMEM
+ .bdata = &bootmem_node_data[0]
+#endif
+ };
EXPORT_SYMBOL(contig_page_data);
#endif
@@ -4810,10 +4808,11 @@
if (!write || (ret == -EINVAL))
return ret;
for_each_populated_zone(zone) {
- for_each_online_cpu(cpu) {
+ for_each_possible_cpu(cpu) {
unsigned long high;
high = zone->present_pages / percpu_pagelist_fraction;
- setup_pagelist_highmark(zone_pcp(zone, cpu), high);
+ setup_pagelist_highmark(
+ per_cpu_ptr(zone->pageset, cpu), high);
}
}
return 0;
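
The mm/page_alloc.c changes above make zone->pageset a per-CPU pointer: zone_pcp_init() points every zone at the single static boot_pageset early in boot, and setup_per_cpu_pageset() later gives each zone its own alloc_percpu() area once the per-CPU allocator works (the chicken-and-egg comment in __build_all_zonelists() explains why the boot set must exist first). Below is a userspace sketch of that two-stage bootstrap; the types, the per_cpu_ptr() stand-in, and the batch value 31 are illustrative only.

#include <stdio.h>
#include <stdlib.h>

#define NR_CPUS 4

struct per_cpu_pageset { int count, high, batch; };

/* stand-in for the static DEFINE_PER_CPU(struct per_cpu_pageset, boot_pageset) */
static struct per_cpu_pageset boot_pageset[NR_CPUS];

struct zone {
	const char *name;
	struct per_cpu_pageset *pageset;	/* per-CPU base pointer */
};

static struct per_cpu_pageset *per_cpu_ptr(struct per_cpu_pageset *base, int cpu)
{
	return &base[cpu];
}

static void zone_pcp_init(struct zone *zone)
{
	zone->pageset = boot_pageset;		/* early boot: shared static set */
}

static void setup_per_cpu_pageset(struct zone *zone)
{
	int cpu;

	zone->pageset = calloc(NR_CPUS, sizeof(*zone->pageset));	/* alloc_percpu() */
	for (cpu = 0; cpu < NR_CPUS; cpu++)
		per_cpu_ptr(zone->pageset, cpu)->batch = 31;		/* setup_pageset() */
}

int main(void)
{
	struct zone normal = { .name = "Normal" };

	zone_pcp_init(&normal);
	printf("early: pageset=%p (boot)\n", (void *)normal.pageset);
	setup_per_cpu_pageset(&normal);
	printf("later: pageset=%p batch=%d\n", (void *)normal.pageset,
	       per_cpu_ptr(normal.pageset, 0)->batch);
	free(normal.pageset);
	return 0;
}
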
diff --git a/mm/percpu.c b/mm/percpu.c
index 083e7c9..768419d 100644
--- a/mm/percpu.c
+++ b/mm/percpu.c
@@ -80,13 +80,15 @@
/* default addr <-> pcpu_ptr mapping, override in asm/percpu.h if necessary */
#ifndef __addr_to_pcpu_ptr
#define __addr_to_pcpu_ptr(addr) \
- (void *)((unsigned long)(addr) - (unsigned long)pcpu_base_addr \
- + (unsigned long)__per_cpu_start)
+ (void __percpu *)((unsigned long)(addr) - \
+ (unsigned long)pcpu_base_addr + \
+ (unsigned long)__per_cpu_start)
#endif
#ifndef __pcpu_ptr_to_addr
#define __pcpu_ptr_to_addr(ptr) \
- (void *)((unsigned long)(ptr) + (unsigned long)pcpu_base_addr \
- - (unsigned long)__per_cpu_start)
+ (void __force *)((unsigned long)(ptr) + \
+ (unsigned long)pcpu_base_addr - \
+ (unsigned long)__per_cpu_start)
#endif
struct pcpu_chunk {
@@ -913,11 +915,10 @@
int rs, re;
/* quick path, check whether it's empty already */
- pcpu_for_each_unpop_region(chunk, rs, re, page_start, page_end) {
- if (rs == page_start && re == page_end)
- return;
- break;
- }
+ rs = page_start;
+ pcpu_next_unpop(chunk, &rs, &re, page_end);
+ if (rs == page_start && re == page_end)
+ return;
/* immutable chunks can't be depopulated */
WARN_ON(chunk->immutable);
@@ -968,11 +969,10 @@
int rs, re, rc;
/* quick path, check whether all pages are already there */
- pcpu_for_each_pop_region(chunk, rs, re, page_start, page_end) {
- if (rs == page_start && re == page_end)
- goto clear;
- break;
- }
+ rs = page_start;
+ pcpu_next_pop(chunk, &rs, &re, page_end);
+ if (rs == page_start && re == page_end)
+ goto clear;
/* need to allocate and map pages, this chunk can't be immutable */
WARN_ON(chunk->immutable);
@@ -1067,7 +1067,7 @@
* RETURNS:
* Percpu pointer to the allocated area on success, NULL on failure.
*/
-static void *pcpu_alloc(size_t size, size_t align, bool reserved)
+static void __percpu *pcpu_alloc(size_t size, size_t align, bool reserved)
{
static int warn_limit = 10;
struct pcpu_chunk *chunk;
@@ -1196,7 +1196,7 @@
* RETURNS:
* Percpu pointer to the allocated area on success, NULL on failure.
*/
-void *__alloc_percpu(size_t size, size_t align)
+void __percpu *__alloc_percpu(size_t size, size_t align)
{
return pcpu_alloc(size, align, false);
}
@@ -1217,7 +1217,7 @@
* RETURNS:
* Percpu pointer to the allocated area on success, NULL on failure.
*/
-void *__alloc_reserved_percpu(size_t size, size_t align)
+void __percpu *__alloc_reserved_percpu(size_t size, size_t align)
{
return pcpu_alloc(size, align, true);
}
@@ -1269,7 +1269,7 @@
* CONTEXT:
* Can be called from atomic context.
*/
-void free_percpu(void *ptr)
+void free_percpu(void __percpu *ptr)
{
void *addr;
struct pcpu_chunk *chunk;
diff --git a/mm/sparse-vmemmap.c b/mm/sparse-vmemmap.c
index d9714bd..392b9bb 100644
--- a/mm/sparse-vmemmap.c
+++ b/mm/sparse-vmemmap.c
@@ -40,9 +40,11 @@
unsigned long align,
unsigned long goal)
{
- return __alloc_bootmem_node(NODE_DATA(node), size, align, goal);
+ return __alloc_bootmem_node_high(NODE_DATA(node), size, align, goal);
}
+static void *vmemmap_buf;
+static void *vmemmap_buf_end;
void * __meminit vmemmap_alloc_block(unsigned long size, int node)
{
@@ -64,6 +66,24 @@
__pa(MAX_DMA_ADDRESS));
}
+/* need to make sure the size is the same for all allocations during the early stage */
+void * __meminit vmemmap_alloc_block_buf(unsigned long size, int node)
+{
+ void *ptr;
+
+ if (!vmemmap_buf)
+ return vmemmap_alloc_block(size, node);
+
+	/* take it from the buffer */
+ ptr = (void *)ALIGN((unsigned long)vmemmap_buf, size);
+ if (ptr + size > vmemmap_buf_end)
+ return vmemmap_alloc_block(size, node);
+
+ vmemmap_buf = ptr + size;
+
+ return ptr;
+}
+
void __meminit vmemmap_verify(pte_t *pte, int node,
unsigned long start, unsigned long end)
{
@@ -80,7 +100,7 @@
pte_t *pte = pte_offset_kernel(pmd, addr);
if (pte_none(*pte)) {
pte_t entry;
- void *p = vmemmap_alloc_block(PAGE_SIZE, node);
+ void *p = vmemmap_alloc_block_buf(PAGE_SIZE, node);
if (!p)
return NULL;
entry = pfn_pte(__pa(p) >> PAGE_SHIFT, PAGE_KERNEL);
@@ -163,3 +183,55 @@
return map;
}
+
+void __init sparse_mem_maps_populate_node(struct page **map_map,
+ unsigned long pnum_begin,
+ unsigned long pnum_end,
+ unsigned long map_count, int nodeid)
+{
+ unsigned long pnum;
+ unsigned long size = sizeof(struct page) * PAGES_PER_SECTION;
+ void *vmemmap_buf_start;
+
+ size = ALIGN(size, PMD_SIZE);
+ vmemmap_buf_start = __earlyonly_bootmem_alloc(nodeid, size * map_count,
+ PMD_SIZE, __pa(MAX_DMA_ADDRESS));
+
+ if (vmemmap_buf_start) {
+ vmemmap_buf = vmemmap_buf_start;
+ vmemmap_buf_end = vmemmap_buf_start + size * map_count;
+ }
+
+ for (pnum = pnum_begin; pnum < pnum_end; pnum++) {
+ struct mem_section *ms;
+
+ if (!present_section_nr(pnum))
+ continue;
+
+ map_map[pnum] = sparse_mem_map_populate(pnum, nodeid);
+ if (map_map[pnum])
+ continue;
+ ms = __nr_to_section(pnum);
+ printk(KERN_ERR "%s: sparsemem memory map backing failed "
+ "some memory will not be available.\n", __func__);
+ ms->section_mem_map = 0;
+ }
+
+ if (vmemmap_buf_start) {
+		/* need to free the leftover buffer */
+#ifdef CONFIG_NO_BOOTMEM
+ free_early(__pa(vmemmap_buf_start), __pa(vmemmap_buf_end));
+ if (vmemmap_buf_start < vmemmap_buf) {
+ char name[15];
+
+ snprintf(name, sizeof(name), "MEMMAP %d", nodeid);
+ reserve_early_without_check(__pa(vmemmap_buf_start),
+ __pa(vmemmap_buf), name);
+ }
+#else
+ free_bootmem(__pa(vmemmap_buf), vmemmap_buf_end - vmemmap_buf);
+#endif
+ vmemmap_buf = NULL;
+ vmemmap_buf_end = NULL;
+ }
+}
diff --git a/mm/sparse.c b/mm/sparse.c
index 6ce4aab..22896d5 100644
--- a/mm/sparse.c
+++ b/mm/sparse.c
@@ -271,7 +271,8 @@
#ifdef CONFIG_MEMORY_HOTREMOVE
static unsigned long * __init
-sparse_early_usemap_alloc_pgdat_section(struct pglist_data *pgdat)
+sparse_early_usemaps_alloc_pgdat_section(struct pglist_data *pgdat,
+ unsigned long count)
{
unsigned long section_nr;
@@ -286,7 +287,7 @@
* this problem.
*/
section_nr = pfn_to_section_nr(__pa(pgdat) >> PAGE_SHIFT);
- return alloc_bootmem_section(usemap_size(), section_nr);
+ return alloc_bootmem_section(usemap_size() * count, section_nr);
}
static void __init check_usemap_section_nr(int nid, unsigned long *usemap)
@@ -329,7 +330,8 @@
}
#else
static unsigned long * __init
-sparse_early_usemap_alloc_pgdat_section(struct pglist_data *pgdat)
+sparse_early_usemaps_alloc_pgdat_section(struct pglist_data *pgdat,
+ unsigned long count)
{
return NULL;
}
@@ -339,27 +341,40 @@
}
#endif /* CONFIG_MEMORY_HOTREMOVE */
-static unsigned long *__init sparse_early_usemap_alloc(unsigned long pnum)
+static void __init sparse_early_usemaps_alloc_node(unsigned long**usemap_map,
+ unsigned long pnum_begin,
+ unsigned long pnum_end,
+ unsigned long usemap_count, int nodeid)
{
- unsigned long *usemap;
- struct mem_section *ms = __nr_to_section(pnum);
- int nid = sparse_early_nid(ms);
+ void *usemap;
+ unsigned long pnum;
+ int size = usemap_size();
- usemap = sparse_early_usemap_alloc_pgdat_section(NODE_DATA(nid));
- if (usemap)
- return usemap;
-
- usemap = alloc_bootmem_node(NODE_DATA(nid), usemap_size());
+ usemap = sparse_early_usemaps_alloc_pgdat_section(NODE_DATA(nodeid),
+ usemap_count);
if (usemap) {
- check_usemap_section_nr(nid, usemap);
- return usemap;
+ for (pnum = pnum_begin; pnum < pnum_end; pnum++) {
+ if (!present_section_nr(pnum))
+ continue;
+ usemap_map[pnum] = usemap;
+ usemap += size;
+ }
+ return;
}
- /* Stupid: suppress gcc warning for SPARSEMEM && !NUMA */
- nid = 0;
+ usemap = alloc_bootmem_node(NODE_DATA(nodeid), size * usemap_count);
+ if (usemap) {
+ for (pnum = pnum_begin; pnum < pnum_end; pnum++) {
+ if (!present_section_nr(pnum))
+ continue;
+ usemap_map[pnum] = usemap;
+ usemap += size;
+ check_usemap_section_nr(nodeid, usemap_map[pnum]);
+ }
+ return;
+ }
printk(KERN_WARNING "%s: allocation failed\n", __func__);
- return NULL;
}
#ifndef CONFIG_SPARSEMEM_VMEMMAP
@@ -375,8 +390,65 @@
PAGE_ALIGN(sizeof(struct page) * PAGES_PER_SECTION));
return map;
}
+void __init sparse_mem_maps_populate_node(struct page **map_map,
+ unsigned long pnum_begin,
+ unsigned long pnum_end,
+ unsigned long map_count, int nodeid)
+{
+ void *map;
+ unsigned long pnum;
+ unsigned long size = sizeof(struct page) * PAGES_PER_SECTION;
+
+ map = alloc_remap(nodeid, size * map_count);
+ if (map) {
+ for (pnum = pnum_begin; pnum < pnum_end; pnum++) {
+ if (!present_section_nr(pnum))
+ continue;
+ map_map[pnum] = map;
+ map += size;
+ }
+ return;
+ }
+
+ size = PAGE_ALIGN(size);
+ map = alloc_bootmem_pages_node(NODE_DATA(nodeid), size * map_count);
+ if (map) {
+ for (pnum = pnum_begin; pnum < pnum_end; pnum++) {
+ if (!present_section_nr(pnum))
+ continue;
+ map_map[pnum] = map;
+ map += size;
+ }
+ return;
+ }
+
+ /* fallback */
+ for (pnum = pnum_begin; pnum < pnum_end; pnum++) {
+ struct mem_section *ms;
+
+ if (!present_section_nr(pnum))
+ continue;
+ map_map[pnum] = sparse_mem_map_populate(pnum, nodeid);
+ if (map_map[pnum])
+ continue;
+ ms = __nr_to_section(pnum);
+ printk(KERN_ERR "%s: sparsemem memory map backing failed "
+ "some memory will not be available.\n", __func__);
+ ms->section_mem_map = 0;
+ }
+}
#endif /* !CONFIG_SPARSEMEM_VMEMMAP */
+#ifdef CONFIG_SPARSEMEM_ALLOC_MEM_MAP_TOGETHER
+static void __init sparse_early_mem_maps_alloc_node(struct page **map_map,
+ unsigned long pnum_begin,
+ unsigned long pnum_end,
+ unsigned long map_count, int nodeid)
+{
+ sparse_mem_maps_populate_node(map_map, pnum_begin, pnum_end,
+ map_count, nodeid);
+}
+#else
static struct page __init *sparse_early_mem_map_alloc(unsigned long pnum)
{
struct page *map;
@@ -392,10 +464,12 @@
ms->section_mem_map = 0;
return NULL;
}
+#endif
void __attribute__((weak)) __meminit vmemmap_populate_print_last(void)
{
}
+
/*
* Allocate the accumulated non-linear sections, allocate a mem_map
* for each and record the physical to section mapping.
@@ -407,6 +481,14 @@
unsigned long *usemap;
unsigned long **usemap_map;
int size;
+ int nodeid_begin = 0;
+ unsigned long pnum_begin = 0;
+ unsigned long usemap_count;
+#ifdef CONFIG_SPARSEMEM_ALLOC_MEM_MAP_TOGETHER
+ unsigned long map_count;
+ int size2;
+ struct page **map_map;
+#endif
/*
* map is using big page (aka 2M in x86 64 bit)
@@ -425,10 +507,81 @@
panic("can not allocate usemap_map\n");
for (pnum = 0; pnum < NR_MEM_SECTIONS; pnum++) {
+ struct mem_section *ms;
+
if (!present_section_nr(pnum))
continue;
- usemap_map[pnum] = sparse_early_usemap_alloc(pnum);
+ ms = __nr_to_section(pnum);
+ nodeid_begin = sparse_early_nid(ms);
+ pnum_begin = pnum;
+ break;
}
+ usemap_count = 1;
+ for (pnum = pnum_begin + 1; pnum < NR_MEM_SECTIONS; pnum++) {
+ struct mem_section *ms;
+ int nodeid;
+
+ if (!present_section_nr(pnum))
+ continue;
+ ms = __nr_to_section(pnum);
+ nodeid = sparse_early_nid(ms);
+ if (nodeid == nodeid_begin) {
+ usemap_count++;
+ continue;
+ }
+		/* ok, we need to take care of pnum_begin to pnum - 1 */
+ sparse_early_usemaps_alloc_node(usemap_map, pnum_begin, pnum,
+ usemap_count, nodeid_begin);
+		/* new start, update count etc. */
+ nodeid_begin = nodeid;
+ pnum_begin = pnum;
+ usemap_count = 1;
+ }
+ /* ok, last chunk */
+ sparse_early_usemaps_alloc_node(usemap_map, pnum_begin, NR_MEM_SECTIONS,
+ usemap_count, nodeid_begin);
+
+#ifdef CONFIG_SPARSEMEM_ALLOC_MEM_MAP_TOGETHER
+ size2 = sizeof(struct page *) * NR_MEM_SECTIONS;
+ map_map = alloc_bootmem(size2);
+ if (!map_map)
+ panic("can not allocate map_map\n");
+
+ for (pnum = 0; pnum < NR_MEM_SECTIONS; pnum++) {
+ struct mem_section *ms;
+
+ if (!present_section_nr(pnum))
+ continue;
+ ms = __nr_to_section(pnum);
+ nodeid_begin = sparse_early_nid(ms);
+ pnum_begin = pnum;
+ break;
+ }
+ map_count = 1;
+ for (pnum = pnum_begin + 1; pnum < NR_MEM_SECTIONS; pnum++) {
+ struct mem_section *ms;
+ int nodeid;
+
+ if (!present_section_nr(pnum))
+ continue;
+ ms = __nr_to_section(pnum);
+ nodeid = sparse_early_nid(ms);
+ if (nodeid == nodeid_begin) {
+ map_count++;
+ continue;
+ }
+		/* ok, we need to take care of pnum_begin to pnum - 1 */
+ sparse_early_mem_maps_alloc_node(map_map, pnum_begin, pnum,
+ map_count, nodeid_begin);
+		/* new start, update count etc. */
+ nodeid_begin = nodeid;
+ pnum_begin = pnum;
+ map_count = 1;
+ }
+ /* ok, last chunk */
+ sparse_early_mem_maps_alloc_node(map_map, pnum_begin, NR_MEM_SECTIONS,
+ map_count, nodeid_begin);
+#endif
for (pnum = 0; pnum < NR_MEM_SECTIONS; pnum++) {
if (!present_section_nr(pnum))
@@ -438,7 +591,11 @@
if (!usemap)
continue;
+#ifdef CONFIG_SPARSEMEM_ALLOC_MEM_MAP_TOGETHER
+ map = map_map[pnum];
+#else
map = sparse_early_mem_map_alloc(pnum);
+#endif
if (!map)
continue;
@@ -448,6 +605,9 @@
vmemmap_populate_print_last();
+#ifdef CONFIG_SPARSEMEM_ALLOC_MEM_MAP_TOGETHER
+ free_bootmem(__pa(map_map), size2);
+#endif
free_bootmem(__pa(usemap_map), size);
}
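
The mm/sparse.c rework above allocates usemaps and (optionally) mem_maps in one batch per node: sparse_init() walks the present sections in order, and each time the owning node changes it hands the accumulated run [pnum_begin, pnum) to sparse_early_usemaps_alloc_node() or sparse_early_mem_maps_alloc_node(), with a final call for the last run. A minimal model of that grouping loop with made-up node ids:

#include <stdio.h>

#define NR_SECTIONS 10

/* sample data only: node id per section, -1 meaning the section is absent */
static int section_nid[NR_SECTIONS] = { 0, 0, 0, 1, 1, -1, 1, 2, 2, 2 };

static int present_section(int pnum) { return section_nid[pnum] >= 0; }

static void alloc_node_batch(int pnum_begin, int pnum_end, int count, int nid)
{
	printf("node %d: %d section(s) in [%d, %d)\n", nid, count, pnum_begin, pnum_end);
}

int main(void)
{
	int pnum, pnum_begin = 0, nid_begin = -1, count = 0;

	for (pnum = 0; pnum < NR_SECTIONS; pnum++) {
		if (!present_section(pnum))
			continue;
		if (nid_begin < 0) {			/* first present section */
			nid_begin = section_nid[pnum];
			pnum_begin = pnum;
			count = 1;
			continue;
		}
		if (section_nid[pnum] == nid_begin) {
			count++;
			continue;
		}
		alloc_node_batch(pnum_begin, pnum, count, nid_begin);
		nid_begin = section_nid[pnum];		/* start a new run */
		pnum_begin = pnum;
		count = 1;
	}
	if (nid_begin >= 0)				/* last run */
		alloc_node_batch(pnum_begin, NR_SECTIONS, count, nid_begin);
	return 0;
}
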
diff --git a/mm/vmstat.c b/mm/vmstat.c
index 6051fba..fc5aa18 100644
--- a/mm/vmstat.c
+++ b/mm/vmstat.c
@@ -139,7 +139,8 @@
threshold = calculate_threshold(zone);
for_each_online_cpu(cpu)
- zone_pcp(zone, cpu)->stat_threshold = threshold;
+ per_cpu_ptr(zone->pageset, cpu)->stat_threshold
+ = threshold;
}
}
@@ -149,7 +150,8 @@
void __mod_zone_page_state(struct zone *zone, enum zone_stat_item item,
int delta)
{
- struct per_cpu_pageset *pcp = zone_pcp(zone, smp_processor_id());
+ struct per_cpu_pageset *pcp = this_cpu_ptr(zone->pageset);
+
s8 *p = pcp->vm_stat_diff + item;
long x;
@@ -202,7 +204,7 @@
*/
void __inc_zone_state(struct zone *zone, enum zone_stat_item item)
{
- struct per_cpu_pageset *pcp = zone_pcp(zone, smp_processor_id());
+ struct per_cpu_pageset *pcp = this_cpu_ptr(zone->pageset);
s8 *p = pcp->vm_stat_diff + item;
(*p)++;
@@ -223,7 +225,7 @@
void __dec_zone_state(struct zone *zone, enum zone_stat_item item)
{
- struct per_cpu_pageset *pcp = zone_pcp(zone, smp_processor_id());
+ struct per_cpu_pageset *pcp = this_cpu_ptr(zone->pageset);
s8 *p = pcp->vm_stat_diff + item;
(*p)--;
@@ -300,7 +302,7 @@
for_each_populated_zone(zone) {
struct per_cpu_pageset *p;
- p = zone_pcp(zone, cpu);
+ p = per_cpu_ptr(zone->pageset, cpu);
for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++)
if (p->vm_stat_diff[i]) {
@@ -741,7 +743,7 @@
for_each_online_cpu(i) {
struct per_cpu_pageset *pageset;
- pageset = zone_pcp(zone, i);
+ pageset = per_cpu_ptr(zone->pageset, i);
seq_printf(m,
"\n cpu: %i"
"\n count: %i"
@@ -906,6 +908,7 @@
case CPU_ONLINE:
case CPU_ONLINE_FROZEN:
start_cpu_timer(cpu);
+ node_set_state(cpu_to_node(cpu), N_CPU);
break;
case CPU_DOWN_PREPARE:
case CPU_DOWN_PREPARE_FROZEN:
diff --git a/security/capability.c b/security/capability.c
index 5c700e1..4875142 100644
--- a/security/capability.c
+++ b/security/capability.c
@@ -906,10 +906,6 @@
}
#endif /* CONFIG_AUDIT */
-struct security_operations default_security_ops = {
- .name = "default",
-};
-
#define set_to_cap_if_null(ops, function) \
do { \
if (!ops->function) { \
diff --git a/security/commoncap.c b/security/commoncap.c
index f800fdb..6166973 100644
--- a/security/commoncap.c
+++ b/security/commoncap.c
@@ -27,6 +27,7 @@
#include <linux/sched.h>
#include <linux/prctl.h>
#include <linux/securebits.h>
+#include <linux/syslog.h>
/*
* If a non-root user executes a setuid-root binary in
@@ -888,13 +889,17 @@
/**
* cap_syslog - Determine whether syslog function is permitted
* @type: Function requested
+ * @from_file: Whether this request came from an open file (i.e. /proc)
*
* Determine whether the current process is permitted to use a particular
* syslog function, returning 0 if permission is granted, -ve if not.
*/
-int cap_syslog(int type)
+int cap_syslog(int type, bool from_file)
{
- if ((type != 3 && type != 10) && !capable(CAP_SYS_ADMIN))
+ if (type != SYSLOG_ACTION_OPEN && from_file)
+ return 0;
+ if ((type != SYSLOG_ACTION_READ_ALL &&
+ type != SYSLOG_ACTION_SIZE_BUFFER) && !capable(CAP_SYS_ADMIN))
return -EPERM;
return 0;
}
diff --git a/security/security.c b/security/security.c
index 7da630a..b98334b 100644
--- a/security/security.c
+++ b/security/security.c
@@ -23,10 +23,12 @@
CONFIG_DEFAULT_SECURITY;
/* things that live in capability.c */
-extern struct security_operations default_security_ops;
extern void security_fixup_ops(struct security_operations *ops);
-struct security_operations *security_ops; /* Initialized to NULL */
+static struct security_operations *security_ops;
+static struct security_operations default_security_ops = {
+ .name = "default",
+};
static inline int verify(struct security_operations *ops)
{
@@ -63,6 +65,11 @@
return 0;
}
+void reset_security_ops(void)
+{
+ security_ops = &default_security_ops;
+}
+
/* Save user chosen LSM */
static int __init choose_lsm(char *str)
{
@@ -203,9 +210,9 @@
return security_ops->quota_on(dentry);
}
-int security_syslog(int type)
+int security_syslog(int type, bool from_file)
{
- return security_ops->syslog(type);
+ return security_ops->syslog(type, from_file);
}
int security_settime(struct timespec *ts, struct timezone *tz)
@@ -389,42 +396,42 @@
EXPORT_SYMBOL(security_inode_init_security);
#ifdef CONFIG_SECURITY_PATH
-int security_path_mknod(struct path *path, struct dentry *dentry, int mode,
+int security_path_mknod(struct path *dir, struct dentry *dentry, int mode,
unsigned int dev)
{
- if (unlikely(IS_PRIVATE(path->dentry->d_inode)))
+ if (unlikely(IS_PRIVATE(dir->dentry->d_inode)))
return 0;
- return security_ops->path_mknod(path, dentry, mode, dev);
+ return security_ops->path_mknod(dir, dentry, mode, dev);
}
EXPORT_SYMBOL(security_path_mknod);
-int security_path_mkdir(struct path *path, struct dentry *dentry, int mode)
+int security_path_mkdir(struct path *dir, struct dentry *dentry, int mode)
{
- if (unlikely(IS_PRIVATE(path->dentry->d_inode)))
+ if (unlikely(IS_PRIVATE(dir->dentry->d_inode)))
return 0;
- return security_ops->path_mkdir(path, dentry, mode);
+ return security_ops->path_mkdir(dir, dentry, mode);
}
-int security_path_rmdir(struct path *path, struct dentry *dentry)
+int security_path_rmdir(struct path *dir, struct dentry *dentry)
{
- if (unlikely(IS_PRIVATE(path->dentry->d_inode)))
+ if (unlikely(IS_PRIVATE(dir->dentry->d_inode)))
return 0;
- return security_ops->path_rmdir(path, dentry);
+ return security_ops->path_rmdir(dir, dentry);
}
-int security_path_unlink(struct path *path, struct dentry *dentry)
+int security_path_unlink(struct path *dir, struct dentry *dentry)
{
- if (unlikely(IS_PRIVATE(path->dentry->d_inode)))
+ if (unlikely(IS_PRIVATE(dir->dentry->d_inode)))
return 0;
- return security_ops->path_unlink(path, dentry);
+ return security_ops->path_unlink(dir, dentry);
}
-int security_path_symlink(struct path *path, struct dentry *dentry,
+int security_path_symlink(struct path *dir, struct dentry *dentry,
const char *old_name)
{
- if (unlikely(IS_PRIVATE(path->dentry->d_inode)))
+ if (unlikely(IS_PRIVATE(dir->dentry->d_inode)))
return 0;
- return security_ops->path_symlink(path, dentry, old_name);
+ return security_ops->path_symlink(dir, dentry, old_name);
}
int security_path_link(struct dentry *old_dentry, struct path *new_dir,
@@ -630,14 +637,14 @@
int security_inode_getsecurity(const struct inode *inode, const char *name, void **buffer, bool alloc)
{
if (unlikely(IS_PRIVATE(inode)))
- return 0;
+ return -EOPNOTSUPP;
return security_ops->inode_getsecurity(inode, name, buffer, alloc);
}
int security_inode_setsecurity(struct inode *inode, const char *name, const void *value, size_t size, int flags)
{
if (unlikely(IS_PRIVATE(inode)))
- return 0;
+ return -EOPNOTSUPP;
return security_ops->inode_setsecurity(inode, name, value, size, flags);
}
diff --git a/security/selinux/avc.c b/security/selinux/avc.c
index f2dde26..db0fd9f 100644
--- a/security/selinux/avc.c
+++ b/security/selinux/avc.c
@@ -489,17 +489,14 @@
struct common_audit_data stack_data;
u32 denied, audited;
denied = requested & ~avd->allowed;
- if (denied) {
- audited = denied;
- if (!(audited & avd->auditdeny))
- return;
- } else if (result) {
+ if (denied)
+ audited = denied & avd->auditdeny;
+ else if (result)
audited = denied = requested;
- } else {
- audited = requested;
- if (!(audited & avd->auditallow))
- return;
- }
+ else
+ audited = requested & avd->auditallow;
+ if (!audited)
+ return;
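The rewrite above computes the audit mask in one place: denials are filtered by auditdeny, grants by auditallow, and a zero mask returns early. A standalone sketch of that control flow, using made-up permission masks (none of these values come from a real policy):

#include <stdio.h>

int main(void)
{
	unsigned int requested = 0x7, allowed = 0x3;	/* hypothetical masks */
	unsigned int auditdeny = 0x6, auditallow = 0x1;
	int result = 0;
	unsigned int denied = requested & ~allowed;	/* 0x4 */
	unsigned int audited;

	if (denied)
		audited = denied & auditdeny;		/* 0x4: denial is logged */
	else if (result)
		audited = denied = requested;
	else
		audited = requested & auditallow;

	if (!audited)
		return 0;				/* nothing to audit */
	printf("audited mask: %#x\n", audited);
	return 0;
}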
if (!a) {
a = &stack_data;
memset(a, 0, sizeof(*a));
@@ -746,9 +743,7 @@
else
avd = &avd_entry;
- rc = security_compute_av(ssid, tsid, tclass, requested, avd);
- if (rc)
- goto out;
+ security_compute_av(ssid, tsid, tclass, avd);
rcu_read_lock();
node = avc_insert(ssid, tsid, tclass, avd);
} else {
@@ -770,7 +765,6 @@
}
rcu_read_unlock();
-out:
return rc;
}
diff --git a/security/selinux/hooks.c b/security/selinux/hooks.c
index e1202cb..63c2d36d 100644
--- a/security/selinux/hooks.c
+++ b/security/selinux/hooks.c
@@ -76,6 +76,7 @@
#include <linux/selinux.h>
#include <linux/mutex.h>
#include <linux/posix-timers.h>
+#include <linux/syslog.h>
#include "avc.h"
#include "objsec.h"
@@ -125,13 +126,6 @@
int selinux_enabled = 1;
#endif
-
-/*
- * Minimal support for a secondary security module,
- * just to allow the use of the capability module.
- */
-static struct security_operations *secondary_ops;
-
/* Lists of inode and superblock security structures initialized
before the policy was loaded. */
static LIST_HEAD(superblock_security_head);
@@ -2049,29 +2043,30 @@
return dentry_has_perm(cred, NULL, dentry, FILE__QUOTAON);
}
-static int selinux_syslog(int type)
+static int selinux_syslog(int type, bool from_file)
{
int rc;
- rc = cap_syslog(type);
+ rc = cap_syslog(type, from_file);
if (rc)
return rc;
switch (type) {
- case 3: /* Read last kernel messages */
- case 10: /* Return size of the log buffer */
+ case SYSLOG_ACTION_READ_ALL: /* Read last kernel messages */
+ case SYSLOG_ACTION_SIZE_BUFFER: /* Return size of the log buffer */
rc = task_has_system(current, SYSTEM__SYSLOG_READ);
break;
- case 6: /* Disable logging to console */
- case 7: /* Enable logging to console */
- case 8: /* Set level of messages printed to console */
+ case SYSLOG_ACTION_CONSOLE_OFF: /* Disable logging to console */
+ case SYSLOG_ACTION_CONSOLE_ON: /* Enable logging to console */
+ /* Set level of messages printed to console */
+ case SYSLOG_ACTION_CONSOLE_LEVEL:
rc = task_has_system(current, SYSTEM__SYSLOG_CONSOLE);
break;
- case 0: /* Close log */
- case 1: /* Open log */
- case 2: /* Read from log */
- case 4: /* Read/clear last kernel messages */
- case 5: /* Clear ring buffer */
+ case SYSLOG_ACTION_CLOSE: /* Close log */
+ case SYSLOG_ACTION_OPEN: /* Open log */
+ case SYSLOG_ACTION_READ: /* Read from log */
+ case SYSLOG_ACTION_READ_CLEAR: /* Read/clear last kernel messages */
+ case SYSLOG_ACTION_CLEAR: /* Clear ring buffer */
default:
rc = task_has_system(current, SYSTEM__SYSLOG_MOD);
break;
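The magic numbers above are replaced with the SYSLOG_ACTION_* constants from <linux/syslog.h>. For reference, a minimal userspace trigger for the read path, using glibc's klogctl() wrapper; the raw numbers are spelled out because userspace headers of this era may not export the symbolic names:

#include <stdio.h>
#include <sys/klog.h>

int main(void)
{
	/* 10 == SYSLOG_ACTION_SIZE_BUFFER, 3 == SYSLOG_ACTION_READ_ALL;
	 * both are gated by the SYSTEM__SYSLOG_READ check in the hook above. */
	int size = klogctl(10, NULL, 0);

	printf("kernel log buffer: %d bytes\n", size);
	return 0;
}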
@@ -3334,7 +3329,7 @@
if (ret == 0)
tsec->create_sid = isec->sid;
- return 0;
+ return ret;
}
static int selinux_kernel_module_request(char *kmod_name)
@@ -5672,9 +5667,6 @@
0, SLAB_PANIC, NULL);
avc_init();
- secondary_ops = security_ops;
- if (!secondary_ops)
- panic("SELinux: No initial security operations\n");
if (register_security(&selinux_ops))
panic("SELinux: Unable to register with kernel.\n");
@@ -5835,8 +5827,7 @@
selinux_disabled = 1;
selinux_enabled = 0;
- /* Reset security_ops to the secondary module, dummy or capability. */
- security_ops = secondary_ops;
+ reset_security_ops();
/* Try to destroy the avc node cache */
avc_disable();
diff --git a/security/selinux/include/security.h b/security/selinux/include/security.h
index 2553266..1f7c249 100644
--- a/security/selinux/include/security.h
+++ b/security/selinux/include/security.h
@@ -57,7 +57,6 @@
struct netlbl_lsm_secattr;
extern int selinux_enabled;
-extern int selinux_mls_enabled;
/* Policy capabilities */
enum {
@@ -80,6 +79,8 @@
/* limitation of boundary depth */
#define POLICYDB_BOUNDS_MAXDEPTH 4
+int security_mls_enabled(void);
+
int security_load_policy(void *data, size_t len);
int security_policycap_supported(unsigned int req_cap);
@@ -96,13 +97,11 @@
/* definitions of av_decision.flags */
#define AVD_FLAGS_PERMISSIVE 0x0001
-int security_compute_av(u32 ssid, u32 tsid,
- u16 tclass, u32 requested,
- struct av_decision *avd);
+void security_compute_av(u32 ssid, u32 tsid,
+ u16 tclass, struct av_decision *avd);
-int security_compute_av_user(u32 ssid, u32 tsid,
- u16 tclass, u32 requested,
- struct av_decision *avd);
+void security_compute_av_user(u32 ssid, u32 tsid,
+ u16 tclass, struct av_decision *avd);
int security_transition_sid(u32 ssid, u32 tsid,
u16 tclass, u32 *out_sid);
diff --git a/security/selinux/selinuxfs.c b/security/selinux/selinuxfs.c
index fab36fd..cd191bb 100644
--- a/security/selinux/selinuxfs.c
+++ b/security/selinux/selinuxfs.c
@@ -282,7 +282,8 @@
char tmpbuf[TMPBUFLEN];
ssize_t length;
- length = scnprintf(tmpbuf, TMPBUFLEN, "%d", selinux_mls_enabled);
+ length = scnprintf(tmpbuf, TMPBUFLEN, "%d",
+ security_mls_enabled());
return simple_read_from_buffer(buf, count, ppos, tmpbuf, length);
}
@@ -494,7 +495,6 @@
char *scon, *tcon;
u32 ssid, tsid;
u16 tclass;
- u32 req;
struct av_decision avd;
ssize_t length;
@@ -512,7 +512,7 @@
goto out;
length = -EINVAL;
- if (sscanf(buf, "%s %s %hu %x", scon, tcon, &tclass, &req) != 4)
+ if (sscanf(buf, "%s %s %hu", scon, tcon, &tclass) != 3)
goto out2;
length = security_context_to_sid(scon, strlen(scon)+1, &ssid);
@@ -522,9 +522,7 @@
if (length < 0)
goto out2;
- length = security_compute_av_user(ssid, tsid, tclass, req, &avd);
- if (length < 0)
- goto out2;
+ security_compute_av_user(ssid, tsid, tclass, &avd);
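With this change the kernel parses only "scon tcon tclass" from the transaction buffer; older userspace that still appends a requested mask keeps working because sscanf() stops after the third conversion. A rough userspace sketch of the query, assuming the usual selinuxfs transaction pattern (write the request, then read the reply, formatted by the scnprintf() just below, on the same fd); the mount point, contexts and class number are illustrative only:

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	char buf[256];
	ssize_t n;
	int fd = open("/selinux/access", O_RDWR);

	if (fd < 0)
		return 1;
	/* "scon tcon tclass" is all the kernel needs now */
	snprintf(buf, sizeof(buf),
		 "system_u:system_r:init_t:s0 system_u:object_r:etc_t:s0 6");
	if (write(fd, buf, strlen(buf)) < 0)
		return 1;
	n = read(fd, buf, sizeof(buf) - 1);
	if (n < 0)
		return 1;
	buf[n] = '\0';
	/* reply: allowed decided auditallow auditdeny seqno flags */
	printf("%s\n", buf);
	close(fd);
	return 0;
}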
length = scnprintf(buf, SIMPLE_TRANSACTION_LIMIT,
"%x %x %x %x %u %x",
@@ -979,6 +977,8 @@
u32 sid;
/* remove any existing files */
+ for (i = 0; i < bool_num; i++)
+ kfree(bool_pending_names[i]);
kfree(bool_pending_names);
kfree(bool_pending_values);
bool_pending_names = NULL;
diff --git a/security/selinux/ss/context.h b/security/selinux/ss/context.h
index d9dd7a2..45e8fb0 100644
--- a/security/selinux/ss/context.h
+++ b/security/selinux/ss/context.h
@@ -41,9 +41,6 @@
{
int rc;
- if (!selinux_mls_enabled)
- return 0;
-
dst->range.level[0].sens = src->range.level[0].sens;
rc = ebitmap_cpy(&dst->range.level[0].cat, &src->range.level[0].cat);
if (rc)
@@ -64,9 +61,6 @@
{
int rc;
- if (!selinux_mls_enabled)
- return 0;
-
dst->range.level[0].sens = src->range.level[0].sens;
rc = ebitmap_cpy(&dst->range.level[0].cat, &src->range.level[0].cat);
if (rc)
@@ -82,9 +76,6 @@
static inline int mls_context_cmp(struct context *c1, struct context *c2)
{
- if (!selinux_mls_enabled)
- return 1;
-
return ((c1->range.level[0].sens == c2->range.level[0].sens) &&
ebitmap_cmp(&c1->range.level[0].cat, &c2->range.level[0].cat) &&
(c1->range.level[1].sens == c2->range.level[1].sens) &&
@@ -93,9 +84,6 @@
static inline void mls_context_destroy(struct context *c)
{
- if (!selinux_mls_enabled)
- return;
-
ebitmap_destroy(&c->range.level[0].cat);
ebitmap_destroy(&c->range.level[1].cat);
mls_context_init(c);
diff --git a/security/selinux/ss/mls.c b/security/selinux/ss/mls.c
index 3f2b270..372b773 100644
--- a/security/selinux/ss/mls.c
+++ b/security/selinux/ss/mls.c
@@ -39,7 +39,7 @@
struct ebitmap *e;
struct ebitmap_node *node;
- if (!selinux_mls_enabled)
+ if (!policydb.mls_enabled)
return 0;
len = 1; /* for the beginning ":" */
@@ -93,7 +93,7 @@
struct ebitmap *e;
struct ebitmap_node *node;
- if (!selinux_mls_enabled)
+ if (!policydb.mls_enabled)
return;
scontextp = *scontext;
@@ -200,7 +200,7 @@
{
struct user_datum *usrdatum;
- if (!selinux_mls_enabled)
+ if (!p->mls_enabled)
return 1;
if (!mls_range_isvalid(p, &c->range))
@@ -253,7 +253,7 @@
struct cat_datum *catdatum, *rngdatum;
int l, rc = -EINVAL;
- if (!selinux_mls_enabled) {
+ if (!pol->mls_enabled) {
if (def_sid != SECSID_NULL && oldc)
*scontext += strlen(*scontext)+1;
return 0;
@@ -387,7 +387,7 @@
char *tmpstr, *freestr;
int rc;
- if (!selinux_mls_enabled)
+ if (!policydb.mls_enabled)
return -EINVAL;
/* we need freestr because mls_context_to_sid will change
@@ -407,7 +407,7 @@
/*
* Copies the MLS range `range' into `context'.
*/
-static inline int mls_range_set(struct context *context,
+int mls_range_set(struct context *context,
struct mls_range *range)
{
int l, rc = 0;
@@ -427,7 +427,7 @@
int mls_setup_user_range(struct context *fromcon, struct user_datum *user,
struct context *usercon)
{
- if (selinux_mls_enabled) {
+ if (policydb.mls_enabled) {
struct mls_level *fromcon_sen = &(fromcon->range.level[0]);
struct mls_level *fromcon_clr = &(fromcon->range.level[1]);
struct mls_level *user_low = &(user->range.level[0]);
@@ -477,7 +477,7 @@
struct ebitmap_node *node;
int l, i;
- if (!selinux_mls_enabled)
+ if (!policydb.mls_enabled)
return 0;
for (l = 0; l < 2; l++) {
@@ -513,23 +513,21 @@
u32 specified,
struct context *newcontext)
{
- struct range_trans *rtr;
+ struct range_trans rtr;
+ struct mls_range *r;
- if (!selinux_mls_enabled)
+ if (!policydb.mls_enabled)
return 0;
switch (specified) {
case AVTAB_TRANSITION:
/* Look for a range transition rule. */
- for (rtr = policydb.range_tr; rtr; rtr = rtr->next) {
- if (rtr->source_type == scontext->type &&
- rtr->target_type == tcontext->type &&
- rtr->target_class == tclass) {
- /* Set the range from the rule */
- return mls_range_set(newcontext,
- &rtr->target_range);
- }
- }
+ rtr.source_type = scontext->type;
+ rtr.target_type = tcontext->type;
+ rtr.target_class = tclass;
+ r = hashtab_search(policydb.range_tr, &rtr);
+ if (r)
+ return mls_range_set(newcontext, r);
/* Fallthrough */
case AVTAB_CHANGE:
if (tclass == policydb.process_class)
@@ -541,8 +539,8 @@
case AVTAB_MEMBER:
/* Use the process effective MLS attributes. */
return mls_context_cpy_low(newcontext, scontext);
- default:
- return -EINVAL;
+
+ /* fall through */
}
return -EINVAL;
}
@@ -561,7 +559,7 @@
void mls_export_netlbl_lvl(struct context *context,
struct netlbl_lsm_secattr *secattr)
{
- if (!selinux_mls_enabled)
+ if (!policydb.mls_enabled)
return;
secattr->attr.mls.lvl = context->range.level[0].sens - 1;
@@ -581,7 +579,7 @@
void mls_import_netlbl_lvl(struct context *context,
struct netlbl_lsm_secattr *secattr)
{
- if (!selinux_mls_enabled)
+ if (!policydb.mls_enabled)
return;
context->range.level[0].sens = secattr->attr.mls.lvl + 1;
@@ -603,7 +601,7 @@
{
int rc;
- if (!selinux_mls_enabled)
+ if (!policydb.mls_enabled)
return 0;
rc = ebitmap_netlbl_export(&context->range.level[0].cat,
@@ -631,7 +629,7 @@
{
int rc;
- if (!selinux_mls_enabled)
+ if (!policydb.mls_enabled)
return 0;
rc = ebitmap_netlbl_import(&context->range.level[0].cat,
diff --git a/security/selinux/ss/mls.h b/security/selinux/ss/mls.h
index 1276715..cd91526 100644
--- a/security/selinux/ss/mls.h
+++ b/security/selinux/ss/mls.h
@@ -39,6 +39,8 @@
int mls_from_string(char *str, struct context *context, gfp_t gfp_mask);
+int mls_range_set(struct context *context, struct mls_range *range);
+
int mls_convert_context(struct policydb *oldp,
struct policydb *newp,
struct context *context);
diff --git a/security/selinux/ss/mls_types.h b/security/selinux/ss/mls_types.h
index b6e943a..03bed52 100644
--- a/security/selinux/ss/mls_types.h
+++ b/security/selinux/ss/mls_types.h
@@ -15,6 +15,7 @@
#define _SS_MLS_TYPES_H_
#include "security.h"
+#include "ebitmap.h"
struct mls_level {
u32 sens; /* sensitivity */
@@ -27,18 +28,12 @@
static inline int mls_level_eq(struct mls_level *l1, struct mls_level *l2)
{
- if (!selinux_mls_enabled)
- return 1;
-
return ((l1->sens == l2->sens) &&
ebitmap_cmp(&l1->cat, &l2->cat));
}
static inline int mls_level_dom(struct mls_level *l1, struct mls_level *l2)
{
- if (!selinux_mls_enabled)
- return 1;
-
return ((l1->sens >= l2->sens) &&
ebitmap_contains(&l1->cat, &l2->cat));
}
diff --git a/security/selinux/ss/policydb.c b/security/selinux/ss/policydb.c
index 6236198..24ced65 100644
--- a/security/selinux/ss/policydb.c
+++ b/security/selinux/ss/policydb.c
@@ -52,8 +52,6 @@
};
#endif
-int selinux_mls_enabled;
-
static unsigned int symtab_sizes[SYM_NUM] = {
2,
32,
@@ -177,6 +175,21 @@
goto out;
}
+static u32 rangetr_hash(struct hashtab *h, const void *k)
+{
+ const struct range_trans *key = k;
+ return (key->source_type + (key->target_type << 3) +
+ (key->target_class << 5)) & (h->size - 1);
+}
+
+static int rangetr_cmp(struct hashtab *h, const void *k1, const void *k2)
+{
+ const struct range_trans *key1 = k1, *key2 = k2;
+ return (key1->source_type != key2->source_type ||
+ key1->target_type != key2->target_type ||
+ key1->target_class != key2->target_class);
+}
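rangetr_hash() folds the (source type, target type, target class) triple into a bucket index; the `& (h->size - 1)` mask assumes a power-of-two table size, and 256 is what policydb_init() passes below. A self-contained sketch of the same bucket computation with hypothetical key values, just to illustrate the mixing:

#include <stdio.h>

struct range_trans_key {
	unsigned int source_type;
	unsigned int target_type;
	unsigned int target_class;
};

/* mirrors rangetr_hash() above; size must be a power of two */
static unsigned int rangetr_bucket(const struct range_trans_key *key,
				   unsigned int size)
{
	return (key->source_type + (key->target_type << 3) +
		(key->target_class << 5)) & (size - 1);
}

int main(void)
{
	struct range_trans_key key = {
		.source_type = 42, .target_type = 7, .target_class = 2
	};

	printf("bucket: %u\n", rangetr_bucket(&key, 256));
	return 0;
}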
+
/*
* Initialize a policy database structure.
*/
@@ -204,6 +217,10 @@
if (rc)
goto out_free_symtab;
+ p->range_tr = hashtab_create(rangetr_hash, rangetr_cmp, 256);
+ if (!p->range_tr)
+ goto out_free_symtab;
+
ebitmap_init(&p->policycaps);
ebitmap_init(&p->permissive_map);
@@ -408,6 +425,20 @@
info.slots_used, h->size, info.max_chain_len);
}
}
+
+static void rangetr_hash_eval(struct hashtab *h)
+{
+ struct hashtab_info info;
+
+ hashtab_stat(h, &info);
+ printk(KERN_DEBUG "SELinux: rangetr: %d entries and %d/%d buckets used, "
+ "longest chain length %d\n", h->nel,
+ info.slots_used, h->size, info.max_chain_len);
+}
+#else
+static inline void rangetr_hash_eval(struct hashtab *h)
+{
+}
#endif
/*
@@ -422,7 +453,7 @@
printk(KERN_DEBUG "SELinux: %d users, %d roles, %d types, %d bools",
p->p_users.nprim, p->p_roles.nprim, p->p_types.nprim, p->p_bools.nprim);
- if (selinux_mls_enabled)
+ if (p->mls_enabled)
printk(", %d sens, %d cats", p->p_levels.nprim,
p->p_cats.nprim);
printk("\n");
@@ -612,6 +643,17 @@
cat_destroy,
};
+static int range_tr_destroy(void *key, void *datum, void *p)
+{
+ struct mls_range *rt = datum;
+ kfree(key);
+ ebitmap_destroy(&rt->level[0].cat);
+ ebitmap_destroy(&rt->level[1].cat);
+ kfree(datum);
+ cond_resched();
+ return 0;
+}
+
static void ocontext_destroy(struct ocontext *c, int i)
{
context_destroy(&c->context[0]);
@@ -632,7 +674,6 @@
int i;
struct role_allow *ra, *lra = NULL;
struct role_trans *tr, *ltr = NULL;
- struct range_trans *rt, *lrt = NULL;
for (i = 0; i < SYM_NUM; i++) {
cond_resched();
@@ -693,20 +734,8 @@
}
kfree(lra);
- for (rt = p->range_tr; rt; rt = rt->next) {
- cond_resched();
- if (lrt) {
- ebitmap_destroy(&lrt->target_range.level[0].cat);
- ebitmap_destroy(&lrt->target_range.level[1].cat);
- kfree(lrt);
- }
- lrt = rt;
- }
- if (lrt) {
- ebitmap_destroy(&lrt->target_range.level[0].cat);
- ebitmap_destroy(&lrt->target_range.level[1].cat);
- kfree(lrt);
- }
+ hashtab_map(p->range_tr, range_tr_destroy, NULL);
+ hashtab_destroy(p->range_tr);
if (p->type_attr_map) {
for (i = 0; i < p->p_types.nprim; i++)
@@ -1686,12 +1715,11 @@
int i, j, rc;
__le32 buf[4];
u32 nodebuf[8];
- u32 len, len2, config, nprim, nel, nel2;
+ u32 len, len2, nprim, nel, nel2;
char *policydb_str;
struct policydb_compat_info *info;
- struct range_trans *rt, *lrt;
-
- config = 0;
+ struct range_trans *rt;
+ struct mls_range *r;
rc = policydb_init(p);
if (rc)
@@ -1740,7 +1768,7 @@
kfree(policydb_str);
policydb_str = NULL;
- /* Read the version, config, and table sizes. */
+ /* Read the version and table sizes. */
rc = next_entry(buf, fp, sizeof(u32)*4);
if (rc < 0)
goto bad;
@@ -1755,13 +1783,7 @@
}
if ((le32_to_cpu(buf[1]) & POLICYDB_CONFIG_MLS)) {
- if (ss_initialized && !selinux_mls_enabled) {
- printk(KERN_ERR "SELinux: Cannot switch between non-MLS"
- " and MLS policies\n");
- goto bad;
- }
- selinux_mls_enabled = 1;
- config |= POLICYDB_CONFIG_MLS;
+ p->mls_enabled = 1;
if (p->policyvers < POLICYDB_VERSION_MLS) {
printk(KERN_ERR "SELinux: security policydb version %d "
@@ -1769,12 +1791,6 @@
p->policyvers);
goto bad;
}
- } else {
- if (ss_initialized && selinux_mls_enabled) {
- printk(KERN_ERR "SELinux: Cannot switch between MLS and"
- " non-MLS policies\n");
- goto bad;
- }
}
p->reject_unknown = !!(le32_to_cpu(buf[1]) & REJECT_UNKNOWN);
p->allow_unknown = !!(le32_to_cpu(buf[1]) & ALLOW_UNKNOWN);
@@ -2122,44 +2138,61 @@
if (rc < 0)
goto bad;
nel = le32_to_cpu(buf[0]);
- lrt = NULL;
for (i = 0; i < nel; i++) {
rt = kzalloc(sizeof(*rt), GFP_KERNEL);
if (!rt) {
rc = -ENOMEM;
goto bad;
}
- if (lrt)
- lrt->next = rt;
- else
- p->range_tr = rt;
rc = next_entry(buf, fp, (sizeof(u32) * 2));
- if (rc < 0)
+ if (rc < 0) {
+ kfree(rt);
goto bad;
+ }
rt->source_type = le32_to_cpu(buf[0]);
rt->target_type = le32_to_cpu(buf[1]);
if (new_rangetr) {
rc = next_entry(buf, fp, sizeof(u32));
- if (rc < 0)
+ if (rc < 0) {
+ kfree(rt);
goto bad;
+ }
rt->target_class = le32_to_cpu(buf[0]);
} else
rt->target_class = p->process_class;
if (!policydb_type_isvalid(p, rt->source_type) ||
!policydb_type_isvalid(p, rt->target_type) ||
!policydb_class_isvalid(p, rt->target_class)) {
+ kfree(rt);
rc = -EINVAL;
goto bad;
}
- rc = mls_read_range_helper(&rt->target_range, fp);
- if (rc)
- goto bad;
- if (!mls_range_isvalid(p, &rt->target_range)) {
- printk(KERN_WARNING "SELinux: rangetrans: invalid range\n");
+ r = kzalloc(sizeof(*r), GFP_KERNEL);
+ if (!r) {
+ kfree(rt);
+ rc = -ENOMEM;
goto bad;
}
- lrt = rt;
+ rc = mls_read_range_helper(r, fp);
+ if (rc) {
+ kfree(rt);
+ kfree(r);
+ goto bad;
+ }
+ if (!mls_range_isvalid(p, r)) {
+ printk(KERN_WARNING "SELinux: rangetrans: invalid range\n");
+ kfree(rt);
+ kfree(r);
+ goto bad;
+ }
+ rc = hashtab_insert(p->range_tr, rt, r);
+ if (rc) {
+ kfree(rt);
+ kfree(r);
+ goto bad;
+ }
}
+ rangetr_hash_eval(p->range_tr);
}
p->type_attr_map = kmalloc(p->p_types.nprim*sizeof(struct ebitmap), GFP_KERNEL);
diff --git a/security/selinux/ss/policydb.h b/security/selinux/ss/policydb.h
index cdcc570..26d9adf 100644
--- a/security/selinux/ss/policydb.h
+++ b/security/selinux/ss/policydb.h
@@ -27,6 +27,8 @@
#include "symtab.h"
#include "avtab.h"
#include "sidtab.h"
+#include "ebitmap.h"
+#include "mls_types.h"
#include "context.h"
#include "constraint.h"
@@ -113,8 +115,6 @@
u32 source_type;
u32 target_type;
u32 target_class;
- struct mls_range target_range;
- struct range_trans *next;
};
/* Boolean data type */
@@ -187,6 +187,8 @@
/* The policy database */
struct policydb {
+ int mls_enabled;
+
/* symbol tables */
struct symtab symtab[SYM_NUM];
#define p_commons symtab[SYM_COMMONS]
@@ -240,8 +242,8 @@
fixed labeling behavior. */
struct genfs *genfs;
- /* range transitions */
- struct range_trans *range_tr;
+ /* range transitions table (range_trans_key -> mls_range) */
+ struct hashtab *range_tr;
/* type -> attribute reverse mapping */
struct ebitmap *type_attr_map;
diff --git a/security/selinux/ss/services.c b/security/selinux/ss/services.c
index b3efae2..cf27b3e 100644
--- a/security/selinux/ss/services.c
+++ b/security/selinux/ss/services.c
@@ -26,6 +26,10 @@
*
* Added support for bounds domain and audit messages on masked permissions
*
+ * Updated: Guido Trentalancia <guido@trentalancia.com>
+ *
+ * Added support for runtime switching of the policy type
+ *
* Copyright (C) 2008, 2009 NEC Corporation
* Copyright (C) 2006, 2007 Hewlett-Packard Development Company, L.P.
* Copyright (C) 2004-2006 Trusted Computer Solutions, Inc.
@@ -87,11 +91,10 @@
static int context_struct_to_string(struct context *context, char **scontext,
u32 *scontext_len);
-static int context_struct_compute_av(struct context *scontext,
- struct context *tcontext,
- u16 tclass,
- u32 requested,
- struct av_decision *avd);
+static void context_struct_compute_av(struct context *scontext,
+ struct context *tcontext,
+ u16 tclass,
+ struct av_decision *avd);
struct selinux_mapping {
u16 value; /* policy value */
@@ -196,23 +199,6 @@
return tclass;
}
-static u32 unmap_perm(u16 tclass, u32 tperm)
-{
- if (tclass < current_mapping_size) {
- unsigned i;
- u32 kperm = 0;
-
- for (i = 0; i < current_mapping[tclass].num_perms; i++)
- if (tperm & (1<<i)) {
- kperm |= current_mapping[tclass].perms[i];
- tperm &= ~(1<<i);
- }
- return kperm;
- }
-
- return tperm;
-}
-
static void map_decision(u16 tclass, struct av_decision *avd,
int allow_unknown)
{
@@ -250,6 +236,10 @@
}
}
+int security_mls_enabled(void)
+{
+ return policydb.mls_enabled;
+}
/*
* Return the boolean value of a constraint expression
@@ -465,7 +455,8 @@
char *scontext_name = NULL;
char *tcontext_name = NULL;
char *permission_names[32];
- int index, length;
+ int index;
+ u32 length;
bool need_comma = false;
if (!permissions)
@@ -532,7 +523,6 @@
static void type_attribute_bounds_av(struct context *scontext,
struct context *tcontext,
u16 tclass,
- u32 requested,
struct av_decision *avd)
{
struct context lo_scontext;
@@ -553,7 +543,6 @@
context_struct_compute_av(&lo_scontext,
tcontext,
tclass,
- requested,
&lo_avd);
if ((lo_avd.allowed & avd->allowed) == avd->allowed)
return; /* no masked permission */
@@ -569,7 +558,6 @@
context_struct_compute_av(scontext,
&lo_tcontext,
tclass,
- requested,
&lo_avd);
if ((lo_avd.allowed & avd->allowed) == avd->allowed)
return; /* no masked permission */
@@ -586,7 +574,6 @@
context_struct_compute_av(&lo_scontext,
&lo_tcontext,
tclass,
- requested,
&lo_avd);
if ((lo_avd.allowed & avd->allowed) == avd->allowed)
return; /* no masked permission */
@@ -607,11 +594,10 @@
* Compute access vectors based on a context structure pair for
* the permissions in a particular class.
*/
-static int context_struct_compute_av(struct context *scontext,
- struct context *tcontext,
- u16 tclass,
- u32 requested,
- struct av_decision *avd)
+static void context_struct_compute_av(struct context *scontext,
+ struct context *tcontext,
+ u16 tclass,
+ struct av_decision *avd)
{
struct constraint_node *constraint;
struct role_allow *ra;
@@ -622,19 +608,14 @@
struct ebitmap_node *snode, *tnode;
unsigned int i, j;
- /*
- * Initialize the access vectors to the default values.
- */
avd->allowed = 0;
avd->auditallow = 0;
avd->auditdeny = 0xffffffff;
- avd->seqno = latest_granting;
- avd->flags = 0;
if (unlikely(!tclass || tclass > policydb.p_classes.nprim)) {
if (printk_ratelimit())
printk(KERN_WARNING "SELinux: Invalid class %hu\n", tclass);
- return -EINVAL;
+ return;
}
tclass_datum = policydb.class_val_to_struct[tclass - 1];
@@ -705,9 +686,7 @@
* permission and notice it to userspace via audit.
*/
type_attribute_bounds_av(scontext, tcontext,
- tclass, requested, avd);
-
- return 0;
+ tclass, avd);
}
static int security_validtrans_handle_fail(struct context *ocontext,
@@ -864,7 +843,7 @@
if (rc) {
char *old_name = NULL;
char *new_name = NULL;
- int length;
+ u32 length;
if (!context_struct_to_string(old_context,
&old_name, &length) &&
@@ -886,110 +865,116 @@
return rc;
}
-
-static int security_compute_av_core(u32 ssid,
- u32 tsid,
- u16 tclass,
- u32 requested,
- struct av_decision *avd)
+static void avd_init(struct av_decision *avd)
{
- struct context *scontext = NULL, *tcontext = NULL;
- int rc = 0;
-
- scontext = sidtab_search(&sidtab, ssid);
- if (!scontext) {
- printk(KERN_ERR "SELinux: %s: unrecognized SID %d\n",
- __func__, ssid);
- return -EINVAL;
- }
- tcontext = sidtab_search(&sidtab, tsid);
- if (!tcontext) {
- printk(KERN_ERR "SELinux: %s: unrecognized SID %d\n",
- __func__, tsid);
- return -EINVAL;
- }
-
- rc = context_struct_compute_av(scontext, tcontext, tclass,
- requested, avd);
-
- /* permissive domain? */
- if (ebitmap_get_bit(&policydb.permissive_map, scontext->type))
- avd->flags |= AVD_FLAGS_PERMISSIVE;
-
- return rc;
+ avd->allowed = 0;
+ avd->auditallow = 0;
+ avd->auditdeny = 0xffffffff;
+ avd->seqno = latest_granting;
+ avd->flags = 0;
}
+
/**
* security_compute_av - Compute access vector decisions.
* @ssid: source security identifier
* @tsid: target security identifier
* @tclass: target security class
- * @requested: requested permissions
* @avd: access vector decisions
*
* Compute a set of access vector decisions based on the
* SID pair (@ssid, @tsid) for the permissions in @tclass.
- * Return -%EINVAL if any of the parameters are invalid or %0
- * if the access vector decisions were computed successfully.
*/
-int security_compute_av(u32 ssid,
- u32 tsid,
- u16 orig_tclass,
- u32 orig_requested,
- struct av_decision *avd)
+void security_compute_av(u32 ssid,
+ u32 tsid,
+ u16 orig_tclass,
+ struct av_decision *avd)
{
u16 tclass;
- u32 requested;
- int rc;
+ struct context *scontext = NULL, *tcontext = NULL;
read_lock(&policy_rwlock);
-
+ avd_init(avd);
if (!ss_initialized)
goto allow;
- requested = unmap_perm(orig_tclass, orig_requested);
+ scontext = sidtab_search(&sidtab, ssid);
+ if (!scontext) {
+ printk(KERN_ERR "SELinux: %s: unrecognized SID %d\n",
+ __func__, ssid);
+ goto out;
+ }
+
+ /* permissive domain? */
+ if (ebitmap_get_bit(&policydb.permissive_map, scontext->type))
+ avd->flags |= AVD_FLAGS_PERMISSIVE;
+
+ tcontext = sidtab_search(&sidtab, tsid);
+ if (!tcontext) {
+ printk(KERN_ERR "SELinux: %s: unrecognized SID %d\n",
+ __func__, tsid);
+ goto out;
+ }
+
tclass = unmap_class(orig_tclass);
if (unlikely(orig_tclass && !tclass)) {
if (policydb.allow_unknown)
goto allow;
- rc = -EINVAL;
goto out;
}
- rc = security_compute_av_core(ssid, tsid, tclass, requested, avd);
+ context_struct_compute_av(scontext, tcontext, tclass, avd);
map_decision(orig_tclass, avd, policydb.allow_unknown);
out:
read_unlock(&policy_rwlock);
- return rc;
+ return;
allow:
avd->allowed = 0xffffffff;
- avd->auditallow = 0;
- avd->auditdeny = 0xffffffff;
- avd->seqno = latest_granting;
- avd->flags = 0;
- rc = 0;
goto out;
}
-int security_compute_av_user(u32 ssid,
- u32 tsid,
- u16 tclass,
- u32 requested,
- struct av_decision *avd)
+void security_compute_av_user(u32 ssid,
+ u32 tsid,
+ u16 tclass,
+ struct av_decision *avd)
{
- int rc;
-
- if (!ss_initialized) {
- avd->allowed = 0xffffffff;
- avd->auditallow = 0;
- avd->auditdeny = 0xffffffff;
- avd->seqno = latest_granting;
- return 0;
- }
+ struct context *scontext = NULL, *tcontext = NULL;
read_lock(&policy_rwlock);
- rc = security_compute_av_core(ssid, tsid, tclass, requested, avd);
+ avd_init(avd);
+ if (!ss_initialized)
+ goto allow;
+
+ scontext = sidtab_search(&sidtab, ssid);
+ if (!scontext) {
+ printk(KERN_ERR "SELinux: %s: unrecognized SID %d\n",
+ __func__, ssid);
+ goto out;
+ }
+
+ /* permissive domain? */
+ if (ebitmap_get_bit(&policydb.permissive_map, scontext->type))
+ avd->flags |= AVD_FLAGS_PERMISSIVE;
+
+ tcontext = sidtab_search(&sidtab, tsid);
+ if (!tcontext) {
+ printk(KERN_ERR "SELinux: %s: unrecognized SID %d\n",
+ __func__, tsid);
+ goto out;
+ }
+
+ if (unlikely(!tclass)) {
+ if (policydb.allow_unknown)
+ goto allow;
+ goto out;
+ }
+
+ context_struct_compute_av(scontext, tcontext, tclass, avd);
+ out:
read_unlock(&policy_rwlock);
- return rc;
+ return;
+allow:
+ avd->allowed = 0xffffffff;
+ goto out;
}
/*
@@ -1565,7 +1550,10 @@
{
struct sidtab *s = arg;
- return sidtab_insert(s, sid, context);
+ if (sid > SECINITSID_NUM)
+ return sidtab_insert(s, sid, context);
+ else
+ return 0;
}
static inline int convert_context_handle_invalid_context(struct context *context)
@@ -1606,12 +1594,17 @@
{
struct convert_context_args *args;
struct context oldc;
+ struct ocontext *oc;
+ struct mls_range *range;
struct role_datum *role;
struct type_datum *typdatum;
struct user_datum *usrdatum;
char *s;
u32 len;
- int rc;
+ int rc = 0;
+
+ if (key <= SECINITSID_NUM)
+ goto out;
args = p;
@@ -1673,9 +1666,39 @@
goto bad;
c->type = typdatum->value;
- rc = mls_convert_context(args->oldp, args->newp, c);
- if (rc)
- goto bad;
+ /* Convert the MLS fields if dealing with MLS policies */
+ if (args->oldp->mls_enabled && args->newp->mls_enabled) {
+ rc = mls_convert_context(args->oldp, args->newp, c);
+ if (rc)
+ goto bad;
+ } else if (args->oldp->mls_enabled && !args->newp->mls_enabled) {
+ /*
+ * Switching between MLS and non-MLS policy:
+ * free any storage used by the MLS fields in the
+ * context for all existing entries in the sidtab.
+ */
+ mls_context_destroy(c);
+ } else if (!args->oldp->mls_enabled && args->newp->mls_enabled) {
+ /*
+ * Switching between non-MLS and MLS policy:
+ * ensure that the MLS fields of the context for all
+ * existing entries in the sidtab are filled in with a
+ * suitable default value, likely taken from one of the
+ * initial SIDs.
+ */
+ oc = args->newp->ocontexts[OCON_ISID];
+ while (oc && oc->sid[0] != SECINITSID_UNLABELED)
+ oc = oc->next;
+ if (!oc) {
+ printk(KERN_ERR "SELinux: unable to look up"
+ " the initial SIDs list\n");
+ goto bad;
+ }
+ range = &oc->context[0].range;
+ rc = mls_range_set(c, range);
+ if (rc)
+ goto bad;
+ }
/* Check the validity of the new context. */
if (!policydb_context_isvalid(args->newp, c)) {
@@ -1771,9 +1794,17 @@
if (policydb_read(&newpolicydb, fp))
return -EINVAL;
- if (sidtab_init(&newsidtab)) {
+ /* If switching between different policy types, log MLS status */
+ if (policydb.mls_enabled && !newpolicydb.mls_enabled)
+ printk(KERN_INFO "SELinux: Disabling MLS support...\n");
+ else if (!policydb.mls_enabled && newpolicydb.mls_enabled)
+ printk(KERN_INFO "SELinux: Enabling MLS support...\n");
+
+ rc = policydb_load_isids(&newpolicydb, &newsidtab);
+ if (rc) {
+ printk(KERN_ERR "SELinux: unable to load the initial SIDs\n");
policydb_destroy(&newpolicydb);
- return -ENOMEM;
+ return rc;
}
if (selinux_set_mapping(&newpolicydb, secclass_map,
@@ -1800,8 +1831,12 @@
args.oldp = &policydb;
args.newp = &newpolicydb;
rc = sidtab_map(&newsidtab, convert_context, &args);
- if (rc)
+ if (rc) {
+ printk(KERN_ERR "SELinux: unable to convert the internal"
+ " representation of contexts in the new SID"
+ " table\n");
goto err;
+ }
/* Save the old policydb and SID table to free later. */
memcpy(&oldpolicydb, &policydb, sizeof policydb);
@@ -2397,7 +2432,7 @@
u32 len;
int rc = 0;
- if (!ss_initialized || !selinux_mls_enabled) {
+ if (!ss_initialized || !policydb.mls_enabled) {
*new_sid = sid;
goto out;
}
@@ -2498,7 +2533,7 @@
/* we don't need to check ss_initialized here since the only way both
* nlbl_sid and xfrm_sid are not equal to SECSID_NULL would be if the
* security server was initialized and ss_initialized was true */
- if (!selinux_mls_enabled) {
+ if (!policydb.mls_enabled) {
*peer_sid = SECSID_NULL;
return 0;
}
@@ -2555,7 +2590,7 @@
read_lock(&policy_rwlock);
*nclasses = policydb.p_classes.nprim;
- *classes = kcalloc(*nclasses, sizeof(*classes), GFP_ATOMIC);
+ *classes = kcalloc(*nclasses, sizeof(**classes), GFP_ATOMIC);
if (!*classes)
goto out;
@@ -2602,7 +2637,7 @@
}
*nperms = match->permissions.nprim;
- *perms = kcalloc(*nperms, sizeof(*perms), GFP_ATOMIC);
+ *perms = kcalloc(*nperms, sizeof(**perms), GFP_ATOMIC);
if (!*perms)
goto out;
diff --git a/security/smack/smack_lsm.c b/security/smack/smack_lsm.c
index 529c9ca..a5721b3 100644
--- a/security/smack/smack_lsm.c
+++ b/security/smack/smack_lsm.c
@@ -157,12 +157,12 @@
*
* Returns 0 on success, error code otherwise.
*/
-static int smack_syslog(int type)
+static int smack_syslog(int type, bool from_file)
{
int rc;
char *sp = current_security();
- rc = cap_syslog(type);
+ rc = cap_syslog(type, from_file);
if (rc != 0)
return rc;
diff --git a/security/tomoyo/Makefile b/security/tomoyo/Makefile
index 10ccd68..60a9e20 100644
--- a/security/tomoyo/Makefile
+++ b/security/tomoyo/Makefile
@@ -1 +1 @@
-obj-y = common.o realpath.o tomoyo.o domain.o file.o
+obj-y = common.o realpath.o tomoyo.o domain.o file.o gc.o
diff --git a/security/tomoyo/common.c b/security/tomoyo/common.c
index be1099b..8ccf129 100644
--- a/security/tomoyo/common.c
+++ b/security/tomoyo/common.c
@@ -12,9 +12,10 @@
#include <linux/uaccess.h>
#include <linux/security.h>
#include <linux/hardirq.h>
-#include "realpath.h"
#include "common.h"
-#include "tomoyo.h"
+
+/* Lock for protecting policy. */
+DEFINE_MUTEX(tomoyo_policy_lock);
/* Has loading policy done? */
bool tomoyo_policy_loaded;
@@ -178,14 +179,12 @@
* 1 = must / -1 = must not / 0 = don't care
* @end_type: Should the pathname end with '/'?
* 1 = must / -1 = must not / 0 = don't care
- * @function: The name of function calling me.
*
* Check whether the given filename follows the naming rules.
* Returns true if @filename follows the naming rules, false otherwise.
*/
bool tomoyo_is_correct_path(const char *filename, const s8 start_type,
- const s8 pattern_type, const s8 end_type,
- const char *function)
+ const s8 pattern_type, const s8 end_type)
{
const char *const start = filename;
bool in_repetition = false;
@@ -193,7 +192,6 @@
unsigned char c;
unsigned char d;
unsigned char e;
- const char *original_filename = filename;
if (!filename)
goto out;
@@ -282,25 +280,20 @@
goto out;
return true;
out:
- printk(KERN_DEBUG "%s: Invalid pathname '%s'\n", function,
- original_filename);
return false;
}
/**
* tomoyo_is_correct_domain - Check whether the given domainname follows the naming rules.
* @domainname: The domainname to check.
- * @function: The name of function calling me.
*
* Returns true if @domainname follows the naming rules, false otherwise.
*/
-bool tomoyo_is_correct_domain(const unsigned char *domainname,
- const char *function)
+bool tomoyo_is_correct_domain(const unsigned char *domainname)
{
unsigned char c;
unsigned char d;
unsigned char e;
- const char *org_domainname = domainname;
if (!domainname || strncmp(domainname, TOMOYO_ROOT_NAME,
TOMOYO_ROOT_NAME_LEN))
@@ -343,8 +336,6 @@
} while (*domainname);
return true;
out:
- printk(KERN_DEBUG "%s: Invalid domainname '%s'\n", function,
- org_domainname);
return false;
}
@@ -365,10 +356,9 @@
*
* @domainname: The domainname to find.
*
- * Caller must call down_read(&tomoyo_domain_list_lock); or
- * down_write(&tomoyo_domain_list_lock); .
- *
* Returns pointer to "struct tomoyo_domain_info" if found, NULL otherwise.
+ *
+ * Caller holds tomoyo_read_lock().
*/
struct tomoyo_domain_info *tomoyo_find_domain(const char *domainname)
{
@@ -377,7 +367,7 @@
name.name = domainname;
tomoyo_fill_path_info(&name);
- list_for_each_entry(domain, &tomoyo_domain_list, list) {
+ list_for_each_entry_rcu(domain, &tomoyo_domain_list, list) {
if (!domain->is_deleted &&
!tomoyo_pathcmp(&name, domain->domainname))
return domain;
@@ -748,7 +738,7 @@
*
* Returns the tomoyo_realpath() of current process on success, NULL otherwise.
*
- * This function uses tomoyo_alloc(), so the caller must call tomoyo_free()
+ * This function uses kzalloc(), so the caller must call kfree()
* if this function didn't return NULL.
*/
static const char *tomoyo_get_exe(void)
@@ -829,6 +819,8 @@
* @domain: Pointer to "struct tomoyo_domain_info".
*
* Returns true if the domain has not exceeded its quota, false otherwise.
+ *
+ * Caller holds tomoyo_read_lock().
*/
bool tomoyo_domain_quota_is_ok(struct tomoyo_domain_info * const domain)
{
@@ -837,61 +829,29 @@
if (!domain)
return true;
- down_read(&tomoyo_domain_acl_info_list_lock);
- list_for_each_entry(ptr, &domain->acl_info_list, list) {
- if (ptr->type & TOMOYO_ACL_DELETED)
- continue;
- switch (tomoyo_acl_type2(ptr)) {
- struct tomoyo_single_path_acl_record *acl1;
- struct tomoyo_double_path_acl_record *acl2;
- u16 perm;
- case TOMOYO_TYPE_SINGLE_PATH_ACL:
- acl1 = container_of(ptr,
- struct tomoyo_single_path_acl_record,
- head);
- perm = acl1->perm;
- if (perm & (1 << TOMOYO_TYPE_EXECUTE_ACL))
- count++;
- if (perm &
- ((1 << TOMOYO_TYPE_READ_ACL) |
- (1 << TOMOYO_TYPE_WRITE_ACL)))
- count++;
- if (perm & (1 << TOMOYO_TYPE_CREATE_ACL))
- count++;
- if (perm & (1 << TOMOYO_TYPE_UNLINK_ACL))
- count++;
- if (perm & (1 << TOMOYO_TYPE_MKDIR_ACL))
- count++;
- if (perm & (1 << TOMOYO_TYPE_RMDIR_ACL))
- count++;
- if (perm & (1 << TOMOYO_TYPE_MKFIFO_ACL))
- count++;
- if (perm & (1 << TOMOYO_TYPE_MKSOCK_ACL))
- count++;
- if (perm & (1 << TOMOYO_TYPE_MKBLOCK_ACL))
- count++;
- if (perm & (1 << TOMOYO_TYPE_MKCHAR_ACL))
- count++;
- if (perm & (1 << TOMOYO_TYPE_TRUNCATE_ACL))
- count++;
- if (perm & (1 << TOMOYO_TYPE_SYMLINK_ACL))
- count++;
- if (perm & (1 << TOMOYO_TYPE_REWRITE_ACL))
- count++;
+ list_for_each_entry_rcu(ptr, &domain->acl_info_list, list) {
+ switch (ptr->type) {
+ struct tomoyo_path_acl *acl;
+ u32 perm;
+ u8 i;
+ case TOMOYO_TYPE_PATH_ACL:
+ acl = container_of(ptr, struct tomoyo_path_acl, head);
+ perm = acl->perm | (((u32) acl->perm_high) << 16);
+ for (i = 0; i < TOMOYO_MAX_PATH_OPERATION; i++)
+ if (perm & (1 << i))
+ count++;
+ if (perm & (1 << TOMOYO_TYPE_READ_WRITE))
+ count -= 2;
break;
- case TOMOYO_TYPE_DOUBLE_PATH_ACL:
- acl2 = container_of(ptr,
- struct tomoyo_double_path_acl_record,
- head);
- perm = acl2->perm;
- if (perm & (1 << TOMOYO_TYPE_LINK_ACL))
- count++;
- if (perm & (1 << TOMOYO_TYPE_RENAME_ACL))
- count++;
+ case TOMOYO_TYPE_PATH2_ACL:
+ perm = container_of(ptr, struct tomoyo_path2_acl, head)
+ ->perm;
+ for (i = 0; i < TOMOYO_MAX_PATH2_OPERATION; i++)
+ if (perm & (1 << i))
+ count++;
break;
}
}
- up_read(&tomoyo_domain_acl_info_list_lock);
if (count < tomoyo_check_flags(domain, TOMOYO_MAX_ACCEPT_ENTRY))
return true;
if (!domain->quota_warned) {
@@ -923,9 +883,11 @@
ptr = tomoyo_profile_ptr[profile];
if (ptr)
goto ok;
- ptr = tomoyo_alloc_element(sizeof(*ptr));
- if (!ptr)
+ ptr = kmalloc(sizeof(*ptr), GFP_KERNEL);
+ if (!tomoyo_memory_ok(ptr)) {
+ kfree(ptr);
goto ok;
+ }
for (i = 0; i < TOMOYO_MAX_CONTROL_INDEX; i++)
ptr->value[i] = tomoyo_control_array[i].current_value;
mb(); /* Avoid out-of-order execution. */
@@ -966,7 +928,9 @@
return -EINVAL;
*cp = '\0';
if (!strcmp(data, "COMMENT")) {
- profile->comment = tomoyo_save_name(cp + 1);
+ const struct tomoyo_path_info *old_comment = profile->comment;
+ profile->comment = tomoyo_get_name(cp + 1);
+ tomoyo_put_name(old_comment);
return 0;
}
for (i = 0; i < TOMOYO_MAX_CONTROL_INDEX; i++) {
@@ -1061,27 +1025,6 @@
}
/*
- * tomoyo_policy_manager_entry is a structure which is used for holding list of
- * domainnames or programs which are permitted to modify configuration via
- * /sys/kernel/security/tomoyo/ interface.
- * It has following fields.
- *
- * (1) "list" which is linked to tomoyo_policy_manager_list .
- * (2) "manager" is a domainname or a program's pathname.
- * (3) "is_domain" is a bool which is true if "manager" is a domainname, false
- * otherwise.
- * (4) "is_deleted" is a bool which is true if marked as deleted, false
- * otherwise.
- */
-struct tomoyo_policy_manager_entry {
- struct list_head list;
- /* A path to program or a domainname. */
- const struct tomoyo_path_info *manager;
- bool is_domain; /* True if manager is a domainname. */
- bool is_deleted; /* True if this entry is deleted. */
-};
-
-/*
* tomoyo_policy_manager_list is used for holding list of domainnames or
* programs which are permitted to modify configuration via
* /sys/kernel/security/tomoyo/ interface.
@@ -1111,8 +1054,7 @@
*
* # cat /sys/kernel/security/tomoyo/manager
*/
-static LIST_HEAD(tomoyo_policy_manager_list);
-static DECLARE_RWSEM(tomoyo_policy_manager_list_lock);
+LIST_HEAD(tomoyo_policy_manager_list);
/**
* tomoyo_update_manager_entry - Add a manager entry.
@@ -1121,48 +1063,50 @@
* @is_delete: True if it is a delete request.
*
* Returns 0 on success, negative value otherwise.
+ *
+ * Caller holds tomoyo_read_lock().
*/
static int tomoyo_update_manager_entry(const char *manager,
const bool is_delete)
{
- struct tomoyo_policy_manager_entry *new_entry;
+ struct tomoyo_policy_manager_entry *entry = NULL;
struct tomoyo_policy_manager_entry *ptr;
const struct tomoyo_path_info *saved_manager;
- int error = -ENOMEM;
+ int error = is_delete ? -ENOENT : -ENOMEM;
bool is_domain = false;
if (tomoyo_is_domain_def(manager)) {
- if (!tomoyo_is_correct_domain(manager, __func__))
+ if (!tomoyo_is_correct_domain(manager))
return -EINVAL;
is_domain = true;
} else {
- if (!tomoyo_is_correct_path(manager, 1, -1, -1, __func__))
+ if (!tomoyo_is_correct_path(manager, 1, -1, -1))
return -EINVAL;
}
- saved_manager = tomoyo_save_name(manager);
+ saved_manager = tomoyo_get_name(manager);
if (!saved_manager)
return -ENOMEM;
- down_write(&tomoyo_policy_manager_list_lock);
- list_for_each_entry(ptr, &tomoyo_policy_manager_list, list) {
+ if (!is_delete)
+ entry = kmalloc(sizeof(*entry), GFP_KERNEL);
+ mutex_lock(&tomoyo_policy_lock);
+ list_for_each_entry_rcu(ptr, &tomoyo_policy_manager_list, list) {
if (ptr->manager != saved_manager)
continue;
ptr->is_deleted = is_delete;
error = 0;
- goto out;
+ break;
}
- if (is_delete) {
- error = -ENOENT;
- goto out;
+ if (!is_delete && error && tomoyo_memory_ok(entry)) {
+ entry->manager = saved_manager;
+ saved_manager = NULL;
+ entry->is_domain = is_domain;
+ list_add_tail_rcu(&entry->list, &tomoyo_policy_manager_list);
+ entry = NULL;
+ error = 0;
}
- new_entry = tomoyo_alloc_element(sizeof(*new_entry));
- if (!new_entry)
- goto out;
- new_entry->manager = saved_manager;
- new_entry->is_domain = is_domain;
- list_add_tail(&new_entry->list, &tomoyo_policy_manager_list);
- error = 0;
- out:
- up_write(&tomoyo_policy_manager_list_lock);
+ mutex_unlock(&tomoyo_policy_lock);
+ tomoyo_put_name(saved_manager);
+ kfree(entry);
return error;
}
@@ -1172,6 +1116,8 @@
* @head: Pointer to "struct tomoyo_io_buffer".
*
* Returns 0 on success, negative value otherwise.
+ *
+ * Caller holds tomoyo_read_lock().
*/
static int tomoyo_write_manager_policy(struct tomoyo_io_buffer *head)
{
@@ -1191,6 +1137,8 @@
* @head: Pointer to "struct tomoyo_io_buffer".
*
* Returns 0.
+ *
+ * Caller holds tomoyo_read_lock().
*/
static int tomoyo_read_manager_policy(struct tomoyo_io_buffer *head)
{
@@ -1199,7 +1147,6 @@
if (head->read_eof)
return 0;
- down_read(&tomoyo_policy_manager_list_lock);
list_for_each_cookie(pos, head->read_var2,
&tomoyo_policy_manager_list) {
struct tomoyo_policy_manager_entry *ptr;
@@ -1211,7 +1158,6 @@
if (!done)
break;
}
- up_read(&tomoyo_policy_manager_list_lock);
head->read_eof = done;
return 0;
}
@@ -1221,6 +1167,8 @@
*
* Returns true if the current process is permitted to modify policy
* via /sys/kernel/security/tomoyo/ interface.
+ *
+ * Caller holds tomoyo_read_lock().
*/
static bool tomoyo_is_policy_manager(void)
{
@@ -1234,29 +1182,25 @@
return true;
if (!tomoyo_manage_by_non_root && (task->cred->uid || task->cred->euid))
return false;
- down_read(&tomoyo_policy_manager_list_lock);
- list_for_each_entry(ptr, &tomoyo_policy_manager_list, list) {
+ list_for_each_entry_rcu(ptr, &tomoyo_policy_manager_list, list) {
if (!ptr->is_deleted && ptr->is_domain
&& !tomoyo_pathcmp(domainname, ptr->manager)) {
found = true;
break;
}
}
- up_read(&tomoyo_policy_manager_list_lock);
if (found)
return true;
exe = tomoyo_get_exe();
if (!exe)
return false;
- down_read(&tomoyo_policy_manager_list_lock);
- list_for_each_entry(ptr, &tomoyo_policy_manager_list, list) {
+ list_for_each_entry_rcu(ptr, &tomoyo_policy_manager_list, list) {
if (!ptr->is_deleted && !ptr->is_domain
&& !strcmp(exe, ptr->manager->name)) {
found = true;
break;
}
}
- up_read(&tomoyo_policy_manager_list_lock);
if (!found) { /* Reduce error messages. */
static pid_t last_pid;
const pid_t pid = current->pid;
@@ -1266,7 +1210,7 @@
last_pid = pid;
}
}
- tomoyo_free(exe);
+ kfree(exe);
return found;
}
@@ -1277,6 +1221,8 @@
* @data: String to parse.
*
* Returns true on success, false otherwise.
+ *
+ * Caller holds tomoyo_read_lock().
*/
static bool tomoyo_is_select_one(struct tomoyo_io_buffer *head,
const char *data)
@@ -1286,17 +1232,16 @@
if (sscanf(data, "pid=%u", &pid) == 1) {
struct task_struct *p;
+ rcu_read_lock();
read_lock(&tasklist_lock);
p = find_task_by_vpid(pid);
if (p)
domain = tomoyo_real_domain(p);
read_unlock(&tasklist_lock);
+ rcu_read_unlock();
} else if (!strncmp(data, "domain=", 7)) {
- if (tomoyo_is_domain_def(data + 7)) {
- down_read(&tomoyo_domain_list_lock);
+ if (tomoyo_is_domain_def(data + 7))
domain = tomoyo_find_domain(data + 7);
- up_read(&tomoyo_domain_list_lock);
- }
} else
return false;
head->write_var1 = domain;
@@ -1310,13 +1255,11 @@
if (domain) {
struct tomoyo_domain_info *d;
head->read_var1 = NULL;
- down_read(&tomoyo_domain_list_lock);
- list_for_each_entry(d, &tomoyo_domain_list, list) {
+ list_for_each_entry_rcu(d, &tomoyo_domain_list, list) {
if (d == domain)
break;
head->read_var1 = &d->list;
}
- up_read(&tomoyo_domain_list_lock);
head->read_var2 = NULL;
head->read_bit = 0;
head->read_step = 0;
@@ -1332,6 +1275,8 @@
* @domainname: The name of domain.
*
* Returns 0.
+ *
+ * Caller holds tomoyo_read_lock().
*/
static int tomoyo_delete_domain(char *domainname)
{
@@ -1340,9 +1285,9 @@
name.name = domainname;
tomoyo_fill_path_info(&name);
- down_write(&tomoyo_domain_list_lock);
+ mutex_lock(&tomoyo_policy_lock);
/* Is there an active domain? */
- list_for_each_entry(domain, &tomoyo_domain_list, list) {
+ list_for_each_entry_rcu(domain, &tomoyo_domain_list, list) {
/* Never delete tomoyo_kernel_domain */
if (domain == &tomoyo_kernel_domain)
continue;
@@ -1352,7 +1297,7 @@
domain->is_deleted = true;
break;
}
- up_write(&tomoyo_domain_list_lock);
+ mutex_unlock(&tomoyo_policy_lock);
return 0;
}
@@ -1362,6 +1307,8 @@
* @head: Pointer to "struct tomoyo_io_buffer".
*
* Returns 0 on success, negative value otherwise.
+ *
+ * Caller holds tomoyo_read_lock().
*/
static int tomoyo_write_domain_policy(struct tomoyo_io_buffer *head)
{
@@ -1384,11 +1331,9 @@
domain = NULL;
if (is_delete)
tomoyo_delete_domain(data);
- else if (is_select) {
- down_read(&tomoyo_domain_list_lock);
+ else if (is_select)
domain = tomoyo_find_domain(data);
- up_read(&tomoyo_domain_list_lock);
- } else
+ else
domain = tomoyo_find_or_assign_new_domain(data, 0);
head->write_var1 = domain;
return 0;
@@ -1403,42 +1348,38 @@
return 0;
}
if (!strcmp(data, TOMOYO_KEYWORD_IGNORE_GLOBAL_ALLOW_READ)) {
- tomoyo_set_domain_flag(domain, is_delete,
- TOMOYO_DOMAIN_FLAGS_IGNORE_GLOBAL_ALLOW_READ);
+ domain->ignore_global_allow_read = !is_delete;
return 0;
}
return tomoyo_write_file_policy(data, domain, is_delete);
}
/**
- * tomoyo_print_single_path_acl - Print a single path ACL entry.
+ * tomoyo_print_path_acl - Print a single path ACL entry.
*
* @head: Pointer to "struct tomoyo_io_buffer".
- * @ptr: Pointer to "struct tomoyo_single_path_acl_record".
+ * @ptr: Pointer to "struct tomoyo_path_acl".
*
* Returns true on success, false otherwise.
*/
-static bool tomoyo_print_single_path_acl(struct tomoyo_io_buffer *head,
- struct tomoyo_single_path_acl_record *
- ptr)
+static bool tomoyo_print_path_acl(struct tomoyo_io_buffer *head,
+ struct tomoyo_path_acl *ptr)
{
int pos;
u8 bit;
const char *filename;
- const u16 perm = ptr->perm;
+ const u32 perm = ptr->perm | (((u32) ptr->perm_high) << 16);
filename = ptr->filename->name;
- for (bit = head->read_bit; bit < TOMOYO_MAX_SINGLE_PATH_OPERATION;
- bit++) {
+ for (bit = head->read_bit; bit < TOMOYO_MAX_PATH_OPERATION; bit++) {
const char *msg;
if (!(perm & (1 << bit)))
continue;
/* Print "read/write" instead of "read" and "write". */
- if ((bit == TOMOYO_TYPE_READ_ACL ||
- bit == TOMOYO_TYPE_WRITE_ACL)
- && (perm & (1 << TOMOYO_TYPE_READ_WRITE_ACL)))
+ if ((bit == TOMOYO_TYPE_READ || bit == TOMOYO_TYPE_WRITE)
+ && (perm & (1 << TOMOYO_TYPE_READ_WRITE)))
continue;
- msg = tomoyo_sp2keyword(bit);
+ msg = tomoyo_path2keyword(bit);
pos = head->read_avail;
if (!tomoyo_io_printf(head, "allow_%s %s\n", msg, filename))
goto out;
@@ -1452,16 +1393,15 @@
}
/**
- * tomoyo_print_double_path_acl - Print a double path ACL entry.
+ * tomoyo_print_path2_acl - Print a double path ACL entry.
*
* @head: Pointer to "struct tomoyo_io_buffer".
- * @ptr: Pointer to "struct tomoyo_double_path_acl_record".
+ * @ptr: Pointer to "struct tomoyo_path2_acl".
*
* Returns true on success, false otherwise.
*/
-static bool tomoyo_print_double_path_acl(struct tomoyo_io_buffer *head,
- struct tomoyo_double_path_acl_record *
- ptr)
+static bool tomoyo_print_path2_acl(struct tomoyo_io_buffer *head,
+ struct tomoyo_path2_acl *ptr)
{
int pos;
const char *filename1;
@@ -1471,12 +1411,11 @@
filename1 = ptr->filename1->name;
filename2 = ptr->filename2->name;
- for (bit = head->read_bit; bit < TOMOYO_MAX_DOUBLE_PATH_OPERATION;
- bit++) {
+ for (bit = head->read_bit; bit < TOMOYO_MAX_PATH2_OPERATION; bit++) {
const char *msg;
if (!(perm & (1 << bit)))
continue;
- msg = tomoyo_dp2keyword(bit);
+ msg = tomoyo_path22keyword(bit);
pos = head->read_avail;
if (!tomoyo_io_printf(head, "allow_%s %s %s\n", msg,
filename1, filename2))
@@ -1501,23 +1440,17 @@
static bool tomoyo_print_entry(struct tomoyo_io_buffer *head,
struct tomoyo_acl_info *ptr)
{
- const u8 acl_type = tomoyo_acl_type2(ptr);
+ const u8 acl_type = ptr->type;
- if (acl_type & TOMOYO_ACL_DELETED)
- return true;
- if (acl_type == TOMOYO_TYPE_SINGLE_PATH_ACL) {
- struct tomoyo_single_path_acl_record *acl
- = container_of(ptr,
- struct tomoyo_single_path_acl_record,
- head);
- return tomoyo_print_single_path_acl(head, acl);
+ if (acl_type == TOMOYO_TYPE_PATH_ACL) {
+ struct tomoyo_path_acl *acl
+ = container_of(ptr, struct tomoyo_path_acl, head);
+ return tomoyo_print_path_acl(head, acl);
}
- if (acl_type == TOMOYO_TYPE_DOUBLE_PATH_ACL) {
- struct tomoyo_double_path_acl_record *acl
- = container_of(ptr,
- struct tomoyo_double_path_acl_record,
- head);
- return tomoyo_print_double_path_acl(head, acl);
+ if (acl_type == TOMOYO_TYPE_PATH2_ACL) {
+ struct tomoyo_path2_acl *acl
+ = container_of(ptr, struct tomoyo_path2_acl, head);
+ return tomoyo_print_path2_acl(head, acl);
}
BUG(); /* This must not happen. */
return false;
@@ -1529,6 +1462,8 @@
* @head: Pointer to "struct tomoyo_io_buffer".
*
* Returns 0.
+ *
+ * Caller holds tomoyo_read_lock().
*/
static int tomoyo_read_domain_policy(struct tomoyo_io_buffer *head)
{
@@ -1540,7 +1475,6 @@
return 0;
if (head->read_step == 0)
head->read_step = 1;
- down_read(&tomoyo_domain_list_lock);
list_for_each_cookie(dpos, head->read_var1, &tomoyo_domain_list) {
struct tomoyo_domain_info *domain;
const char *quota_exceeded = "";
@@ -1554,10 +1488,9 @@
/* Print domainname and flags. */
if (domain->quota_warned)
quota_exceeded = "quota_exceeded\n";
- if (domain->flags & TOMOYO_DOMAIN_FLAGS_TRANSITION_FAILED)
+ if (domain->transition_failed)
transition_failed = "transition_failed\n";
- if (domain->flags &
- TOMOYO_DOMAIN_FLAGS_IGNORE_GLOBAL_ALLOW_READ)
+ if (domain->ignore_global_allow_read)
ignore_global_allow_read
= TOMOYO_KEYWORD_IGNORE_GLOBAL_ALLOW_READ "\n";
done = tomoyo_io_printf(head, "%s\n" TOMOYO_KEYWORD_USE_PROFILE
@@ -1573,7 +1506,6 @@
if (head->read_step == 3)
goto tail_mark;
/* Print ACL entries in the domain. */
- down_read(&tomoyo_domain_acl_info_list_lock);
list_for_each_cookie(apos, head->read_var2,
&domain->acl_info_list) {
struct tomoyo_acl_info *ptr
@@ -1583,7 +1515,6 @@
if (!done)
break;
}
- up_read(&tomoyo_domain_acl_info_list_lock);
if (!done)
break;
head->read_step = 3;
@@ -1595,7 +1526,6 @@
if (head->read_single_domain)
break;
}
- up_read(&tomoyo_domain_list_lock);
head->read_eof = done;
return 0;
}
@@ -1611,6 +1541,8 @@
*
* ( echo "select " $domainname; echo "use_profile " $profile ) |
* /usr/lib/ccs/loadpolicy -d
+ *
+ * Caller holds tomoyo_read_lock().
*/
static int tomoyo_write_domain_profile(struct tomoyo_io_buffer *head)
{
@@ -1622,9 +1554,7 @@
if (!cp)
return -EINVAL;
*cp = '\0';
- down_read(&tomoyo_domain_list_lock);
domain = tomoyo_find_domain(cp + 1);
- up_read(&tomoyo_domain_list_lock);
if (strict_strtoul(data, 10, &profile))
return -EINVAL;
if (domain && profile < TOMOYO_MAX_PROFILES
@@ -1646,6 +1576,8 @@
* awk ' { if ( domainname == "" ) { if ( $1 == "<kernel>" )
* domainname = $0; } else if ( $1 == "use_profile" ) {
* print $2 " " domainname; domainname = ""; } } ; '
+ *
+ * Caller holds tomoyo_read_lock().
*/
static int tomoyo_read_domain_profile(struct tomoyo_io_buffer *head)
{
@@ -1654,7 +1586,6 @@
if (head->read_eof)
return 0;
- down_read(&tomoyo_domain_list_lock);
list_for_each_cookie(pos, head->read_var1, &tomoyo_domain_list) {
struct tomoyo_domain_info *domain;
domain = list_entry(pos, struct tomoyo_domain_info, list);
@@ -1665,7 +1596,6 @@
if (!done)
break;
}
- up_read(&tomoyo_domain_list_lock);
head->read_eof = done;
return 0;
}
@@ -1703,11 +1633,13 @@
const int pid = head->read_step;
struct task_struct *p;
struct tomoyo_domain_info *domain = NULL;
+ rcu_read_lock();
read_lock(&tasklist_lock);
p = find_task_by_vpid(pid);
if (p)
domain = tomoyo_real_domain(p);
read_unlock(&tasklist_lock);
+ rcu_read_unlock();
if (domain)
tomoyo_io_printf(head, "%d %u %s", pid, domain->profile,
domain->domainname->name);
@@ -1722,6 +1654,8 @@
* @head: Pointer to "struct tomoyo_io_buffer".
*
* Returns 0 on success, negative value otherwise.
+ *
+ * Caller holds tomoyo_read_lock().
*/
static int tomoyo_write_exception_policy(struct tomoyo_io_buffer *head)
{
@@ -1756,6 +1690,8 @@
* @head: Pointer to "struct tomoyo_io_buffer".
*
* Returns 0 on success, -EINVAL otherwise.
+ *
+ * Caller holds tomoyo_read_lock().
*/
static int tomoyo_read_exception_policy(struct tomoyo_io_buffer *head)
{
@@ -1885,15 +1821,13 @@
tomoyo_policy_loaded = true;
{ /* Check all profiles currently assigned to domains are defined. */
struct tomoyo_domain_info *domain;
- down_read(&tomoyo_domain_list_lock);
- list_for_each_entry(domain, &tomoyo_domain_list, list) {
+ list_for_each_entry_rcu(domain, &tomoyo_domain_list, list) {
const u8 profile = domain->profile;
if (tomoyo_profile_ptr[profile])
continue;
panic("Profile %u (used by '%s') not defined.\n",
profile, domain->domainname->name);
}
- up_read(&tomoyo_domain_list_lock);
}
}
@@ -1941,10 +1875,12 @@
* @file: Pointer to "struct file".
*
* Associates policy handler and returns 0 on success, -ENOMEM otherwise.
+ *
+ * Caller acquires tomoyo_read_lock().
*/
static int tomoyo_open_control(const u8 type, struct file *file)
{
- struct tomoyo_io_buffer *head = tomoyo_alloc(sizeof(*head));
+ struct tomoyo_io_buffer *head = kzalloc(sizeof(*head), GFP_KERNEL);
if (!head)
return -ENOMEM;
@@ -2005,9 +1941,9 @@
} else {
if (!head->readbuf_size)
head->readbuf_size = 4096 * 2;
- head->read_buf = tomoyo_alloc(head->readbuf_size);
+ head->read_buf = kzalloc(head->readbuf_size, GFP_KERNEL);
if (!head->read_buf) {
- tomoyo_free(head);
+ kfree(head);
return -ENOMEM;
}
}
@@ -2019,13 +1955,14 @@
head->write = NULL;
} else if (head->write) {
head->writebuf_size = 4096 * 2;
- head->write_buf = tomoyo_alloc(head->writebuf_size);
+ head->write_buf = kzalloc(head->writebuf_size, GFP_KERNEL);
if (!head->write_buf) {
- tomoyo_free(head->read_buf);
- tomoyo_free(head);
+ kfree(head->read_buf);
+ kfree(head);
return -ENOMEM;
}
}
+ head->reader_idx = tomoyo_read_lock();
file->private_data = head;
/*
* Call the handler now if the file is
@@ -2047,6 +1984,8 @@
* @buffer_len: Size of @buffer.
*
* Returns bytes read on success, negative value otherwise.
+ *
+ * Caller holds tomoyo_read_lock().
*/
static int tomoyo_read_control(struct file *file, char __user *buffer,
const int buffer_len)
@@ -2090,6 +2029,8 @@
* @buffer_len: Size of @buffer.
*
* Returns @buffer_len on success, negative value otherwise.
+ *
+ * Caller holds tomoyo_read_lock().
*/
static int tomoyo_write_control(struct file *file, const char __user *buffer,
const int buffer_len)
@@ -2140,52 +2081,29 @@
* @file: Pointer to "struct file".
*
* Releases memory and returns 0.
+ *
+ * Caller releases tomoyo_read_lock().
*/
static int tomoyo_close_control(struct file *file)
{
struct tomoyo_io_buffer *head = file->private_data;
+ const bool is_write = !!head->write_buf;
+ tomoyo_read_unlock(head->reader_idx);
/* Release memory used for policy I/O. */
- tomoyo_free(head->read_buf);
+ kfree(head->read_buf);
head->read_buf = NULL;
- tomoyo_free(head->write_buf);
+ kfree(head->write_buf);
head->write_buf = NULL;
- tomoyo_free(head);
+ kfree(head);
head = NULL;
file->private_data = NULL;
+ if (is_write)
+ tomoyo_run_gc();
return 0;
}
/**
- * tomoyo_alloc_acl_element - Allocate permanent memory for ACL entry.
- *
- * @acl_type: Type of ACL entry.
- *
- * Returns pointer to the ACL entry on success, NULL otherwise.
- */
-void *tomoyo_alloc_acl_element(const u8 acl_type)
-{
- int len;
- struct tomoyo_acl_info *ptr;
-
- switch (acl_type) {
- case TOMOYO_TYPE_SINGLE_PATH_ACL:
- len = sizeof(struct tomoyo_single_path_acl_record);
- break;
- case TOMOYO_TYPE_DOUBLE_PATH_ACL:
- len = sizeof(struct tomoyo_double_path_acl_record);
- break;
- default:
- return NULL;
- }
- ptr = tomoyo_alloc_element(len);
- if (!ptr)
- return NULL;
- ptr->type = acl_type;
- return ptr;
-}
-
-/**
* tomoyo_open - open() for /sys/kernel/security/tomoyo/ interface.
*
* @inode: Pointer to "struct inode".
diff --git a/security/tomoyo/common.h b/security/tomoyo/common.h
index 92169d2..67bd22d 100644
--- a/security/tomoyo/common.h
+++ b/security/tomoyo/common.h
@@ -1,12 +1,9 @@
/*
* security/tomoyo/common.h
*
- * Common functions for TOMOYO.
+ * Header file for TOMOYO.
*
- * Copyright (C) 2005-2009 NTT DATA CORPORATION
- *
- * Version: 2.2.0 2009/04/01
- *
+ * Copyright (C) 2005-2010 NTT DATA CORPORATION
*/
#ifndef _SECURITY_TOMOYO_COMMON_H
@@ -22,9 +19,119 @@
#include <linux/namei.h>
#include <linux/mount.h>
#include <linux/list.h>
+#include <linux/cred.h>
+struct linux_binprm;
-struct dentry;
-struct vfsmount;
+/********** Constants definitions. **********/
+
+/*
+ * TOMOYO uses this hash only when appending a string into the string
+ * table. Strings are appended only rarely, so we don't need a large
+ * (e.g. 64k) hash size; 256 buckets are sufficient.
+ */
+#define TOMOYO_HASH_BITS 8
+#define TOMOYO_MAX_HASH (1u<<TOMOYO_HASH_BITS)
+
+/*
+ * This is the max length of a token.
+ *
+ * A token consists of only ASCII printable characters.
+ * Non-printable characters in a token are represented as \ooo style
+ * octal strings. Thus, \ itself is represented as \\.
+ */
+#define TOMOYO_MAX_PATHNAME_LEN 4000
+
+/* Profile number is an integer between 0 and 255. */
+#define TOMOYO_MAX_PROFILES 256
+
+/* Keywords for ACLs. */
+#define TOMOYO_KEYWORD_ALIAS "alias "
+#define TOMOYO_KEYWORD_ALLOW_READ "allow_read "
+#define TOMOYO_KEYWORD_DELETE "delete "
+#define TOMOYO_KEYWORD_DENY_REWRITE "deny_rewrite "
+#define TOMOYO_KEYWORD_FILE_PATTERN "file_pattern "
+#define TOMOYO_KEYWORD_INITIALIZE_DOMAIN "initialize_domain "
+#define TOMOYO_KEYWORD_KEEP_DOMAIN "keep_domain "
+#define TOMOYO_KEYWORD_NO_INITIALIZE_DOMAIN "no_initialize_domain "
+#define TOMOYO_KEYWORD_NO_KEEP_DOMAIN "no_keep_domain "
+#define TOMOYO_KEYWORD_SELECT "select "
+#define TOMOYO_KEYWORD_USE_PROFILE "use_profile "
+#define TOMOYO_KEYWORD_IGNORE_GLOBAL_ALLOW_READ "ignore_global_allow_read"
+/* A domain definition starts with <kernel>. */
+#define TOMOYO_ROOT_NAME "<kernel>"
+#define TOMOYO_ROOT_NAME_LEN (sizeof(TOMOYO_ROOT_NAME) - 1)
+
+/* Index numbers for Access Controls. */
+enum tomoyo_mac_index {
+ TOMOYO_MAC_FOR_FILE, /* domain_policy.conf */
+ TOMOYO_MAX_ACCEPT_ENTRY,
+ TOMOYO_VERBOSE,
+ TOMOYO_MAX_CONTROL_INDEX
+};
+
+/* Index numbers for ACL entry types. */
+enum tomoyo_acl_entry_type_index {
+ TOMOYO_TYPE_PATH_ACL,
+ TOMOYO_TYPE_PATH2_ACL,
+};
+
+/* Index numbers for File Controls. */
+
+/*
+ * TOMOYO_TYPE_READ_WRITE is special. TOMOYO_TYPE_READ_WRITE is automatically
+ * set if both TOMOYO_TYPE_READ and TOMOYO_TYPE_WRITE are set. Both
+ * TOMOYO_TYPE_READ and TOMOYO_TYPE_WRITE are automatically set if
+ * TOMOYO_TYPE_READ_WRITE is set. TOMOYO_TYPE_READ_WRITE is automatically
+ * cleared if either TOMOYO_TYPE_READ or TOMOYO_TYPE_WRITE is cleared. Both
+ * TOMOYO_TYPE_READ and TOMOYO_TYPE_WRITE are automatically cleared if
+ * TOMOYO_TYPE_READ_WRITE is cleared.
+ */
+
+enum tomoyo_path_acl_index {
+ TOMOYO_TYPE_READ_WRITE,
+ TOMOYO_TYPE_EXECUTE,
+ TOMOYO_TYPE_READ,
+ TOMOYO_TYPE_WRITE,
+ TOMOYO_TYPE_CREATE,
+ TOMOYO_TYPE_UNLINK,
+ TOMOYO_TYPE_MKDIR,
+ TOMOYO_TYPE_RMDIR,
+ TOMOYO_TYPE_MKFIFO,
+ TOMOYO_TYPE_MKSOCK,
+ TOMOYO_TYPE_MKBLOCK,
+ TOMOYO_TYPE_MKCHAR,
+ TOMOYO_TYPE_TRUNCATE,
+ TOMOYO_TYPE_SYMLINK,
+ TOMOYO_TYPE_REWRITE,
+ TOMOYO_TYPE_IOCTL,
+ TOMOYO_TYPE_CHMOD,
+ TOMOYO_TYPE_CHOWN,
+ TOMOYO_TYPE_CHGRP,
+ TOMOYO_TYPE_CHROOT,
+ TOMOYO_TYPE_MOUNT,
+ TOMOYO_TYPE_UMOUNT,
+ TOMOYO_MAX_PATH_OPERATION
+};
+
+enum tomoyo_path2_acl_index {
+ TOMOYO_TYPE_LINK,
+ TOMOYO_TYPE_RENAME,
+ TOMOYO_TYPE_PIVOT_ROOT,
+ TOMOYO_MAX_PATH2_OPERATION
+};
+
+enum tomoyo_securityfs_interface_index {
+ TOMOYO_DOMAINPOLICY,
+ TOMOYO_EXCEPTIONPOLICY,
+ TOMOYO_DOMAIN_STATUS,
+ TOMOYO_PROCESS_STATUS,
+ TOMOYO_MEMINFO,
+ TOMOYO_SELFDOMAIN,
+ TOMOYO_VERSION,
+ TOMOYO_PROFILE,
+ TOMOYO_MANAGER
+};
+
+/********** Structure definitions. **********/
/*
* tomoyo_page_buffer is a structure which is used for holding a pathname
@@ -66,13 +173,14 @@
};
/*
- * This is the max length of a token.
- *
- * A token consists of only ASCII printable characters.
- * Non printable characters in a token is represented in \ooo style
- * octal string. Thus, \ itself is represented as \\.
+ * tomoyo_name_entry is a structure which is used for linking
+ * "struct tomoyo_path_info" into tomoyo_name_list .
*/
-#define TOMOYO_MAX_PATHNAME_LEN 4000
+struct tomoyo_name_entry {
+ struct list_head list;
+ atomic_t users;
+ struct tomoyo_path_info entry;
+};
/*
* tomoyo_path_info_with_data is a structure which is used for holding a
@@ -89,7 +197,7 @@
* "struct tomoyo_path_info_with_data".
*/
struct tomoyo_path_info_with_data {
- /* Keep "head" first, for this pointer is passed to tomoyo_free(). */
+ /* Keep "head" first, for this pointer is passed to kfree(). */
struct tomoyo_path_info head;
char barrier1[16]; /* Safeguard for overrun. */
char body[TOMOYO_MAX_PATHNAME_LEN];
@@ -101,30 +209,19 @@
*
* (1) "list" which is linked to the ->acl_info_list of
* "struct tomoyo_domain_info"
- * (2) "type" which tells
- * (a) type & 0x7F : type of the entry (either
- * "struct tomoyo_single_path_acl_record" or
- * "struct tomoyo_double_path_acl_record")
- * (b) type & 0x80 : whether the entry is marked as "deleted".
+ * (2) "type" which tells type of the entry (either
+ * "struct tomoyo_path_acl" or "struct tomoyo_path2_acl").
*
* Packing "struct tomoyo_acl_info" allows
- * "struct tomoyo_single_path_acl_record" to embed "u16" and
- * "struct tomoyo_double_path_acl_record" to embed "u8"
+ * "struct tomoyo_path_acl" to embed "u8" + "u16" and
+ * "struct tomoyo_path2_acl" to embed "u8"
* without enlarging their structure size.
*/
struct tomoyo_acl_info {
struct list_head list;
- /*
- * Type of this ACL entry.
- *
- * MSB is is_deleted flag.
- */
u8 type;
} __packed;
-/* This ACL entry is deleted. */
-#define TOMOYO_ACL_DELETED 0x80
-
/*
* tomoyo_domain_info is a structure which is used for holding permissions
* (e.g. "allow_read /lib/libc-2.5.so") given to each domain.
@@ -138,7 +235,17 @@
* "deleted", false otherwise.
* (6) "quota_warned" is a bool which is used for suppressing warning message
* when learning mode learned too many entries.
- * (7) "flags" which remembers this domain's attributes.
+ * (7) "ignore_global_allow_read" is a bool which is true if this domain
+ * should ignore the "allow_read" directive in exception policy.
+ * (8) "transition_failed" is a bool which is set to true when this domain was
+ * unable to create a new domain at tomoyo_find_next_domain() because the
+ * name of the domain to be created was too long or it could not allocate
+ * memory. If true, one or more processes have continued execve()
+ * without a domain transition.
+ * (9) "users" is an atomic_t that holds how many "struct cred"->security
+ * are referring to this "struct tomoyo_domain_info". If is_deleted == true
+ * and users == 0, this struct will be kfree()d upon next garbage
+ * collection.
*
* A domain's lifecycle is an analogy of files on / directory.
* Multiple domains with the same domainname cannot be created (as with
@@ -155,25 +262,13 @@
u8 profile; /* Profile number to use. */
bool is_deleted; /* Delete flag. */
bool quota_warned; /* Quota warning flag. */
- /* DOMAIN_FLAGS_*. Use tomoyo_set_domain_flag() to modify. */
- u8 flags;
+ bool ignore_global_allow_read; /* Ignore "allow_read" flag. */
+ bool transition_failed; /* Domain transition failed flag. */
+ atomic_t users; /* Number of referring credentials. */
};
-/* Profile number is an integer between 0 and 255. */
-#define TOMOYO_MAX_PROFILES 256
-
-/* Ignore "allow_read" directive in exception policy. */
-#define TOMOYO_DOMAIN_FLAGS_IGNORE_GLOBAL_ALLOW_READ 1
/*
- * This domain was unable to create a new domain at tomoyo_find_next_domain()
- * because the name of the domain to be created was too long or
- * it could not allocate memory.
- * More than one process continued execve() without domain transition.
- */
-#define TOMOYO_DOMAIN_FLAGS_TRANSITION_FAILED 2
-
-/*
- * tomoyo_single_path_acl_record is a structure which is used for holding an
+ * tomoyo_path_acl is a structure which is used for holding an
* entry with one pathname operation (e.g. open(), mkdir()).
* It has the following fields.
*
@@ -184,18 +279,21 @@
* Directives held by this structure are "allow_read/write", "allow_execute",
* "allow_read", "allow_write", "allow_create", "allow_unlink", "allow_mkdir",
* "allow_rmdir", "allow_mkfifo", "allow_mksock", "allow_mkblock",
- * "allow_mkchar", "allow_truncate", "allow_symlink" and "allow_rewrite".
+ * "allow_mkchar", "allow_truncate", "allow_symlink", "allow_rewrite",
+ * "allow_chmod", "allow_chown", "allow_chgrp", "allow_chroot", "allow_mount"
+ * and "allow_unmount".
*/
-struct tomoyo_single_path_acl_record {
- struct tomoyo_acl_info head; /* type = TOMOYO_TYPE_SINGLE_PATH_ACL */
+struct tomoyo_path_acl {
+ struct tomoyo_acl_info head; /* type = TOMOYO_TYPE_PATH_ACL */
+ u8 perm_high;
u16 perm;
/* Pointer to single pathname. */
const struct tomoyo_path_info *filename;
};
/*
- * tomoyo_double_path_acl_record is a structure which is used for holding an
- * entry with two pathnames operation (i.e. link() and rename()).
+ * tomoyo_path2_acl is a structure which is used for holding an
+ * entry with a two-pathname operation (i.e. link(), rename() and pivot_root()).
* It has the following fields.
*
* (1) "head" which is a "struct tomoyo_acl_info".
@@ -203,10 +301,11 @@
* (3) "filename1" is the source/old pathname.
* (4) "filename2" is the destination/new pathname.
*
- * Directives held by this structure are "allow_rename" and "allow_link".
+ * Directives held by this structure are "allow_rename", "allow_link" and
+ * "allow_pivot_root".
*/
-struct tomoyo_double_path_acl_record {
- struct tomoyo_acl_info head; /* type = TOMOYO_TYPE_DOUBLE_PATH_ACL */
+struct tomoyo_path2_acl {
+ struct tomoyo_acl_info head; /* type = TOMOYO_TYPE_PATH2_ACL */
u8 perm;
/* Pointer to single pathname. */
const struct tomoyo_path_info *filename1;
@@ -214,29 +313,6 @@
const struct tomoyo_path_info *filename2;
};
-/* Keywords for ACLs. */
-#define TOMOYO_KEYWORD_ALIAS "alias "
-#define TOMOYO_KEYWORD_ALLOW_READ "allow_read "
-#define TOMOYO_KEYWORD_DELETE "delete "
-#define TOMOYO_KEYWORD_DENY_REWRITE "deny_rewrite "
-#define TOMOYO_KEYWORD_FILE_PATTERN "file_pattern "
-#define TOMOYO_KEYWORD_INITIALIZE_DOMAIN "initialize_domain "
-#define TOMOYO_KEYWORD_KEEP_DOMAIN "keep_domain "
-#define TOMOYO_KEYWORD_NO_INITIALIZE_DOMAIN "no_initialize_domain "
-#define TOMOYO_KEYWORD_NO_KEEP_DOMAIN "no_keep_domain "
-#define TOMOYO_KEYWORD_SELECT "select "
-#define TOMOYO_KEYWORD_USE_PROFILE "use_profile "
-#define TOMOYO_KEYWORD_IGNORE_GLOBAL_ALLOW_READ "ignore_global_allow_read"
-/* A domain definition starts with <kernel>. */
-#define TOMOYO_ROOT_NAME "<kernel>"
-#define TOMOYO_ROOT_NAME_LEN (sizeof(TOMOYO_ROOT_NAME) - 1)
-
-/* Index numbers for Access Controls. */
-#define TOMOYO_MAC_FOR_FILE 0 /* domain_policy.conf */
-#define TOMOYO_MAX_ACCEPT_ENTRY 1
-#define TOMOYO_VERBOSE 2
-#define TOMOYO_MAX_CONTROL_INDEX 3
-
/*
* tomoyo_io_buffer is a structure which is used for reading and modifying
* configuration via /sys/kernel/security/tomoyo/ interface.
@@ -265,6 +341,8 @@
int (*write) (struct tomoyo_io_buffer *);
/* Exclusive lock for this structure. */
struct mutex io_sem;
+ /* Index returned by tomoyo_read_lock(). */
+ int reader_idx;
/* The position currently reading from. */
struct list_head *read_var1;
/* Extra variables for reading. */
@@ -293,18 +371,159 @@
int writebuf_size;
};
+/*
+ * tomoyo_globally_readable_file_entry is a structure which is used for holding
+ * "allow_read" entries.
+ * It has the following fields.
+ *
+ * (1) "list" which is linked to tomoyo_globally_readable_list .
+ * (2) "filename" is a pathname which is allowed to open(O_RDONLY).
+ * (3) "is_deleted" is a bool which is true if marked as deleted, false
+ * otherwise.
+ */
+struct tomoyo_globally_readable_file_entry {
+ struct list_head list;
+ const struct tomoyo_path_info *filename;
+ bool is_deleted;
+};
+
+/*
+ * tomoyo_pattern_entry is a structure which is used for holding
+ * "tomoyo_pattern_list" entries.
+ * It has the following fields.
+ *
+ * (1) "list" which is linked to tomoyo_pattern_list .
+ * (2) "pattern" is a pathname pattern which is used for converting pathnames
+ * to pathname patterns during learning mode.
+ * (3) "is_deleted" is a bool which is true if marked as deleted, false
+ * otherwise.
+ */
+struct tomoyo_pattern_entry {
+ struct list_head list;
+ const struct tomoyo_path_info *pattern;
+ bool is_deleted;
+};
+
+/*
+ * tomoyo_no_rewrite_entry is a structure which is used for holding
+ * "deny_rewrite" entries.
+ * It has the following fields.
+ *
+ * (1) "list" which is linked to tomoyo_no_rewrite_list .
+ * (2) "pattern" is a pathname whose already existing content is, by default,
+ * not permitted to be modified (rewritten).
+ * (3) "is_deleted" is a bool which is true if marked as deleted, false
+ * otherwise.
+ */
+struct tomoyo_no_rewrite_entry {
+ struct list_head list;
+ const struct tomoyo_path_info *pattern;
+ bool is_deleted;
+};
+
+/*
+ * tomoyo_domain_initializer_entry is a structure which is used for holding
+ * "initialize_domain" and "no_initialize_domain" entries.
+ * It has the following fields.
+ *
+ * (1) "list" which is linked to tomoyo_domain_initializer_list .
+ * (2) "domainname" which is "a domainname" or "the last component of a
+ * domainname". This field is NULL if "from" clause is not specified.
+ * (3) "program" which is a program's pathname.
+ * (4) "is_deleted" is a bool which is true if marked as deleted, false
+ * otherwise.
+ * (5) "is_not" is a bool which is true if "no_initialize_domain", false
+ * otherwise.
+ * (6) "is_last_name" is a bool which is true if "domainname" is "the last
+ * component of a domainname", false otherwise.
+ */
+struct tomoyo_domain_initializer_entry {
+ struct list_head list;
+ const struct tomoyo_path_info *domainname; /* This may be NULL */
+ const struct tomoyo_path_info *program;
+ bool is_deleted;
+ bool is_not; /* True if this entry is "no_initialize_domain". */
+ /* True if the domainname is tomoyo_get_last_name(). */
+ bool is_last_name;
+};
+
+/*
+ * tomoyo_domain_keeper_entry is a structure which is used for holding
+ * "keep_domain" and "no_keep_domain" entries.
+ * It has the following fields.
+ *
+ * (1) "list" which is linked to tomoyo_domain_keeper_list .
+ * (2) "domainname" which is "a domainname" or "the last component of a
+ * domainname".
+ * (3) "program" which is a program's pathname.
+ * This field is NULL if "from" clause is not specified.
+ * (4) "is_deleted" is a bool which is true if marked as deleted, false
+ * otherwise.
+ * (5) "is_not" is a bool which is true if "no_keep_domain", false
+ * otherwise.
+ * (6) "is_last_name" is a bool which is true if "domainname" is "the last
+ * component of a domainname", false otherwise.
+ */
+struct tomoyo_domain_keeper_entry {
+ struct list_head list;
+ const struct tomoyo_path_info *domainname;
+ const struct tomoyo_path_info *program; /* This may be NULL */
+ bool is_deleted;
+ bool is_not; /* True if this entry is "no_keep_domain". */
+ /* True if the domainname is tomoyo_get_last_name(). */
+ bool is_last_name;
+};
+
+/*
+ * tomoyo_alias_entry is a structure which is used for holding "alias" entries.
+ * It has following fields.
+ *
+ * (1) "list" which is linked to tomoyo_alias_list .
+ * (2) "original_name" which is a dereferenced pathname.
+ * (3) "aliased_name" which is a symlink's pathname.
+ * (4) "is_deleted" is a bool which is true if marked as deleted, false
+ * otherwise.
+ */
+struct tomoyo_alias_entry {
+ struct list_head list;
+ const struct tomoyo_path_info *original_name;
+ const struct tomoyo_path_info *aliased_name;
+ bool is_deleted;
+};
+
+/*
+ * tomoyo_policy_manager_entry is a structure which is used for holding list of
+ * domainnames or programs which are permitted to modify configuration via
+ * /sys/kernel/security/tomoyo/ interface.
+ * It has the following fields.
+ *
+ * (1) "list" which is linked to tomoyo_policy_manager_list .
+ * (2) "manager" is a domainname or a program's pathname.
+ * (3) "is_domain" is a bool which is true if "manager" is a domainname, false
+ * otherwise.
+ * (4) "is_deleted" is a bool which is true if marked as deleted, false
+ * otherwise.
+ */
+struct tomoyo_policy_manager_entry {
+ struct list_head list;
+ /* A path to program or a domainname. */
+ const struct tomoyo_path_info *manager;
+ bool is_domain; /* True if manager is a domainname. */
+ bool is_deleted; /* True if this entry is deleted. */
+};
+
+/********** Function prototypes. **********/
+
/* Check whether the domain has too many ACL entries to hold. */
bool tomoyo_domain_quota_is_ok(struct tomoyo_domain_info * const domain);
/* Transactional sprintf() for policy dump. */
bool tomoyo_io_printf(struct tomoyo_io_buffer *head, const char *fmt, ...)
__attribute__ ((format(printf, 2, 3)));
/* Check whether the domainname is correct. */
-bool tomoyo_is_correct_domain(const unsigned char *domainname,
- const char *function);
+bool tomoyo_is_correct_domain(const unsigned char *domainname);
/* Check whether the token is correct. */
bool tomoyo_is_correct_path(const char *filename, const s8 start_type,
- const s8 pattern_type, const s8 end_type,
- const char *function);
+ const s8 pattern_type, const s8 end_type);
/* Check whether the token can be a domainname. */
bool tomoyo_is_domain_def(const unsigned char *buffer);
/* Check whether the given filename matches the given pattern. */
@@ -328,13 +547,13 @@
/* Write domain policy violation warning message to console? */
bool tomoyo_verbose_mode(const struct tomoyo_domain_info *domain);
/* Convert double path operation to operation name. */
-const char *tomoyo_dp2keyword(const u8 operation);
+const char *tomoyo_path22keyword(const u8 operation);
/* Get the last component of the given domainname. */
const char *tomoyo_get_last_name(const struct tomoyo_domain_info *domain);
/* Get warning message. */
const char *tomoyo_get_msg(const bool is_enforce);
/* Convert single path operation to operation name. */
-const char *tomoyo_sp2keyword(const u8 operation);
+const char *tomoyo_path2keyword(const u8 operation);
/* Create "alias" entry in exception policy. */
int tomoyo_write_alias_policy(char *data, const bool is_delete);
/*
@@ -370,15 +589,101 @@
/* Check mode for specified functionality. */
unsigned int tomoyo_check_flags(const struct tomoyo_domain_info *domain,
const u8 index);
-/* Allocate memory for structures. */
-void *tomoyo_alloc_acl_element(const u8 acl_type);
/* Fill in "struct tomoyo_path_info" members. */
void tomoyo_fill_path_info(struct tomoyo_path_info *ptr);
/* Run policy loader when /sbin/init starts. */
void tomoyo_load_policy(const char *filename);
-/* Change "struct tomoyo_domain_info"->flags. */
-void tomoyo_set_domain_flag(struct tomoyo_domain_info *domain,
- const bool is_delete, const u8 flags);
+
+/* Convert binary string to ascii string. */
+int tomoyo_encode(char *buffer, int buflen, const char *str);
+
+/* Returns realpath(3) of the given pathname but ignores chroot'ed root. */
+int tomoyo_realpath_from_path2(struct path *path, char *newname,
+ int newname_len);
+
+/*
+ * Returns realpath(3) of the given pathname but ignores chroot'ed root.
+ * These functions use kzalloc(), so the caller must call kfree()
+ * if these functions didn't return NULL.
+ */
+char *tomoyo_realpath(const char *pathname);
+/*
+ * Same as tomoyo_realpath() except that it doesn't follow the final symlink.
+ */
+char *tomoyo_realpath_nofollow(const char *pathname);
+/* Same as tomoyo_realpath() except that the pathname is already resolved. */
+char *tomoyo_realpath_from_path(struct path *path);
+
+/* Check memory quota. */
+bool tomoyo_memory_ok(void *ptr);
+
+/*
+ * Keep the given name on the RAM.
+ * The RAM is shared, so NEVER try to modify or kfree() the returned name.
+ */
+const struct tomoyo_path_info *tomoyo_get_name(const char *name);
+
+/* Check for memory usage. */
+int tomoyo_read_memory_counter(struct tomoyo_io_buffer *head);
+
+/* Set memory quota. */
+int tomoyo_write_memory_quota(struct tomoyo_io_buffer *head);
+
+/* Initialize realpath related code. */
+void __init tomoyo_realpath_init(void);
+int tomoyo_check_exec_perm(struct tomoyo_domain_info *domain,
+ const struct tomoyo_path_info *filename);
+int tomoyo_check_open_permission(struct tomoyo_domain_info *domain,
+ struct path *path, const int flag);
+int tomoyo_path_perm(const u8 operation, struct path *path);
+int tomoyo_path2_perm(const u8 operation, struct path *path1,
+ struct path *path2);
+int tomoyo_check_rewrite_permission(struct file *filp);
+int tomoyo_find_next_domain(struct linux_binprm *bprm);
+
+/* Run garbage collector. */
+void tomoyo_run_gc(void);
+
+void tomoyo_memory_free(void *ptr);
+
+/********** External variable definitions. **********/
+
+/* Lock for GC. */
+extern struct srcu_struct tomoyo_ss;
+
+/* The list for "struct tomoyo_domain_info". */
+extern struct list_head tomoyo_domain_list;
+
+extern struct list_head tomoyo_domain_initializer_list;
+extern struct list_head tomoyo_domain_keeper_list;
+extern struct list_head tomoyo_alias_list;
+extern struct list_head tomoyo_globally_readable_list;
+extern struct list_head tomoyo_pattern_list;
+extern struct list_head tomoyo_no_rewrite_list;
+extern struct list_head tomoyo_policy_manager_list;
+extern struct list_head tomoyo_name_list[TOMOYO_MAX_HASH];
+extern struct mutex tomoyo_name_list_lock;
+
+/* Lock for protecting policy. */
+extern struct mutex tomoyo_policy_lock;
+
+/* Has /sbin/init started? */
+extern bool tomoyo_policy_loaded;
+
+/* The kernel's domain. */
+extern struct tomoyo_domain_info tomoyo_kernel_domain;
+
+/********** Inlined functions. **********/
+
+static inline int tomoyo_read_lock(void)
+{
+ return srcu_read_lock(&tomoyo_ss);
+}
+
+static inline void tomoyo_read_unlock(int idx)
+{
+ srcu_read_unlock(&tomoyo_ss, idx);
+}
/* strcmp() for "struct tomoyo_path_info" structure. */
static inline bool tomoyo_pathcmp(const struct tomoyo_path_info *a,
@@ -387,18 +692,6 @@
return a->hash != b->hash || strcmp(a->name, b->name);
}
-/* Get type of an ACL entry. */
-static inline u8 tomoyo_acl_type1(struct tomoyo_acl_info *ptr)
-{
- return ptr->type & ~TOMOYO_ACL_DELETED;
-}
-
-/* Get type of an ACL entry. */
-static inline u8 tomoyo_acl_type2(struct tomoyo_acl_info *ptr)
-{
- return ptr->type;
-}
-
/**
* tomoyo_is_valid - Check whether the character is a valid char.
*
@@ -423,18 +716,25 @@
return c && (c <= ' ' || c >= 127);
}
-/* The list for "struct tomoyo_domain_info". */
-extern struct list_head tomoyo_domain_list;
-extern struct rw_semaphore tomoyo_domain_list_lock;
+static inline void tomoyo_put_name(const struct tomoyo_path_info *name)
+{
+ if (name) {
+ struct tomoyo_name_entry *ptr =
+ container_of(name, struct tomoyo_name_entry, entry);
+ atomic_dec(&ptr->users);
+ }
+}
-/* Lock for domain->acl_info_list. */
-extern struct rw_semaphore tomoyo_domain_acl_info_list_lock;
+static inline struct tomoyo_domain_info *tomoyo_domain(void)
+{
+ return current_cred()->security;
+}
-/* Has /sbin/init started? */
-extern bool tomoyo_policy_loaded;
-
-/* The kernel's domain. */
-extern struct tomoyo_domain_info tomoyo_kernel_domain;
+static inline struct tomoyo_domain_info *tomoyo_real_domain(struct task_struct
+ *task)
+{
+ return task_cred_xxx(task, security);
+}
/**
* list_for_each_cookie - iterate over a list with cookie.
@@ -442,16 +742,16 @@
* @cookie: the &struct list_head to use as a cookie.
* @head: the head for your list.
*
- * Same with list_for_each() except that this primitive uses @cookie
+ * Same as list_for_each_rcu() except that this primitive uses @cookie
* so that we can continue iteration.
* @cookie must be NULL when iteration starts, and @cookie will become
* NULL when iteration finishes.
*/
-#define list_for_each_cookie(pos, cookie, head) \
- for (({ if (!cookie) \
- cookie = head; }), \
- pos = (cookie)->next; \
- prefetch(pos->next), pos != (head) || ((cookie) = NULL); \
- (cookie) = pos, pos = pos->next)
+#define list_for_each_cookie(pos, cookie, head) \
+ for (({ if (!cookie) \
+ cookie = head; }), \
+ pos = rcu_dereference((cookie)->next); \
+ prefetch(pos->next), pos != (head) || ((cookie) = NULL); \
+ (cookie) = pos, pos = rcu_dereference(pos->next))
#endif /* !defined(_SECURITY_TOMOYO_COMMON_H) */
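The tomoyo_read_lock()/tomoyo_read_unlock() inlines above are thin wrappers around srcu_read_lock()/srcu_read_unlock() on tomoyo_ss, and tomoyo_run_gc() is only declared here, not shown in this diff. A hedged sketch of the reclaim pattern such a collector can follow with the structures declared above (example_reclaim() is hypothetical; the real collector may batch entries differently):

static void example_reclaim(struct tomoyo_globally_readable_file_entry *ptr)
{
	/* Unlink under the policy mutex so that updaters stay serialized. */
	mutex_lock(&tomoyo_policy_lock);
	list_del_rcu(&ptr->list);
	mutex_unlock(&tomoyo_policy_lock);
	/*
	 * Wait until every section entered via tomoyo_read_lock() has
	 * called tomoyo_read_unlock(); after this, no reader can still
	 * hold a pointer obtained from traversing the RCU list.
	 */
	synchronize_srcu(&tomoyo_ss);
	/* Drop the shared name reference and free the entry itself. */
	tomoyo_put_name(ptr->filename);
	kfree(ptr);
}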
diff --git a/security/tomoyo/domain.c b/security/tomoyo/domain.c
index fcf52ac..66caaa1 100644
--- a/security/tomoyo/domain.c
+++ b/security/tomoyo/domain.c
@@ -10,8 +10,6 @@
*/
#include "common.h"
-#include "tomoyo.h"
-#include "realpath.h"
#include <linux/binfmts.h>
/* Variables definitions.*/
@@ -58,99 +56,6 @@
* exceptions.
*/
LIST_HEAD(tomoyo_domain_list);
-DECLARE_RWSEM(tomoyo_domain_list_lock);
-
-/*
- * tomoyo_domain_initializer_entry is a structure which is used for holding
- * "initialize_domain" and "no_initialize_domain" entries.
- * It has following fields.
- *
- * (1) "list" which is linked to tomoyo_domain_initializer_list .
- * (2) "domainname" which is "a domainname" or "the last component of a
- * domainname". This field is NULL if "from" clause is not specified.
- * (3) "program" which is a program's pathname.
- * (4) "is_deleted" is a bool which is true if marked as deleted, false
- * otherwise.
- * (5) "is_not" is a bool which is true if "no_initialize_domain", false
- * otherwise.
- * (6) "is_last_name" is a bool which is true if "domainname" is "the last
- * component of a domainname", false otherwise.
- */
-struct tomoyo_domain_initializer_entry {
- struct list_head list;
- const struct tomoyo_path_info *domainname; /* This may be NULL */
- const struct tomoyo_path_info *program;
- bool is_deleted;
- bool is_not; /* True if this entry is "no_initialize_domain". */
- /* True if the domainname is tomoyo_get_last_name(). */
- bool is_last_name;
-};
-
-/*
- * tomoyo_domain_keeper_entry is a structure which is used for holding
- * "keep_domain" and "no_keep_domain" entries.
- * It has following fields.
- *
- * (1) "list" which is linked to tomoyo_domain_keeper_list .
- * (2) "domainname" which is "a domainname" or "the last component of a
- * domainname".
- * (3) "program" which is a program's pathname.
- * This field is NULL if "from" clause is not specified.
- * (4) "is_deleted" is a bool which is true if marked as deleted, false
- * otherwise.
- * (5) "is_not" is a bool which is true if "no_initialize_domain", false
- * otherwise.
- * (6) "is_last_name" is a bool which is true if "domainname" is "the last
- * component of a domainname", false otherwise.
- */
-struct tomoyo_domain_keeper_entry {
- struct list_head list;
- const struct tomoyo_path_info *domainname;
- const struct tomoyo_path_info *program; /* This may be NULL */
- bool is_deleted;
- bool is_not; /* True if this entry is "no_keep_domain". */
- /* True if the domainname is tomoyo_get_last_name(). */
- bool is_last_name;
-};
-
-/*
- * tomoyo_alias_entry is a structure which is used for holding "alias" entries.
- * It has following fields.
- *
- * (1) "list" which is linked to tomoyo_alias_list .
- * (2) "original_name" which is a dereferenced pathname.
- * (3) "aliased_name" which is a symlink's pathname.
- * (4) "is_deleted" is a bool which is true if marked as deleted, false
- * otherwise.
- */
-struct tomoyo_alias_entry {
- struct list_head list;
- const struct tomoyo_path_info *original_name;
- const struct tomoyo_path_info *aliased_name;
- bool is_deleted;
-};
-
-/**
- * tomoyo_set_domain_flag - Set or clear domain's attribute flags.
- *
- * @domain: Pointer to "struct tomoyo_domain_info".
- * @is_delete: True if it is a delete request.
- * @flags: Flags to set or clear.
- *
- * Returns nothing.
- */
-void tomoyo_set_domain_flag(struct tomoyo_domain_info *domain,
- const bool is_delete, const u8 flags)
-{
- /* We need to serialize because this is bitfield operation. */
- static DEFINE_SPINLOCK(lock);
- spin_lock(&lock);
- if (!is_delete)
- domain->flags |= flags;
- else
- domain->flags &= ~flags;
- spin_unlock(&lock);
-}
/**
* tomoyo_get_last_name - Get last component of a domainname.
@@ -205,8 +110,7 @@
* will cause "/usr/sbin/httpd" to belong to "<kernel> /usr/sbin/httpd" domain
* unless executed from "<kernel> /etc/rc.d/init.d/httpd" domain.
*/
-static LIST_HEAD(tomoyo_domain_initializer_list);
-static DECLARE_RWSEM(tomoyo_domain_initializer_list_lock);
+LIST_HEAD(tomoyo_domain_initializer_list);
/**
* tomoyo_update_domain_initializer_entry - Update "struct tomoyo_domain_initializer_entry" list.
@@ -217,59 +121,65 @@
* @is_delete: True if it is a delete request.
*
* Returns 0 on success, negative value otherwise.
+ *
+ * Caller holds tomoyo_read_lock().
*/
static int tomoyo_update_domain_initializer_entry(const char *domainname,
const char *program,
const bool is_not,
const bool is_delete)
{
- struct tomoyo_domain_initializer_entry *new_entry;
+ struct tomoyo_domain_initializer_entry *entry = NULL;
struct tomoyo_domain_initializer_entry *ptr;
- const struct tomoyo_path_info *saved_program;
+ const struct tomoyo_path_info *saved_program = NULL;
const struct tomoyo_path_info *saved_domainname = NULL;
- int error = -ENOMEM;
+ int error = is_delete ? -ENOENT : -ENOMEM;
bool is_last_name = false;
- if (!tomoyo_is_correct_path(program, 1, -1, -1, __func__))
+ if (!tomoyo_is_correct_path(program, 1, -1, -1))
return -EINVAL; /* No patterns allowed. */
if (domainname) {
if (!tomoyo_is_domain_def(domainname) &&
- tomoyo_is_correct_path(domainname, 1, -1, -1, __func__))
+ tomoyo_is_correct_path(domainname, 1, -1, -1))
is_last_name = true;
- else if (!tomoyo_is_correct_domain(domainname, __func__))
+ else if (!tomoyo_is_correct_domain(domainname))
return -EINVAL;
- saved_domainname = tomoyo_save_name(domainname);
+ saved_domainname = tomoyo_get_name(domainname);
if (!saved_domainname)
- return -ENOMEM;
+ goto out;
}
- saved_program = tomoyo_save_name(program);
+ saved_program = tomoyo_get_name(program);
if (!saved_program)
- return -ENOMEM;
- down_write(&tomoyo_domain_initializer_list_lock);
- list_for_each_entry(ptr, &tomoyo_domain_initializer_list, list) {
+ goto out;
+ if (!is_delete)
+ entry = kmalloc(sizeof(*entry), GFP_KERNEL);
+ mutex_lock(&tomoyo_policy_lock);
+ list_for_each_entry_rcu(ptr, &tomoyo_domain_initializer_list, list) {
if (ptr->is_not != is_not ||
ptr->domainname != saved_domainname ||
ptr->program != saved_program)
continue;
ptr->is_deleted = is_delete;
error = 0;
- goto out;
+ break;
}
- if (is_delete) {
- error = -ENOENT;
- goto out;
+ if (!is_delete && error && tomoyo_memory_ok(entry)) {
+ entry->domainname = saved_domainname;
+ saved_domainname = NULL;
+ entry->program = saved_program;
+ saved_program = NULL;
+ entry->is_not = is_not;
+ entry->is_last_name = is_last_name;
+ list_add_tail_rcu(&entry->list,
+ &tomoyo_domain_initializer_list);
+ entry = NULL;
+ error = 0;
}
- new_entry = tomoyo_alloc_element(sizeof(*new_entry));
- if (!new_entry)
- goto out;
- new_entry->domainname = saved_domainname;
- new_entry->program = saved_program;
- new_entry->is_not = is_not;
- new_entry->is_last_name = is_last_name;
- list_add_tail(&new_entry->list, &tomoyo_domain_initializer_list);
- error = 0;
+ mutex_unlock(&tomoyo_policy_lock);
out:
- up_write(&tomoyo_domain_initializer_list_lock);
+ tomoyo_put_name(saved_domainname);
+ tomoyo_put_name(saved_program);
+ kfree(entry);
return error;
}
@@ -279,13 +189,14 @@
* @head: Pointer to "struct tomoyo_io_buffer".
*
* Returns true on success, false otherwise.
+ *
+ * Caller holds tomoyo_read_lock().
*/
bool tomoyo_read_domain_initializer_policy(struct tomoyo_io_buffer *head)
{
struct list_head *pos;
bool done = true;
- down_read(&tomoyo_domain_initializer_list_lock);
list_for_each_cookie(pos, head->read_var2,
&tomoyo_domain_initializer_list) {
const char *no;
@@ -308,7 +219,6 @@
if (!done)
break;
}
- up_read(&tomoyo_domain_initializer_list_lock);
return done;
}
@@ -320,6 +230,8 @@
* @is_delete: True if it is a delete request.
*
* Returns 0 on success, negative value otherwise.
+ *
+ * Caller holds tomoyo_read_lock().
*/
int tomoyo_write_domain_initializer_policy(char *data, const bool is_not,
const bool is_delete)
@@ -345,6 +257,8 @@
*
* Returns true if executing @program reinitializes domain transition,
* false otherwise.
+ *
+ * Caller holds tomoyo_read_lock().
*/
static bool tomoyo_is_domain_initializer(const struct tomoyo_path_info *
domainname,
@@ -355,8 +269,7 @@
struct tomoyo_domain_initializer_entry *ptr;
bool flag = false;
- down_read(&tomoyo_domain_initializer_list_lock);
- list_for_each_entry(ptr, &tomoyo_domain_initializer_list, list) {
+ list_for_each_entry_rcu(ptr, &tomoyo_domain_initializer_list, list) {
if (ptr->is_deleted)
continue;
if (ptr->domainname) {
@@ -376,7 +289,6 @@
}
flag = true;
}
- up_read(&tomoyo_domain_initializer_list_lock);
return flag;
}
@@ -418,8 +330,7 @@
* "<kernel> /usr/sbin/sshd /bin/bash /usr/bin/passwd" domain, unless
* explicitly specified by "initialize_domain".
*/
-static LIST_HEAD(tomoyo_domain_keeper_list);
-static DECLARE_RWSEM(tomoyo_domain_keeper_list_lock);
+LIST_HEAD(tomoyo_domain_keeper_list);
/**
* tomoyo_update_domain_keeper_entry - Update "struct tomoyo_domain_keeper_entry" list.
@@ -430,59 +341,64 @@
* @is_delete: True if it is a delete request.
*
* Returns 0 on success, negative value otherwise.
+ *
+ * Caller holds tomoyo_read_lock().
*/
static int tomoyo_update_domain_keeper_entry(const char *domainname,
const char *program,
const bool is_not,
const bool is_delete)
{
- struct tomoyo_domain_keeper_entry *new_entry;
+ struct tomoyo_domain_keeper_entry *entry = NULL;
struct tomoyo_domain_keeper_entry *ptr;
- const struct tomoyo_path_info *saved_domainname;
+ const struct tomoyo_path_info *saved_domainname = NULL;
const struct tomoyo_path_info *saved_program = NULL;
- int error = -ENOMEM;
+ int error = is_delete ? -ENOENT : -ENOMEM;
bool is_last_name = false;
if (!tomoyo_is_domain_def(domainname) &&
- tomoyo_is_correct_path(domainname, 1, -1, -1, __func__))
+ tomoyo_is_correct_path(domainname, 1, -1, -1))
is_last_name = true;
- else if (!tomoyo_is_correct_domain(domainname, __func__))
+ else if (!tomoyo_is_correct_domain(domainname))
return -EINVAL;
if (program) {
- if (!tomoyo_is_correct_path(program, 1, -1, -1, __func__))
+ if (!tomoyo_is_correct_path(program, 1, -1, -1))
return -EINVAL;
- saved_program = tomoyo_save_name(program);
+ saved_program = tomoyo_get_name(program);
if (!saved_program)
- return -ENOMEM;
+ goto out;
}
- saved_domainname = tomoyo_save_name(domainname);
+ saved_domainname = tomoyo_get_name(domainname);
if (!saved_domainname)
- return -ENOMEM;
- down_write(&tomoyo_domain_keeper_list_lock);
- list_for_each_entry(ptr, &tomoyo_domain_keeper_list, list) {
+ goto out;
+ if (!is_delete)
+ entry = kmalloc(sizeof(*entry), GFP_KERNEL);
+ mutex_lock(&tomoyo_policy_lock);
+ list_for_each_entry_rcu(ptr, &tomoyo_domain_keeper_list, list) {
if (ptr->is_not != is_not ||
ptr->domainname != saved_domainname ||
ptr->program != saved_program)
continue;
ptr->is_deleted = is_delete;
error = 0;
- goto out;
+ break;
}
- if (is_delete) {
- error = -ENOENT;
- goto out;
+ if (!is_delete && error && tomoyo_memory_ok(entry)) {
+ entry->domainname = saved_domainname;
+ saved_domainname = NULL;
+ entry->program = saved_program;
+ saved_program = NULL;
+ entry->is_not = is_not;
+ entry->is_last_name = is_last_name;
+ list_add_tail_rcu(&entry->list, &tomoyo_domain_keeper_list);
+ entry = NULL;
+ error = 0;
}
- new_entry = tomoyo_alloc_element(sizeof(*new_entry));
- if (!new_entry)
- goto out;
- new_entry->domainname = saved_domainname;
- new_entry->program = saved_program;
- new_entry->is_not = is_not;
- new_entry->is_last_name = is_last_name;
- list_add_tail(&new_entry->list, &tomoyo_domain_keeper_list);
- error = 0;
+ mutex_unlock(&tomoyo_policy_lock);
out:
- up_write(&tomoyo_domain_keeper_list_lock);
+ tomoyo_put_name(saved_domainname);
+ tomoyo_put_name(saved_program);
+ kfree(entry);
return error;
}
@@ -493,6 +409,7 @@
* @is_not: True if it is "no_keep_domain" entry.
* @is_delete: True if it is a delete request.
*
+ * Caller holds tomoyo_read_lock().
*/
int tomoyo_write_domain_keeper_policy(char *data, const bool is_not,
const bool is_delete)
@@ -513,13 +430,14 @@
* @head: Pointer to "struct tomoyo_io_buffer".
*
* Returns true on success, false otherwise.
+ *
+ * Caller holds tomoyo_read_lock().
*/
bool tomoyo_read_domain_keeper_policy(struct tomoyo_io_buffer *head)
{
struct list_head *pos;
bool done = true;
- down_read(&tomoyo_domain_keeper_list_lock);
list_for_each_cookie(pos, head->read_var2,
&tomoyo_domain_keeper_list) {
struct tomoyo_domain_keeper_entry *ptr;
@@ -542,7 +460,6 @@
if (!done)
break;
}
- up_read(&tomoyo_domain_keeper_list_lock);
return done;
}
@@ -555,6 +472,8 @@
*
* Returns true if executing @program suppresses domain transition,
* false otherwise.
+ *
+ * Caller holds tomoyo_read_lock().
*/
static bool tomoyo_is_domain_keeper(const struct tomoyo_path_info *domainname,
const struct tomoyo_path_info *program,
@@ -563,8 +482,7 @@
struct tomoyo_domain_keeper_entry *ptr;
bool flag = false;
- down_read(&tomoyo_domain_keeper_list_lock);
- list_for_each_entry(ptr, &tomoyo_domain_keeper_list, list) {
+ list_for_each_entry_rcu(ptr, &tomoyo_domain_keeper_list, list) {
if (ptr->is_deleted)
continue;
if (!ptr->is_last_name) {
@@ -582,7 +500,6 @@
}
flag = true;
}
- up_read(&tomoyo_domain_keeper_list_lock);
return flag;
}
@@ -616,8 +533,7 @@
* /bin/busybox and domainname which the current process will belong to after
* execve() succeeds is calculated using /bin/cat rather than /bin/busybox .
*/
-static LIST_HEAD(tomoyo_alias_list);
-static DECLARE_RWSEM(tomoyo_alias_list_lock);
+LIST_HEAD(tomoyo_alias_list);
/**
* tomoyo_update_alias_entry - Update "struct tomoyo_alias_entry" list.
@@ -627,46 +543,51 @@
* @is_delete: True if it is a delete request.
*
* Returns 0 on success, negative value otherwise.
+ *
+ * Caller holds tomoyo_read_lock().
*/
static int tomoyo_update_alias_entry(const char *original_name,
const char *aliased_name,
const bool is_delete)
{
- struct tomoyo_alias_entry *new_entry;
+ struct tomoyo_alias_entry *entry = NULL;
struct tomoyo_alias_entry *ptr;
const struct tomoyo_path_info *saved_original_name;
const struct tomoyo_path_info *saved_aliased_name;
- int error = -ENOMEM;
+ int error = is_delete ? -ENOENT : -ENOMEM;
- if (!tomoyo_is_correct_path(original_name, 1, -1, -1, __func__) ||
- !tomoyo_is_correct_path(aliased_name, 1, -1, -1, __func__))
+ if (!tomoyo_is_correct_path(original_name, 1, -1, -1) ||
+ !tomoyo_is_correct_path(aliased_name, 1, -1, -1))
return -EINVAL; /* No patterns allowed. */
- saved_original_name = tomoyo_save_name(original_name);
- saved_aliased_name = tomoyo_save_name(aliased_name);
+ saved_original_name = tomoyo_get_name(original_name);
+ saved_aliased_name = tomoyo_get_name(aliased_name);
if (!saved_original_name || !saved_aliased_name)
- return -ENOMEM;
- down_write(&tomoyo_alias_list_lock);
- list_for_each_entry(ptr, &tomoyo_alias_list, list) {
+ goto out;
+ if (!is_delete)
+ entry = kmalloc(sizeof(*entry), GFP_KERNEL);
+ mutex_lock(&tomoyo_policy_lock);
+ list_for_each_entry_rcu(ptr, &tomoyo_alias_list, list) {
if (ptr->original_name != saved_original_name ||
ptr->aliased_name != saved_aliased_name)
continue;
ptr->is_deleted = is_delete;
error = 0;
- goto out;
+ break;
}
- if (is_delete) {
- error = -ENOENT;
- goto out;
+ if (!is_delete && error && tomoyo_memory_ok(entry)) {
+ entry->original_name = saved_original_name;
+ saved_original_name = NULL;
+ entry->aliased_name = saved_aliased_name;
+ saved_aliased_name = NULL;
+ list_add_tail_rcu(&entry->list, &tomoyo_alias_list);
+ entry = NULL;
+ error = 0;
}
- new_entry = tomoyo_alloc_element(sizeof(*new_entry));
- if (!new_entry)
- goto out;
- new_entry->original_name = saved_original_name;
- new_entry->aliased_name = saved_aliased_name;
- list_add_tail(&new_entry->list, &tomoyo_alias_list);
- error = 0;
+ mutex_unlock(&tomoyo_policy_lock);
out:
- up_write(&tomoyo_alias_list_lock);
+ tomoyo_put_name(saved_original_name);
+ tomoyo_put_name(saved_aliased_name);
+ kfree(entry);
return error;
}
@@ -676,13 +597,14 @@
* @head: Pointer to "struct tomoyo_io_buffer".
*
* Returns true on success, false otherwise.
+ *
+ * Caller holds tomoyo_read_lock().
*/
bool tomoyo_read_alias_policy(struct tomoyo_io_buffer *head)
{
struct list_head *pos;
bool done = true;
- down_read(&tomoyo_alias_list_lock);
list_for_each_cookie(pos, head->read_var2, &tomoyo_alias_list) {
struct tomoyo_alias_entry *ptr;
@@ -695,7 +617,6 @@
if (!done)
break;
}
- up_read(&tomoyo_alias_list_lock);
return done;
}
@@ -706,6 +627,8 @@
* @is_delete: True if it is a delete request.
*
* Returns 0 on success, negative value otherwise.
+ *
+ * Caller holds tomoyo_read_lock().
*/
int tomoyo_write_alias_policy(char *data, const bool is_delete)
{
@@ -724,63 +647,46 @@
* @profile: Profile number to assign if the domain was newly created.
*
* Returns pointer to "struct tomoyo_domain_info" on success, NULL otherwise.
+ *
+ * Caller holds tomoyo_read_lock().
*/
struct tomoyo_domain_info *tomoyo_find_or_assign_new_domain(const char *
domainname,
const u8 profile)
{
- struct tomoyo_domain_info *domain = NULL;
+ struct tomoyo_domain_info *entry;
+ struct tomoyo_domain_info *domain;
const struct tomoyo_path_info *saved_domainname;
+ bool found = false;
- down_write(&tomoyo_domain_list_lock);
- domain = tomoyo_find_domain(domainname);
- if (domain)
- goto out;
- if (!tomoyo_is_correct_domain(domainname, __func__))
- goto out;
- saved_domainname = tomoyo_save_name(domainname);
+ if (!tomoyo_is_correct_domain(domainname))
+ return NULL;
+ saved_domainname = tomoyo_get_name(domainname);
if (!saved_domainname)
- goto out;
- /* Can I reuse memory of deleted domain? */
- list_for_each_entry(domain, &tomoyo_domain_list, list) {
- struct task_struct *p;
- struct tomoyo_acl_info *ptr;
- bool flag;
- if (!domain->is_deleted ||
- domain->domainname != saved_domainname)
+ return NULL;
+ entry = kzalloc(sizeof(*entry), GFP_KERNEL);
+ mutex_lock(&tomoyo_policy_lock);
+ list_for_each_entry_rcu(domain, &tomoyo_domain_list, list) {
+ if (domain->is_deleted ||
+ tomoyo_pathcmp(saved_domainname, domain->domainname))
continue;
- flag = false;
- read_lock(&tasklist_lock);
- for_each_process(p) {
- if (tomoyo_real_domain(p) != domain)
- continue;
- flag = true;
- break;
- }
- read_unlock(&tasklist_lock);
- if (flag)
- continue;
- list_for_each_entry(ptr, &domain->acl_info_list, list) {
- ptr->type |= TOMOYO_ACL_DELETED;
- }
- tomoyo_set_domain_flag(domain, true, domain->flags);
- domain->profile = profile;
- domain->quota_warned = false;
- mb(); /* Avoid out-of-order execution. */
- domain->is_deleted = false;
- goto out;
+ found = true;
+ break;
}
- /* No memory reusable. Create using new memory. */
- domain = tomoyo_alloc_element(sizeof(*domain));
- if (domain) {
- INIT_LIST_HEAD(&domain->acl_info_list);
- domain->domainname = saved_domainname;
- domain->profile = profile;
- list_add_tail(&domain->list, &tomoyo_domain_list);
+ if (!found && tomoyo_memory_ok(entry)) {
+ INIT_LIST_HEAD(&entry->acl_info_list);
+ entry->domainname = saved_domainname;
+ saved_domainname = NULL;
+ entry->profile = profile;
+ list_add_tail_rcu(&entry->list, &tomoyo_domain_list);
+ domain = entry;
+ entry = NULL;
+ found = true;
}
- out:
- up_write(&tomoyo_domain_list_lock);
- return domain;
+ mutex_unlock(&tomoyo_policy_lock);
+ tomoyo_put_name(saved_domainname);
+ kfree(entry);
+ return found ? domain : NULL;
}
/**
@@ -789,6 +695,8 @@
* @bprm: Pointer to "struct linux_binprm".
*
* Returns 0 on success, negative value otherwise.
+ *
+ * Caller holds tomoyo_read_lock().
*/
int tomoyo_find_next_domain(struct linux_binprm *bprm)
{
@@ -796,7 +704,7 @@
* This function assumes that the size of buffer returned by
* tomoyo_realpath() = TOMOYO_MAX_PATHNAME_LEN.
*/
- struct tomoyo_page_buffer *tmp = tomoyo_alloc(sizeof(*tmp));
+ struct tomoyo_page_buffer *tmp = kzalloc(sizeof(*tmp), GFP_KERNEL);
struct tomoyo_domain_info *old_domain = tomoyo_domain();
struct tomoyo_domain_info *domain = NULL;
const char *old_domain_name = old_domain->domainname->name;
@@ -849,8 +757,7 @@
if (tomoyo_pathcmp(&r, &s)) {
struct tomoyo_alias_entry *ptr;
/* Is this program allowed to be called via symbolic links? */
- down_read(&tomoyo_alias_list_lock);
- list_for_each_entry(ptr, &tomoyo_alias_list, list) {
+ list_for_each_entry_rcu(ptr, &tomoyo_alias_list, list) {
if (ptr->is_deleted ||
tomoyo_pathcmp(&r, ptr->original_name) ||
tomoyo_pathcmp(&s, ptr->aliased_name))
@@ -861,7 +768,6 @@
tomoyo_fill_path_info(&r);
break;
}
- up_read(&tomoyo_alias_list_lock);
}
/* Check execute permission. */
@@ -892,9 +798,7 @@
}
if (domain || strlen(new_domain_name) >= TOMOYO_MAX_PATHNAME_LEN)
goto done;
- down_read(&tomoyo_domain_list_lock);
domain = tomoyo_find_domain(new_domain_name);
- up_read(&tomoyo_domain_list_lock);
if (domain)
goto done;
if (is_enforce)
@@ -909,14 +813,15 @@
if (is_enforce)
retval = -EPERM;
else
- tomoyo_set_domain_flag(old_domain, false,
- TOMOYO_DOMAIN_FLAGS_TRANSITION_FAILED);
+ old_domain->transition_failed = true;
out:
if (!domain)
domain = old_domain;
+ /* Update reference count on "struct tomoyo_domain_info". */
+ atomic_inc(&domain->users);
bprm->cred->security = domain;
- tomoyo_free(real_program_name);
- tomoyo_free(symlink_program_name);
- tomoyo_free(tmp);
+ kfree(real_program_name);
+ kfree(symlink_program_name);
+ kfree(tmp);
return retval;
}
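Each tomoyo_update_*_entry() converted above (and the ones converted in file.c below) now follows one shape: allocate the candidate entry before taking the lock, walk the RCU-protected list under tomoyo_policy_lock, either flip is_deleted in place or append the new entry with list_add_tail_rcu(), then drop the unused name references and the unused pre-allocation. A condensed, hypothetical sketch of that shape, assumed to run with tomoyo_read_lock() held as the patch requires (kzalloc() is used here to keep the sketch self-evidently initialized; the patch itself uses kmalloc() plus tomoyo_memory_ok()):

static int example_update_entry(const char *filename, const bool is_delete)
{
	struct tomoyo_globally_readable_file_entry *entry = NULL;
	struct tomoyo_globally_readable_file_entry *ptr;
	const struct tomoyo_path_info *saved = tomoyo_get_name(filename);
	int error = is_delete ? -ENOENT : -ENOMEM;

	if (!saved)
		return -ENOMEM;
	if (!is_delete)
		entry = kzalloc(sizeof(*entry), GFP_KERNEL); /* before the lock */
	mutex_lock(&tomoyo_policy_lock);
	list_for_each_entry_rcu(ptr, &tomoyo_globally_readable_list, list) {
		if (ptr->filename != saved)
			continue;
		ptr->is_deleted = is_delete; /* never unlink while readers run */
		error = 0;
		break;
	}
	if (!is_delete && error && tomoyo_memory_ok(entry)) {
		entry->filename = saved; /* hand the name reference over */
		saved = NULL;
		list_add_tail_rcu(&entry->list, &tomoyo_globally_readable_list);
		entry = NULL;
		error = 0;
	}
	mutex_unlock(&tomoyo_policy_lock);
	tomoyo_put_name(saved); /* NULL-safe */
	kfree(entry);           /* frees only an unused pre-allocation */
	return error;
}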
diff --git a/security/tomoyo/file.c b/security/tomoyo/file.c
index 9a6c588..1b24304 100644
--- a/security/tomoyo/file.c
+++ b/security/tomoyo/file.c
@@ -10,108 +10,64 @@
*/
#include "common.h"
-#include "tomoyo.h"
-#include "realpath.h"
-
-/*
- * tomoyo_globally_readable_file_entry is a structure which is used for holding
- * "allow_read" entries.
- * It has following fields.
- *
- * (1) "list" which is linked to tomoyo_globally_readable_list .
- * (2) "filename" is a pathname which is allowed to open(O_RDONLY).
- * (3) "is_deleted" is a bool which is true if marked as deleted, false
- * otherwise.
- */
-struct tomoyo_globally_readable_file_entry {
- struct list_head list;
- const struct tomoyo_path_info *filename;
- bool is_deleted;
-};
-
-/*
- * tomoyo_pattern_entry is a structure which is used for holding
- * "tomoyo_pattern_list" entries.
- * It has following fields.
- *
- * (1) "list" which is linked to tomoyo_pattern_list .
- * (2) "pattern" is a pathname pattern which is used for converting pathnames
- * to pathname patterns during learning mode.
- * (3) "is_deleted" is a bool which is true if marked as deleted, false
- * otherwise.
- */
-struct tomoyo_pattern_entry {
- struct list_head list;
- const struct tomoyo_path_info *pattern;
- bool is_deleted;
-};
-
-/*
- * tomoyo_no_rewrite_entry is a structure which is used for holding
- * "deny_rewrite" entries.
- * It has following fields.
- *
- * (1) "list" which is linked to tomoyo_no_rewrite_list .
- * (2) "pattern" is a pathname which is by default not permitted to modify
- * already existing content.
- * (3) "is_deleted" is a bool which is true if marked as deleted, false
- * otherwise.
- */
-struct tomoyo_no_rewrite_entry {
- struct list_head list;
- const struct tomoyo_path_info *pattern;
- bool is_deleted;
-};
/* Keyword array for single path operations. */
-static const char *tomoyo_sp_keyword[TOMOYO_MAX_SINGLE_PATH_OPERATION] = {
- [TOMOYO_TYPE_READ_WRITE_ACL] = "read/write",
- [TOMOYO_TYPE_EXECUTE_ACL] = "execute",
- [TOMOYO_TYPE_READ_ACL] = "read",
- [TOMOYO_TYPE_WRITE_ACL] = "write",
- [TOMOYO_TYPE_CREATE_ACL] = "create",
- [TOMOYO_TYPE_UNLINK_ACL] = "unlink",
- [TOMOYO_TYPE_MKDIR_ACL] = "mkdir",
- [TOMOYO_TYPE_RMDIR_ACL] = "rmdir",
- [TOMOYO_TYPE_MKFIFO_ACL] = "mkfifo",
- [TOMOYO_TYPE_MKSOCK_ACL] = "mksock",
- [TOMOYO_TYPE_MKBLOCK_ACL] = "mkblock",
- [TOMOYO_TYPE_MKCHAR_ACL] = "mkchar",
- [TOMOYO_TYPE_TRUNCATE_ACL] = "truncate",
- [TOMOYO_TYPE_SYMLINK_ACL] = "symlink",
- [TOMOYO_TYPE_REWRITE_ACL] = "rewrite",
+static const char *tomoyo_path_keyword[TOMOYO_MAX_PATH_OPERATION] = {
+ [TOMOYO_TYPE_READ_WRITE] = "read/write",
+ [TOMOYO_TYPE_EXECUTE] = "execute",
+ [TOMOYO_TYPE_READ] = "read",
+ [TOMOYO_TYPE_WRITE] = "write",
+ [TOMOYO_TYPE_CREATE] = "create",
+ [TOMOYO_TYPE_UNLINK] = "unlink",
+ [TOMOYO_TYPE_MKDIR] = "mkdir",
+ [TOMOYO_TYPE_RMDIR] = "rmdir",
+ [TOMOYO_TYPE_MKFIFO] = "mkfifo",
+ [TOMOYO_TYPE_MKSOCK] = "mksock",
+ [TOMOYO_TYPE_MKBLOCK] = "mkblock",
+ [TOMOYO_TYPE_MKCHAR] = "mkchar",
+ [TOMOYO_TYPE_TRUNCATE] = "truncate",
+ [TOMOYO_TYPE_SYMLINK] = "symlink",
+ [TOMOYO_TYPE_REWRITE] = "rewrite",
+ [TOMOYO_TYPE_IOCTL] = "ioctl",
+ [TOMOYO_TYPE_CHMOD] = "chmod",
+ [TOMOYO_TYPE_CHOWN] = "chown",
+ [TOMOYO_TYPE_CHGRP] = "chgrp",
+ [TOMOYO_TYPE_CHROOT] = "chroot",
+ [TOMOYO_TYPE_MOUNT] = "mount",
+ [TOMOYO_TYPE_UMOUNT] = "unmount",
};
/* Keyword array for double path operations. */
-static const char *tomoyo_dp_keyword[TOMOYO_MAX_DOUBLE_PATH_OPERATION] = {
- [TOMOYO_TYPE_LINK_ACL] = "link",
- [TOMOYO_TYPE_RENAME_ACL] = "rename",
+static const char *tomoyo_path2_keyword[TOMOYO_MAX_PATH2_OPERATION] = {
+ [TOMOYO_TYPE_LINK] = "link",
+ [TOMOYO_TYPE_RENAME] = "rename",
+ [TOMOYO_TYPE_PIVOT_ROOT] = "pivot_root",
};
/**
- * tomoyo_sp2keyword - Get the name of single path operation.
+ * tomoyo_path2keyword - Get the name of single path operation.
*
* @operation: Type of operation.
*
* Returns the name of single path operation.
*/
-const char *tomoyo_sp2keyword(const u8 operation)
+const char *tomoyo_path2keyword(const u8 operation)
{
- return (operation < TOMOYO_MAX_SINGLE_PATH_OPERATION)
- ? tomoyo_sp_keyword[operation] : NULL;
+ return (operation < TOMOYO_MAX_PATH_OPERATION)
+ ? tomoyo_path_keyword[operation] : NULL;
}
/**
- * tomoyo_dp2keyword - Get the name of double path operation.
+ * tomoyo_path22keyword - Get the name of double path operation.
*
* @operation: Type of operation.
*
* Returns the name of double path operation.
*/
-const char *tomoyo_dp2keyword(const u8 operation)
+const char *tomoyo_path22keyword(const u8 operation)
{
- return (operation < TOMOYO_MAX_DOUBLE_PATH_OPERATION)
- ? tomoyo_dp_keyword[operation] : NULL;
+ return (operation < TOMOYO_MAX_PATH2_OPERATION)
+ ? tomoyo_path2_keyword[operation] : NULL;
}
/**
@@ -142,7 +98,8 @@
static struct tomoyo_path_info *tomoyo_get_path(struct path *path)
{
int error;
- struct tomoyo_path_info_with_data *buf = tomoyo_alloc(sizeof(*buf));
+ struct tomoyo_path_info_with_data *buf = kzalloc(sizeof(*buf),
+ GFP_KERNEL);
if (!buf)
return NULL;
@@ -154,20 +111,17 @@
tomoyo_fill_path_info(&buf->head);
return &buf->head;
}
- tomoyo_free(buf);
+ kfree(buf);
return NULL;
}
-/* Lock for domain->acl_info_list. */
-DECLARE_RWSEM(tomoyo_domain_acl_info_list_lock);
-
-static int tomoyo_update_double_path_acl(const u8 type, const char *filename1,
- const char *filename2,
- struct tomoyo_domain_info *
- const domain, const bool is_delete);
-static int tomoyo_update_single_path_acl(const u8 type, const char *filename,
- struct tomoyo_domain_info *
- const domain, const bool is_delete);
+static int tomoyo_update_path2_acl(const u8 type, const char *filename1,
+ const char *filename2,
+ struct tomoyo_domain_info *const domain,
+ const bool is_delete);
+static int tomoyo_update_path_acl(const u8 type, const char *filename,
+ struct tomoyo_domain_info *const domain,
+ const bool is_delete);
/*
* tomoyo_globally_readable_list is used for holding list of pathnames which
@@ -194,8 +148,7 @@
* given "allow_read /lib/libc-2.5.so" to the domain which current process
* belongs to.
*/
-static LIST_HEAD(tomoyo_globally_readable_list);
-static DECLARE_RWSEM(tomoyo_globally_readable_list_lock);
+LIST_HEAD(tomoyo_globally_readable_list);
/**
* tomoyo_update_globally_readable_entry - Update "struct tomoyo_globally_readable_file_entry" list.
@@ -204,40 +157,42 @@
* @is_delete: True if it is a delete request.
*
* Returns 0 on success, negative value otherwise.
+ *
+ * Caller holds tomoyo_read_lock().
*/
static int tomoyo_update_globally_readable_entry(const char *filename,
const bool is_delete)
{
- struct tomoyo_globally_readable_file_entry *new_entry;
+ struct tomoyo_globally_readable_file_entry *entry = NULL;
struct tomoyo_globally_readable_file_entry *ptr;
const struct tomoyo_path_info *saved_filename;
- int error = -ENOMEM;
+ int error = is_delete ? -ENOENT : -ENOMEM;
- if (!tomoyo_is_correct_path(filename, 1, 0, -1, __func__))
+ if (!tomoyo_is_correct_path(filename, 1, 0, -1))
return -EINVAL;
- saved_filename = tomoyo_save_name(filename);
+ saved_filename = tomoyo_get_name(filename);
if (!saved_filename)
return -ENOMEM;
- down_write(&tomoyo_globally_readable_list_lock);
- list_for_each_entry(ptr, &tomoyo_globally_readable_list, list) {
+ if (!is_delete)
+ entry = kmalloc(sizeof(*entry), GFP_KERNEL);
+ mutex_lock(&tomoyo_policy_lock);
+ list_for_each_entry_rcu(ptr, &tomoyo_globally_readable_list, list) {
if (ptr->filename != saved_filename)
continue;
ptr->is_deleted = is_delete;
error = 0;
- goto out;
+ break;
}
- if (is_delete) {
- error = -ENOENT;
- goto out;
+ if (!is_delete && error && tomoyo_memory_ok(entry)) {
+ entry->filename = saved_filename;
+ saved_filename = NULL;
+ list_add_tail_rcu(&entry->list, &tomoyo_globally_readable_list);
+ entry = NULL;
+ error = 0;
}
- new_entry = tomoyo_alloc_element(sizeof(*new_entry));
- if (!new_entry)
- goto out;
- new_entry->filename = saved_filename;
- list_add_tail(&new_entry->list, &tomoyo_globally_readable_list);
- error = 0;
- out:
- up_write(&tomoyo_globally_readable_list_lock);
+ mutex_unlock(&tomoyo_policy_lock);
+ tomoyo_put_name(saved_filename);
+ kfree(entry);
return error;
}
@@ -247,21 +202,22 @@
* @filename: The filename to check.
*
* Returns true if any domain can open @filename for reading, false otherwise.
+ *
+ * Caller holds tomoyo_read_lock().
*/
static bool tomoyo_is_globally_readable_file(const struct tomoyo_path_info *
filename)
{
struct tomoyo_globally_readable_file_entry *ptr;
bool found = false;
- down_read(&tomoyo_globally_readable_list_lock);
- list_for_each_entry(ptr, &tomoyo_globally_readable_list, list) {
+
+ list_for_each_entry_rcu(ptr, &tomoyo_globally_readable_list, list) {
if (!ptr->is_deleted &&
tomoyo_path_matches_pattern(filename, ptr->filename)) {
found = true;
break;
}
}
- up_read(&tomoyo_globally_readable_list_lock);
return found;
}
@@ -272,6 +228,8 @@
* @is_delete: True if it is a delete request.
*
* Returns 0 on success, negative value otherwise.
+ *
+ * Caller holds tomoyo_read_lock().
*/
int tomoyo_write_globally_readable_policy(char *data, const bool is_delete)
{
@@ -284,13 +242,14 @@
* @head: Pointer to "struct tomoyo_io_buffer".
*
* Returns true on success, false otherwise.
+ *
+ * Caller holds tomoyo_read_lock().
*/
bool tomoyo_read_globally_readable_policy(struct tomoyo_io_buffer *head)
{
struct list_head *pos;
bool done = true;
- down_read(&tomoyo_globally_readable_list_lock);
list_for_each_cookie(pos, head->read_var2,
&tomoyo_globally_readable_list) {
struct tomoyo_globally_readable_file_entry *ptr;
@@ -304,7 +263,6 @@
if (!done)
break;
}
- up_read(&tomoyo_globally_readable_list_lock);
return done;
}
@@ -337,8 +295,7 @@
* which pretends as if /proc/self/ is not a symlink; so that we can forbid
* current process from accessing other process's information.
*/
-static LIST_HEAD(tomoyo_pattern_list);
-static DECLARE_RWSEM(tomoyo_pattern_list_lock);
+LIST_HEAD(tomoyo_pattern_list);
/**
* tomoyo_update_file_pattern_entry - Update "struct tomoyo_pattern_entry" list.
@@ -347,40 +304,43 @@
* @is_delete: True if it is a delete request.
*
* Returns 0 on success, negative value otherwise.
+ *
+ * Caller holds tomoyo_read_lock().
*/
static int tomoyo_update_file_pattern_entry(const char *pattern,
const bool is_delete)
{
- struct tomoyo_pattern_entry *new_entry;
+ struct tomoyo_pattern_entry *entry = NULL;
struct tomoyo_pattern_entry *ptr;
const struct tomoyo_path_info *saved_pattern;
- int error = -ENOMEM;
+ int error = is_delete ? -ENOENT : -ENOMEM;
- if (!tomoyo_is_correct_path(pattern, 0, 1, 0, __func__))
- return -EINVAL;
- saved_pattern = tomoyo_save_name(pattern);
+ saved_pattern = tomoyo_get_name(pattern);
if (!saved_pattern)
- return -ENOMEM;
- down_write(&tomoyo_pattern_list_lock);
- list_for_each_entry(ptr, &tomoyo_pattern_list, list) {
+ return error;
+ if (!saved_pattern->is_patterned)
+ goto out;
+ if (!is_delete)
+ entry = kmalloc(sizeof(*entry), GFP_KERNEL);
+ mutex_lock(&tomoyo_policy_lock);
+ list_for_each_entry_rcu(ptr, &tomoyo_pattern_list, list) {
if (saved_pattern != ptr->pattern)
continue;
ptr->is_deleted = is_delete;
error = 0;
- goto out;
+ break;
}
- if (is_delete) {
- error = -ENOENT;
- goto out;
+ if (!is_delete && error && tomoyo_memory_ok(entry)) {
+ entry->pattern = saved_pattern;
+ saved_pattern = NULL;
+ list_add_tail_rcu(&entry->list, &tomoyo_pattern_list);
+ entry = NULL;
+ error = 0;
}
- new_entry = tomoyo_alloc_element(sizeof(*new_entry));
- if (!new_entry)
- goto out;
- new_entry->pattern = saved_pattern;
- list_add_tail(&new_entry->list, &tomoyo_pattern_list);
- error = 0;
+ mutex_unlock(&tomoyo_policy_lock);
out:
- up_write(&tomoyo_pattern_list_lock);
+ kfree(entry);
+ tomoyo_put_name(saved_pattern);
return error;
}
@@ -390,6 +350,8 @@
* @filename: The filename to find patterned pathname.
*
* Returns pointer to pathname pattern if matched, @filename otherwise.
+ *
+ * Caller holds tomoyo_read_lock().
*/
static const struct tomoyo_path_info *
tomoyo_get_file_pattern(const struct tomoyo_path_info *filename)
@@ -397,8 +359,7 @@
struct tomoyo_pattern_entry *ptr;
const struct tomoyo_path_info *pattern = NULL;
- down_read(&tomoyo_pattern_list_lock);
- list_for_each_entry(ptr, &tomoyo_pattern_list, list) {
+ list_for_each_entry_rcu(ptr, &tomoyo_pattern_list, list) {
if (ptr->is_deleted)
continue;
if (!tomoyo_path_matches_pattern(filename, ptr->pattern))
@@ -411,7 +372,6 @@
break;
}
}
- up_read(&tomoyo_pattern_list_lock);
if (pattern)
filename = pattern;
return filename;
@@ -424,6 +384,8 @@
* @is_delete: True if it is a delete request.
*
* Returns 0 on success, negative value otherwise.
+ *
+ * Caller holds tomoyo_read_lock().
*/
int tomoyo_write_pattern_policy(char *data, const bool is_delete)
{
@@ -436,13 +398,14 @@
* @head: Pointer to "struct tomoyo_io_buffer".
*
* Returns true on success, false otherwise.
+ *
+ * Caller holds tomoyo_read_lock().
*/
bool tomoyo_read_file_pattern(struct tomoyo_io_buffer *head)
{
struct list_head *pos;
bool done = true;
- down_read(&tomoyo_pattern_list_lock);
list_for_each_cookie(pos, head->read_var2, &tomoyo_pattern_list) {
struct tomoyo_pattern_entry *ptr;
ptr = list_entry(pos, struct tomoyo_pattern_entry, list);
@@ -453,7 +416,6 @@
if (!done)
break;
}
- up_read(&tomoyo_pattern_list_lock);
return done;
}
@@ -486,8 +448,7 @@
* " (deleted)" suffix if the file is already unlink()ed; so that we don't
* need to worry whether the file is already unlink()ed or not.
*/
-static LIST_HEAD(tomoyo_no_rewrite_list);
-static DECLARE_RWSEM(tomoyo_no_rewrite_list_lock);
+LIST_HEAD(tomoyo_no_rewrite_list);
/**
* tomoyo_update_no_rewrite_entry - Update "struct tomoyo_no_rewrite_entry" list.
@@ -496,39 +457,42 @@
* @is_delete: True if it is a delete request.
*
* Returns 0 on success, negative value otherwise.
+ *
+ * Caller holds tomoyo_read_lock().
*/
static int tomoyo_update_no_rewrite_entry(const char *pattern,
const bool is_delete)
{
- struct tomoyo_no_rewrite_entry *new_entry, *ptr;
+ struct tomoyo_no_rewrite_entry *entry = NULL;
+ struct tomoyo_no_rewrite_entry *ptr;
const struct tomoyo_path_info *saved_pattern;
- int error = -ENOMEM;
+ int error = is_delete ? -ENOENT : -ENOMEM;
- if (!tomoyo_is_correct_path(pattern, 0, 0, 0, __func__))
+ if (!tomoyo_is_correct_path(pattern, 0, 0, 0))
return -EINVAL;
- saved_pattern = tomoyo_save_name(pattern);
+ saved_pattern = tomoyo_get_name(pattern);
if (!saved_pattern)
- return -ENOMEM;
- down_write(&tomoyo_no_rewrite_list_lock);
- list_for_each_entry(ptr, &tomoyo_no_rewrite_list, list) {
+ return error;
+ if (!is_delete)
+ entry = kmalloc(sizeof(*entry), GFP_KERNEL);
+ mutex_lock(&tomoyo_policy_lock);
+ list_for_each_entry_rcu(ptr, &tomoyo_no_rewrite_list, list) {
if (ptr->pattern != saved_pattern)
continue;
ptr->is_deleted = is_delete;
error = 0;
- goto out;
+ break;
}
- if (is_delete) {
- error = -ENOENT;
- goto out;
+ if (!is_delete && error && tomoyo_memory_ok(entry)) {
+ entry->pattern = saved_pattern;
+ saved_pattern = NULL;
+ list_add_tail_rcu(&entry->list, &tomoyo_no_rewrite_list);
+ entry = NULL;
+ error = 0;
}
- new_entry = tomoyo_alloc_element(sizeof(*new_entry));
- if (!new_entry)
- goto out;
- new_entry->pattern = saved_pattern;
- list_add_tail(&new_entry->list, &tomoyo_no_rewrite_list);
- error = 0;
- out:
- up_write(&tomoyo_no_rewrite_list_lock);
+ mutex_unlock(&tomoyo_policy_lock);
+ tomoyo_put_name(saved_pattern);
+ kfree(entry);
return error;
}
@@ -539,14 +503,15 @@
*
* Returns true if @filename is specified by "deny_rewrite" directive,
* false otherwise.
+ *
+ * Caller holds tomoyo_read_lock().
*/
static bool tomoyo_is_no_rewrite_file(const struct tomoyo_path_info *filename)
{
struct tomoyo_no_rewrite_entry *ptr;
bool found = false;
- down_read(&tomoyo_no_rewrite_list_lock);
- list_for_each_entry(ptr, &tomoyo_no_rewrite_list, list) {
+ list_for_each_entry_rcu(ptr, &tomoyo_no_rewrite_list, list) {
if (ptr->is_deleted)
continue;
if (!tomoyo_path_matches_pattern(filename, ptr->pattern))
@@ -554,7 +519,6 @@
found = true;
break;
}
- up_read(&tomoyo_no_rewrite_list_lock);
return found;
}
@@ -565,6 +529,8 @@
* @is_delete: True if it is a delete request.
*
* Returns 0 on success, negative value otherwise.
+ *
+ * Caller holds tomoyo_read_lock().
*/
int tomoyo_write_no_rewrite_policy(char *data, const bool is_delete)
{
@@ -577,13 +543,14 @@
* @head: Pointer to "struct tomoyo_io_buffer".
*
* Returns true on success, false otherwise.
+ *
+ * Caller holds tomoyo_read_lock().
*/
bool tomoyo_read_no_rewrite_policy(struct tomoyo_io_buffer *head)
{
struct list_head *pos;
bool done = true;
- down_read(&tomoyo_no_rewrite_list_lock);
list_for_each_cookie(pos, head->read_var2, &tomoyo_no_rewrite_list) {
struct tomoyo_no_rewrite_entry *ptr;
ptr = list_entry(pos, struct tomoyo_no_rewrite_entry, list);
@@ -594,7 +561,6 @@
if (!done)
break;
}
- up_read(&tomoyo_no_rewrite_list_lock);
return done;
}
@@ -612,6 +578,8 @@
* Current policy syntax uses "allow_read/write" instead of "6",
* "allow_read" instead of "4", "allow_write" instead of "2",
* "allow_execute" instead of "1".
+ *
+ * Caller holds tomoyo_read_lock().
*/
static int tomoyo_update_file_acl(const char *filename, u8 perm,
struct tomoyo_domain_info * const domain,
@@ -629,19 +597,19 @@
*/
return 0;
if (perm & 4)
- tomoyo_update_single_path_acl(TOMOYO_TYPE_READ_ACL, filename,
- domain, is_delete);
+ tomoyo_update_path_acl(TOMOYO_TYPE_READ, filename, domain,
+ is_delete);
if (perm & 2)
- tomoyo_update_single_path_acl(TOMOYO_TYPE_WRITE_ACL, filename,
- domain, is_delete);
+ tomoyo_update_path_acl(TOMOYO_TYPE_WRITE, filename, domain,
+ is_delete);
if (perm & 1)
- tomoyo_update_single_path_acl(TOMOYO_TYPE_EXECUTE_ACL,
- filename, domain, is_delete);
+ tomoyo_update_path_acl(TOMOYO_TYPE_EXECUTE, filename, domain,
+ is_delete);
return 0;
}
/**
- * tomoyo_check_single_path_acl2 - Check permission for single path operation.
+ * tomoyo_path_acl2 - Check permission for single path operation.
*
* @domain: Pointer to "struct tomoyo_domain_info".
* @filename: Filename to check.
@@ -649,26 +617,28 @@
* @may_use_pattern: True if patterned ACL is permitted.
*
* Returns 0 on success, -EPERM otherwise.
+ *
+ * Caller holds tomoyo_read_lock().
*/
-static int tomoyo_check_single_path_acl2(const struct tomoyo_domain_info *
- domain,
- const struct tomoyo_path_info *
- filename,
- const u16 perm,
- const bool may_use_pattern)
+static int tomoyo_path_acl2(const struct tomoyo_domain_info *domain,
+ const struct tomoyo_path_info *filename,
+ const u32 perm, const bool may_use_pattern)
{
struct tomoyo_acl_info *ptr;
int error = -EPERM;
- down_read(&tomoyo_domain_acl_info_list_lock);
- list_for_each_entry(ptr, &domain->acl_info_list, list) {
- struct tomoyo_single_path_acl_record *acl;
- if (tomoyo_acl_type2(ptr) != TOMOYO_TYPE_SINGLE_PATH_ACL)
+ list_for_each_entry_rcu(ptr, &domain->acl_info_list, list) {
+ struct tomoyo_path_acl *acl;
+ if (ptr->type != TOMOYO_TYPE_PATH_ACL)
continue;
- acl = container_of(ptr, struct tomoyo_single_path_acl_record,
- head);
- if (!(acl->perm & perm))
- continue;
+ acl = container_of(ptr, struct tomoyo_path_acl, head);
+ if (perm <= 0xFFFF) {
+ if (!(acl->perm & perm))
+ continue;
+ } else {
+ if (!(acl->perm_high & (perm >> 16)))
+ continue;
+ }
if (may_use_pattern || !acl->filename->is_patterned) {
if (!tomoyo_path_matches_pattern(filename,
acl->filename))
@@ -679,7 +649,6 @@
error = 0;
break;
}
- up_read(&tomoyo_domain_acl_info_list_lock);
return error;
}
@@ -691,27 +660,28 @@
* @operation: Mode ("read" or "write" or "read/write" or "execute").
*
* Returns 0 on success, -EPERM otherwise.
+ *
+ * Caller holds tomoyo_read_lock().
*/
static int tomoyo_check_file_acl(const struct tomoyo_domain_info *domain,
const struct tomoyo_path_info *filename,
const u8 operation)
{
- u16 perm = 0;
+ u32 perm = 0;
if (!tomoyo_check_flags(domain, TOMOYO_MAC_FOR_FILE))
return 0;
if (operation == 6)
- perm = 1 << TOMOYO_TYPE_READ_WRITE_ACL;
+ perm = 1 << TOMOYO_TYPE_READ_WRITE;
else if (operation == 4)
- perm = 1 << TOMOYO_TYPE_READ_ACL;
+ perm = 1 << TOMOYO_TYPE_READ;
else if (operation == 2)
- perm = 1 << TOMOYO_TYPE_WRITE_ACL;
+ perm = 1 << TOMOYO_TYPE_WRITE;
else if (operation == 1)
- perm = 1 << TOMOYO_TYPE_EXECUTE_ACL;
+ perm = 1 << TOMOYO_TYPE_EXECUTE;
else
BUG();
- return tomoyo_check_single_path_acl2(domain, filename, perm,
- operation != 1);
+ return tomoyo_path_acl2(domain, filename, perm, operation != 1);
}
/**
@@ -724,6 +694,8 @@
* @mode: Access control mode.
*
* Returns 0 on success, negative value otherwise.
+ *
+ * Caller holds tomoyo_read_lock().
*/
static int tomoyo_check_file_perm2(struct tomoyo_domain_info * const domain,
const struct tomoyo_path_info *filename,
@@ -737,18 +709,17 @@
if (!filename)
return 0;
error = tomoyo_check_file_acl(domain, filename, perm);
- if (error && perm == 4 &&
- (domain->flags & TOMOYO_DOMAIN_FLAGS_IGNORE_GLOBAL_ALLOW_READ) == 0
+ if (error && perm == 4 && !domain->ignore_global_allow_read
&& tomoyo_is_globally_readable_file(filename))
error = 0;
if (perm == 6)
- msg = tomoyo_sp2keyword(TOMOYO_TYPE_READ_WRITE_ACL);
+ msg = tomoyo_path2keyword(TOMOYO_TYPE_READ_WRITE);
else if (perm == 4)
- msg = tomoyo_sp2keyword(TOMOYO_TYPE_READ_ACL);
+ msg = tomoyo_path2keyword(TOMOYO_TYPE_READ);
else if (perm == 2)
- msg = tomoyo_sp2keyword(TOMOYO_TYPE_WRITE_ACL);
+ msg = tomoyo_path2keyword(TOMOYO_TYPE_WRITE);
else if (perm == 1)
- msg = tomoyo_sp2keyword(TOMOYO_TYPE_EXECUTE_ACL);
+ msg = tomoyo_path2keyword(TOMOYO_TYPE_EXECUTE);
else
BUG();
if (!error)
@@ -777,6 +748,8 @@
* @is_delete: True if it is a delete request.
*
* Returns 0 on success, negative value otherwise.
+ *
+ * Caller holds tomoyo_read_lock().
*/
int tomoyo_write_file_policy(char *data, struct tomoyo_domain_info *domain,
const bool is_delete)
@@ -795,28 +768,28 @@
if (strncmp(data, "allow_", 6))
goto out;
data += 6;
- for (type = 0; type < TOMOYO_MAX_SINGLE_PATH_OPERATION; type++) {
- if (strcmp(data, tomoyo_sp_keyword[type]))
+ for (type = 0; type < TOMOYO_MAX_PATH_OPERATION; type++) {
+ if (strcmp(data, tomoyo_path_keyword[type]))
continue;
- return tomoyo_update_single_path_acl(type, filename,
- domain, is_delete);
+ return tomoyo_update_path_acl(type, filename, domain,
+ is_delete);
}
filename2 = strchr(filename, ' ');
if (!filename2)
goto out;
*filename2++ = '\0';
- for (type = 0; type < TOMOYO_MAX_DOUBLE_PATH_OPERATION; type++) {
- if (strcmp(data, tomoyo_dp_keyword[type]))
+ for (type = 0; type < TOMOYO_MAX_PATH2_OPERATION; type++) {
+ if (strcmp(data, tomoyo_path2_keyword[type]))
continue;
- return tomoyo_update_double_path_acl(type, filename, filename2,
- domain, is_delete);
+ return tomoyo_update_path2_acl(type, filename, filename2,
+ domain, is_delete);
}
out:
return -EINVAL;
}
/**
- * tomoyo_update_single_path_acl - Update "struct tomoyo_single_path_acl_record" list.
+ * tomoyo_update_path_acl - Update "struct tomoyo_path_acl" list.
*
* @type: Type of operation.
* @filename: Filename.
@@ -824,85 +797,82 @@
* @is_delete: True if it is a delete request.
*
* Returns 0 on success, negative value otherwise.
+ *
+ * Caller holds tomoyo_read_lock().
*/
-static int tomoyo_update_single_path_acl(const u8 type, const char *filename,
- struct tomoyo_domain_info *
- const domain, const bool is_delete)
+static int tomoyo_update_path_acl(const u8 type, const char *filename,
+ struct tomoyo_domain_info *const domain,
+ const bool is_delete)
{
- static const u16 rw_mask =
- (1 << TOMOYO_TYPE_READ_ACL) | (1 << TOMOYO_TYPE_WRITE_ACL);
+ static const u32 rw_mask =
+ (1 << TOMOYO_TYPE_READ) | (1 << TOMOYO_TYPE_WRITE);
const struct tomoyo_path_info *saved_filename;
struct tomoyo_acl_info *ptr;
- struct tomoyo_single_path_acl_record *acl;
- int error = -ENOMEM;
- const u16 perm = 1 << type;
+ struct tomoyo_path_acl *entry = NULL;
+ int error = is_delete ? -ENOENT : -ENOMEM;
+ const u32 perm = 1 << type;
if (!domain)
return -EINVAL;
- if (!tomoyo_is_correct_path(filename, 0, 0, 0, __func__))
+ if (!tomoyo_is_correct_path(filename, 0, 0, 0))
return -EINVAL;
- saved_filename = tomoyo_save_name(filename);
+ saved_filename = tomoyo_get_name(filename);
if (!saved_filename)
return -ENOMEM;
- down_write(&tomoyo_domain_acl_info_list_lock);
- if (is_delete)
- goto delete;
- list_for_each_entry(ptr, &domain->acl_info_list, list) {
- if (tomoyo_acl_type1(ptr) != TOMOYO_TYPE_SINGLE_PATH_ACL)
+ if (!is_delete)
+ entry = kmalloc(sizeof(*entry), GFP_KERNEL);
+ mutex_lock(&tomoyo_policy_lock);
+ list_for_each_entry_rcu(ptr, &domain->acl_info_list, list) {
+ struct tomoyo_path_acl *acl =
+ container_of(ptr, struct tomoyo_path_acl, head);
+ if (ptr->type != TOMOYO_TYPE_PATH_ACL)
continue;
- acl = container_of(ptr, struct tomoyo_single_path_acl_record,
- head);
if (acl->filename != saved_filename)
continue;
- /* Special case. Clear all bits if marked as deleted. */
- if (ptr->type & TOMOYO_ACL_DELETED)
- acl->perm = 0;
- acl->perm |= perm;
- if ((acl->perm & rw_mask) == rw_mask)
- acl->perm |= 1 << TOMOYO_TYPE_READ_WRITE_ACL;
- else if (acl->perm & (1 << TOMOYO_TYPE_READ_WRITE_ACL))
- acl->perm |= rw_mask;
- ptr->type &= ~TOMOYO_ACL_DELETED;
- error = 0;
- goto out;
- }
- /* Not found. Append it to the tail. */
- acl = tomoyo_alloc_acl_element(TOMOYO_TYPE_SINGLE_PATH_ACL);
- if (!acl)
- goto out;
- acl->perm = perm;
- if (perm == (1 << TOMOYO_TYPE_READ_WRITE_ACL))
- acl->perm |= rw_mask;
- acl->filename = saved_filename;
- list_add_tail(&acl->head.list, &domain->acl_info_list);
- error = 0;
- goto out;
- delete:
- error = -ENOENT;
- list_for_each_entry(ptr, &domain->acl_info_list, list) {
- if (tomoyo_acl_type2(ptr) != TOMOYO_TYPE_SINGLE_PATH_ACL)
- continue;
- acl = container_of(ptr, struct tomoyo_single_path_acl_record,
- head);
- if (acl->filename != saved_filename)
- continue;
- acl->perm &= ~perm;
- if ((acl->perm & rw_mask) != rw_mask)
- acl->perm &= ~(1 << TOMOYO_TYPE_READ_WRITE_ACL);
- else if (!(acl->perm & (1 << TOMOYO_TYPE_READ_WRITE_ACL)))
- acl->perm &= ~rw_mask;
- if (!acl->perm)
- ptr->type |= TOMOYO_ACL_DELETED;
+ if (is_delete) {
+ if (perm <= 0xFFFF)
+ acl->perm &= ~perm;
+ else
+ acl->perm_high &= ~(perm >> 16);
+ if ((acl->perm & rw_mask) != rw_mask)
+ acl->perm &= ~(1 << TOMOYO_TYPE_READ_WRITE);
+ else if (!(acl->perm & (1 << TOMOYO_TYPE_READ_WRITE)))
+ acl->perm &= ~rw_mask;
+ } else {
+ if (perm <= 0xFFFF)
+ acl->perm |= perm;
+ else
+ acl->perm_high |= (perm >> 16);
+ if ((acl->perm & rw_mask) == rw_mask)
+ acl->perm |= 1 << TOMOYO_TYPE_READ_WRITE;
+ else if (acl->perm & (1 << TOMOYO_TYPE_READ_WRITE))
+ acl->perm |= rw_mask;
+ }
error = 0;
break;
}
- out:
- up_write(&tomoyo_domain_acl_info_list_lock);
+ if (!is_delete && error && tomoyo_memory_ok(entry)) {
+ entry->head.type = TOMOYO_TYPE_PATH_ACL;
+ if (perm <= 0xFFFF)
+ entry->perm = perm;
+ else
+ entry->perm_high = (perm >> 16);
+ if (perm == (1 << TOMOYO_TYPE_READ_WRITE))
+ entry->perm |= rw_mask;
+ entry->filename = saved_filename;
+ saved_filename = NULL;
+ list_add_tail_rcu(&entry->head.list, &domain->acl_info_list);
+ entry = NULL;
+ error = 0;
+ }
+ mutex_unlock(&tomoyo_policy_lock);
+ kfree(entry);
+ tomoyo_put_name(saved_filename);
return error;
}
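[Editor's note, not part of the patch] tomoyo_update_path_acl() above keeps a 32-bit permission set split across two 16-bit fields, choosing which half to touch by whether "1 << type" still fits in 16 bits. A minimal sketch of that split under hypothetical names (struct split_perm and its helpers); it is not TOMOYO code.

#include <linux/types.h>

/* Illustrative stand-in for the perm/perm_high pair kept in each path ACL. */
struct split_perm {
	u16 perm;	/* operation bits  0..15 */
	u16 perm_high;	/* operation bits 16..31 */
};

static void split_perm_set(struct split_perm *p, const u32 perm)
{
	if (perm <= 0xFFFF)
		p->perm |= perm;
	else
		p->perm_high |= perm >> 16;
}

static void split_perm_clear(struct split_perm *p, const u32 perm)
{
	if (perm <= 0xFFFF)
		p->perm &= ~perm;
	else
		p->perm_high &= ~(perm >> 16);
}

static bool split_perm_test(const struct split_perm *p, const u32 perm)
{
	if (perm <= 0xFFFF)
		return (p->perm & perm) != 0;
	return (p->perm_high & (perm >> 16)) != 0;
}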
/**
- * tomoyo_update_double_path_acl - Update "struct tomoyo_double_path_acl_record" list.
+ * tomoyo_update_path2_acl - Update "struct tomoyo_path2_acl" list.
*
* @type: Type of operation.
* @filename1: First filename.
@@ -911,98 +881,88 @@
* @is_delete: True if it is a delete request.
*
* Returns 0 on success, negative value otherwise.
+ *
+ * Caller holds tomoyo_read_lock().
*/
-static int tomoyo_update_double_path_acl(const u8 type, const char *filename1,
- const char *filename2,
- struct tomoyo_domain_info *
- const domain, const bool is_delete)
+static int tomoyo_update_path2_acl(const u8 type, const char *filename1,
+ const char *filename2,
+ struct tomoyo_domain_info *const domain,
+ const bool is_delete)
{
const struct tomoyo_path_info *saved_filename1;
const struct tomoyo_path_info *saved_filename2;
struct tomoyo_acl_info *ptr;
- struct tomoyo_double_path_acl_record *acl;
- int error = -ENOMEM;
+ struct tomoyo_path2_acl *entry = NULL;
+ int error = is_delete ? -ENOENT : -ENOMEM;
const u8 perm = 1 << type;
if (!domain)
return -EINVAL;
- if (!tomoyo_is_correct_path(filename1, 0, 0, 0, __func__) ||
- !tomoyo_is_correct_path(filename2, 0, 0, 0, __func__))
+ if (!tomoyo_is_correct_path(filename1, 0, 0, 0) ||
+ !tomoyo_is_correct_path(filename2, 0, 0, 0))
return -EINVAL;
- saved_filename1 = tomoyo_save_name(filename1);
- saved_filename2 = tomoyo_save_name(filename2);
+ saved_filename1 = tomoyo_get_name(filename1);
+ saved_filename2 = tomoyo_get_name(filename2);
if (!saved_filename1 || !saved_filename2)
- return -ENOMEM;
- down_write(&tomoyo_domain_acl_info_list_lock);
- if (is_delete)
- goto delete;
- list_for_each_entry(ptr, &domain->acl_info_list, list) {
- if (tomoyo_acl_type1(ptr) != TOMOYO_TYPE_DOUBLE_PATH_ACL)
+ goto out;
+ if (!is_delete)
+ entry = kmalloc(sizeof(*entry), GFP_KERNEL);
+ mutex_lock(&tomoyo_policy_lock);
+ list_for_each_entry_rcu(ptr, &domain->acl_info_list, list) {
+ struct tomoyo_path2_acl *acl =
+ container_of(ptr, struct tomoyo_path2_acl, head);
+ if (ptr->type != TOMOYO_TYPE_PATH2_ACL)
continue;
- acl = container_of(ptr, struct tomoyo_double_path_acl_record,
- head);
if (acl->filename1 != saved_filename1 ||
acl->filename2 != saved_filename2)
continue;
- /* Special case. Clear all bits if marked as deleted. */
- if (ptr->type & TOMOYO_ACL_DELETED)
- acl->perm = 0;
- acl->perm |= perm;
- ptr->type &= ~TOMOYO_ACL_DELETED;
- error = 0;
- goto out;
- }
- /* Not found. Append it to the tail. */
- acl = tomoyo_alloc_acl_element(TOMOYO_TYPE_DOUBLE_PATH_ACL);
- if (!acl)
- goto out;
- acl->perm = perm;
- acl->filename1 = saved_filename1;
- acl->filename2 = saved_filename2;
- list_add_tail(&acl->head.list, &domain->acl_info_list);
- error = 0;
- goto out;
- delete:
- error = -ENOENT;
- list_for_each_entry(ptr, &domain->acl_info_list, list) {
- if (tomoyo_acl_type2(ptr) != TOMOYO_TYPE_DOUBLE_PATH_ACL)
- continue;
- acl = container_of(ptr, struct tomoyo_double_path_acl_record,
- head);
- if (acl->filename1 != saved_filename1 ||
- acl->filename2 != saved_filename2)
- continue;
- acl->perm &= ~perm;
- if (!acl->perm)
- ptr->type |= TOMOYO_ACL_DELETED;
+ if (is_delete)
+ acl->perm &= ~perm;
+ else
+ acl->perm |= perm;
error = 0;
break;
}
+ if (!is_delete && error && tomoyo_memory_ok(entry)) {
+ entry->head.type = TOMOYO_TYPE_PATH2_ACL;
+ entry->perm = perm;
+ entry->filename1 = saved_filename1;
+ saved_filename1 = NULL;
+ entry->filename2 = saved_filename2;
+ saved_filename2 = NULL;
+ list_add_tail_rcu(&entry->head.list, &domain->acl_info_list);
+ entry = NULL;
+ error = 0;
+ }
+ mutex_unlock(&tomoyo_policy_lock);
out:
- up_write(&tomoyo_domain_acl_info_list_lock);
+ tomoyo_put_name(saved_filename1);
+ tomoyo_put_name(saved_filename2);
+ kfree(entry);
return error;
}
/**
- * tomoyo_check_single_path_acl - Check permission for single path operation.
+ * tomoyo_path_acl - Check permission for single path operation.
*
* @domain: Pointer to "struct tomoyo_domain_info".
* @type: Type of operation.
* @filename: Filename to check.
*
* Returns 0 on success, negative value otherwise.
+ *
+ * Caller holds tomoyo_read_lock().
*/
-static int tomoyo_check_single_path_acl(struct tomoyo_domain_info *domain,
- const u8 type,
- const struct tomoyo_path_info *filename)
+static int tomoyo_path_acl(struct tomoyo_domain_info *domain, const u8 type,
+ const struct tomoyo_path_info *filename)
{
if (!tomoyo_check_flags(domain, TOMOYO_MAC_FOR_FILE))
return 0;
- return tomoyo_check_single_path_acl2(domain, filename, 1 << type, 1);
+ return tomoyo_path_acl2(domain, filename, 1 << type, 1);
}
/**
- * tomoyo_check_double_path_acl - Check permission for double path operation.
+ * tomoyo_path2_acl - Check permission for double path operation.
*
* @domain: Pointer to "struct tomoyo_domain_info".
* @type: Type of operation.
@@ -1010,13 +970,13 @@
* @filename2: Second filename to check.
*
* Returns 0 on success, -EPERM otherwise.
+ *
+ * Caller holds tomoyo_read_lock().
*/
-static int tomoyo_check_double_path_acl(const struct tomoyo_domain_info *domain,
- const u8 type,
- const struct tomoyo_path_info *
- filename1,
- const struct tomoyo_path_info *
- filename2)
+static int tomoyo_path2_acl(const struct tomoyo_domain_info *domain,
+ const u8 type,
+ const struct tomoyo_path_info *filename1,
+ const struct tomoyo_path_info *filename2)
{
struct tomoyo_acl_info *ptr;
const u8 perm = 1 << type;
@@ -1024,13 +984,11 @@
if (!tomoyo_check_flags(domain, TOMOYO_MAC_FOR_FILE))
return 0;
- down_read(&tomoyo_domain_acl_info_list_lock);
- list_for_each_entry(ptr, &domain->acl_info_list, list) {
- struct tomoyo_double_path_acl_record *acl;
- if (tomoyo_acl_type2(ptr) != TOMOYO_TYPE_DOUBLE_PATH_ACL)
+ list_for_each_entry_rcu(ptr, &domain->acl_info_list, list) {
+ struct tomoyo_path2_acl *acl;
+ if (ptr->type != TOMOYO_TYPE_PATH2_ACL)
continue;
- acl = container_of(ptr, struct tomoyo_double_path_acl_record,
- head);
+ acl = container_of(ptr, struct tomoyo_path2_acl, head);
if (!(acl->perm & perm))
continue;
if (!tomoyo_path_matches_pattern(filename1, acl->filename1))
@@ -1040,12 +998,11 @@
error = 0;
break;
}
- up_read(&tomoyo_domain_acl_info_list_lock);
return error;
}
/**
- * tomoyo_check_single_path_permission2 - Check permission for single path operation.
+ * tomoyo_path_permission2 - Check permission for single path operation.
*
* @domain: Pointer to "struct tomoyo_domain_info".
* @operation: Type of operation.
@@ -1053,11 +1010,13 @@
* @mode: Access control mode.
*
* Returns 0 on success, negative value otherwise.
+ *
+ * Caller holds tomoyo_read_lock().
*/
-static int tomoyo_check_single_path_permission2(struct tomoyo_domain_info *
- const domain, u8 operation,
- const struct tomoyo_path_info *
- filename, const u8 mode)
+static int tomoyo_path_permission2(struct tomoyo_domain_info *const domain,
+ u8 operation,
+ const struct tomoyo_path_info *filename,
+ const u8 mode)
{
const char *msg;
int error;
@@ -1066,8 +1025,8 @@
if (!mode)
return 0;
next:
- error = tomoyo_check_single_path_acl(domain, operation, filename);
- msg = tomoyo_sp2keyword(operation);
+ error = tomoyo_path_acl(domain, operation, filename);
+ msg = tomoyo_path2keyword(operation);
if (!error)
goto ok;
if (tomoyo_verbose_mode(domain))
@@ -1076,7 +1035,7 @@
tomoyo_get_last_name(domain));
if (mode == 1 && tomoyo_domain_quota_is_ok(domain)) {
const char *name = tomoyo_get_file_pattern(filename)->name;
- tomoyo_update_single_path_acl(operation, name, domain, false);
+ tomoyo_update_path_acl(operation, name, domain, false);
}
if (!is_enforce)
error = 0;
@@ -1086,9 +1045,9 @@
* we need to check "allow_rewrite" permission if the filename is
* specified by "deny_rewrite" keyword.
*/
- if (!error && operation == TOMOYO_TYPE_TRUNCATE_ACL &&
+ if (!error && operation == TOMOYO_TYPE_TRUNCATE &&
tomoyo_is_no_rewrite_file(filename)) {
- operation = TOMOYO_TYPE_REWRITE_ACL;
+ operation = TOMOYO_TYPE_REWRITE;
goto next;
}
return error;
@@ -1101,6 +1060,8 @@
* @filename: Check permission for "execute".
*
 * Returns 0 on success, negative value otherwise.
+ *
+ * Caller holds tomoyo_read_lock().
*/
int tomoyo_check_exec_perm(struct tomoyo_domain_info *domain,
const struct tomoyo_path_info *filename)
@@ -1129,6 +1090,7 @@
struct tomoyo_path_info *buf;
const u8 mode = tomoyo_check_flags(domain, TOMOYO_MAC_FOR_FILE);
const bool is_enforce = (mode == 3);
+ int idx;
if (!mode || !path->mnt)
return 0;
@@ -1140,6 +1102,7 @@
* don't call me.
*/
return 0;
+ idx = tomoyo_read_lock();
buf = tomoyo_get_path(path);
if (!buf)
goto out;
@@ -1152,49 +1115,50 @@
if ((acc_mode & MAY_WRITE) &&
((flag & O_TRUNC) || !(flag & O_APPEND)) &&
(tomoyo_is_no_rewrite_file(buf))) {
- error = tomoyo_check_single_path_permission2(domain,
- TOMOYO_TYPE_REWRITE_ACL,
- buf, mode);
+ error = tomoyo_path_permission2(domain, TOMOYO_TYPE_REWRITE,
+ buf, mode);
}
if (!error)
error = tomoyo_check_file_perm2(domain, buf, acc_mode, "open",
mode);
if (!error && (flag & O_TRUNC))
- error = tomoyo_check_single_path_permission2(domain,
- TOMOYO_TYPE_TRUNCATE_ACL,
- buf, mode);
+ error = tomoyo_path_permission2(domain, TOMOYO_TYPE_TRUNCATE,
+ buf, mode);
out:
- tomoyo_free(buf);
+ kfree(buf);
+ tomoyo_read_unlock(idx);
if (!is_enforce)
error = 0;
return error;
}
/**
- * tomoyo_check_1path_perm - Check permission for "create", "unlink", "mkdir", "rmdir", "mkfifo", "mksock", "mkblock", "mkchar", "truncate" and "symlink".
+ * tomoyo_path_perm - Check permission for "create", "unlink", "mkdir", "rmdir", "mkfifo", "mksock", "mkblock", "mkchar", "truncate", "symlink", "ioctl", "chmod", "chown", "chgrp", "chroot", "mount" and "unmount".
*
- * @domain: Pointer to "struct tomoyo_domain_info".
* @operation: Type of operation.
* @path: Pointer to "struct path".
*
* Returns 0 on success, negative value otherwise.
*/
-int tomoyo_check_1path_perm(struct tomoyo_domain_info *domain,
- const u8 operation, struct path *path)
+int tomoyo_path_perm(const u8 operation, struct path *path)
{
int error = -ENOMEM;
struct tomoyo_path_info *buf;
+ struct tomoyo_domain_info *domain = tomoyo_domain();
const u8 mode = tomoyo_check_flags(domain, TOMOYO_MAC_FOR_FILE);
const bool is_enforce = (mode == 3);
+ int idx;
if (!mode || !path->mnt)
return 0;
+ idx = tomoyo_read_lock();
buf = tomoyo_get_path(path);
if (!buf)
goto out;
switch (operation) {
- case TOMOYO_TYPE_MKDIR_ACL:
- case TOMOYO_TYPE_RMDIR_ACL:
+ case TOMOYO_TYPE_MKDIR:
+ case TOMOYO_TYPE_RMDIR:
+ case TOMOYO_TYPE_CHROOT:
if (!buf->is_dir) {
/*
* tomoyo_get_path() reserves space for appending "/."
@@ -1203,10 +1167,10 @@
tomoyo_fill_path_info(buf);
}
}
- error = tomoyo_check_single_path_permission2(domain, operation, buf,
- mode);
+ error = tomoyo_path_permission2(domain, operation, buf, mode);
out:
- tomoyo_free(buf);
+ kfree(buf);
+ tomoyo_read_unlock(idx);
if (!is_enforce)
error = 0;
return error;
@@ -1215,21 +1179,23 @@
/**
* tomoyo_check_rewrite_permission - Check permission for "rewrite".
*
- * @domain: Pointer to "struct tomoyo_domain_info".
* @filp: Pointer to "struct file".
*
* Returns 0 on success, negative value otherwise.
*/
-int tomoyo_check_rewrite_permission(struct tomoyo_domain_info *domain,
- struct file *filp)
+int tomoyo_check_rewrite_permission(struct file *filp)
{
int error = -ENOMEM;
+ struct tomoyo_domain_info *domain = tomoyo_domain();
const u8 mode = tomoyo_check_flags(domain, TOMOYO_MAC_FOR_FILE);
const bool is_enforce = (mode == 3);
struct tomoyo_path_info *buf;
+ int idx;
if (!mode || !filp->f_path.mnt)
return 0;
+
+ idx = tomoyo_read_lock();
buf = tomoyo_get_path(&filp->f_path);
if (!buf)
goto out;
@@ -1237,38 +1203,38 @@
error = 0;
goto out;
}
- error = tomoyo_check_single_path_permission2(domain,
- TOMOYO_TYPE_REWRITE_ACL,
- buf, mode);
+ error = tomoyo_path_permission2(domain, TOMOYO_TYPE_REWRITE, buf, mode);
out:
- tomoyo_free(buf);
+ kfree(buf);
+ tomoyo_read_unlock(idx);
if (!is_enforce)
error = 0;
return error;
}
/**
- * tomoyo_check_2path_perm - Check permission for "rename" and "link".
+ * tomoyo_path2_perm - Check permission for "rename", "link" and "pivot_root".
*
- * @domain: Pointer to "struct tomoyo_domain_info".
* @operation: Type of operation.
* @path1: Pointer to "struct path".
* @path2: Pointer to "struct path".
*
* Returns 0 on success, negative value otherwise.
*/
-int tomoyo_check_2path_perm(struct tomoyo_domain_info * const domain,
- const u8 operation, struct path *path1,
- struct path *path2)
+int tomoyo_path2_perm(const u8 operation, struct path *path1,
+ struct path *path2)
{
int error = -ENOMEM;
struct tomoyo_path_info *buf1, *buf2;
+ struct tomoyo_domain_info *domain = tomoyo_domain();
const u8 mode = tomoyo_check_flags(domain, TOMOYO_MAC_FOR_FILE);
const bool is_enforce = (mode == 3);
const char *msg;
+ int idx;
if (!mode || !path1->mnt || !path2->mnt)
return 0;
+ idx = tomoyo_read_lock();
buf1 = tomoyo_get_path(path1);
buf2 = tomoyo_get_path(path2);
if (!buf1 || !buf2)
@@ -1289,8 +1255,8 @@
}
}
}
- error = tomoyo_check_double_path_acl(domain, operation, buf1, buf2);
- msg = tomoyo_dp2keyword(operation);
+ error = tomoyo_path2_acl(domain, operation, buf1, buf2);
+ msg = tomoyo_path22keyword(operation);
if (!error)
goto out;
if (tomoyo_verbose_mode(domain))
@@ -1301,12 +1267,13 @@
if (mode == 1 && tomoyo_domain_quota_is_ok(domain)) {
const char *name1 = tomoyo_get_file_pattern(buf1)->name;
const char *name2 = tomoyo_get_file_pattern(buf2)->name;
- tomoyo_update_double_path_acl(operation, name1, name2, domain,
- false);
+ tomoyo_update_path2_acl(operation, name1, name2, domain,
+ false);
}
out:
- tomoyo_free(buf1);
- tomoyo_free(buf2);
+ kfree(buf1);
+ kfree(buf2);
+ tomoyo_read_unlock(idx);
if (!is_enforce)
error = 0;
return error;
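[Editor's note, not part of the patch] Every tomoyo_update_*() conversion above follows the same shape: allocate the candidate entry with GFP_KERNEL before taking any lock, grab the single tomoyo_policy_lock mutex, either flip is_deleted on a matching entry or publish the new one with list_add_tail_rcu(), then kfree() the candidate only if it was not inserted. A minimal sketch of that pattern under illustrative names (example_entry, example_update); it is not TOMOYO code.

#include <linux/errno.h>
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/rculist.h>
#include <linux/slab.h>

struct example_entry {
	struct list_head list;
	int key;
	bool is_deleted;
};

static LIST_HEAD(example_list);
static DEFINE_MUTEX(example_lock);

static int example_update(const int key, const bool is_delete)
{
	struct example_entry *entry = NULL;
	struct example_entry *ptr;
	int error = is_delete ? -ENOENT : -ENOMEM;

	/* Allocate outside the lock; lockless readers are never blocked. */
	if (!is_delete)
		entry = kzalloc(sizeof(*entry), GFP_KERNEL);
	mutex_lock(&example_lock);
	/* The writer holds the mutex, so no SRCU section is needed here. */
	list_for_each_entry_rcu(ptr, &example_list, list) {
		if (ptr->key != key)
			continue;
		ptr->is_deleted = is_delete;	/* reuse the existing entry */
		error = 0;
		break;
	}
	if (!is_delete && error && entry) {
		entry->key = key;
		/* Publish to concurrent lockless readers. */
		list_add_tail_rcu(&entry->list, &example_list);
		entry = NULL;
		error = 0;
	}
	mutex_unlock(&example_lock);
	kfree(entry);	/* freed only when it was not inserted */
	return error;
}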
diff --git a/security/tomoyo/gc.c b/security/tomoyo/gc.c
new file mode 100644
index 0000000..9645525
--- /dev/null
+++ b/security/tomoyo/gc.c
@@ -0,0 +1,370 @@
+/*
+ * security/tomoyo/gc.c
+ *
+ * Implementation of the Domain-Based Mandatory Access Control.
+ *
+ * Copyright (C) 2005-2010 NTT DATA CORPORATION
+ *
+ */
+
+#include "common.h"
+#include <linux/kthread.h>
+
+enum tomoyo_gc_id {
+ TOMOYO_ID_DOMAIN_INITIALIZER,
+ TOMOYO_ID_DOMAIN_KEEPER,
+ TOMOYO_ID_ALIAS,
+ TOMOYO_ID_GLOBALLY_READABLE,
+ TOMOYO_ID_PATTERN,
+ TOMOYO_ID_NO_REWRITE,
+ TOMOYO_ID_MANAGER,
+ TOMOYO_ID_NAME,
+ TOMOYO_ID_ACL,
+ TOMOYO_ID_DOMAIN
+};
+
+struct tomoyo_gc_entry {
+ struct list_head list;
+ int type;
+ void *element;
+};
+static LIST_HEAD(tomoyo_gc_queue);
+static DEFINE_MUTEX(tomoyo_gc_mutex);
+
+/* Caller holds tomoyo_policy_lock mutex. */
+static bool tomoyo_add_to_gc(const int type, void *element)
+{
+ struct tomoyo_gc_entry *entry = kzalloc(sizeof(*entry), GFP_ATOMIC);
+ if (!entry)
+ return false;
+ entry->type = type;
+ entry->element = element;
+ list_add(&entry->list, &tomoyo_gc_queue);
+ return true;
+}
+
+static void tomoyo_del_allow_read
+(struct tomoyo_globally_readable_file_entry *ptr)
+{
+ tomoyo_put_name(ptr->filename);
+}
+
+static void tomoyo_del_file_pattern(struct tomoyo_pattern_entry *ptr)
+{
+ tomoyo_put_name(ptr->pattern);
+}
+
+static void tomoyo_del_no_rewrite(struct tomoyo_no_rewrite_entry *ptr)
+{
+ tomoyo_put_name(ptr->pattern);
+}
+
+static void tomoyo_del_domain_initializer
+(struct tomoyo_domain_initializer_entry *ptr)
+{
+ tomoyo_put_name(ptr->domainname);
+ tomoyo_put_name(ptr->program);
+}
+
+static void tomoyo_del_domain_keeper(struct tomoyo_domain_keeper_entry *ptr)
+{
+ tomoyo_put_name(ptr->domainname);
+ tomoyo_put_name(ptr->program);
+}
+
+static void tomoyo_del_alias(struct tomoyo_alias_entry *ptr)
+{
+ tomoyo_put_name(ptr->original_name);
+ tomoyo_put_name(ptr->aliased_name);
+}
+
+static void tomoyo_del_manager(struct tomoyo_policy_manager_entry *ptr)
+{
+ tomoyo_put_name(ptr->manager);
+}
+
+static void tomoyo_del_acl(struct tomoyo_acl_info *acl)
+{
+ switch (acl->type) {
+ case TOMOYO_TYPE_PATH_ACL:
+ {
+ struct tomoyo_path_acl *entry
+ = container_of(acl, typeof(*entry), head);
+ tomoyo_put_name(entry->filename);
+ }
+ break;
+ case TOMOYO_TYPE_PATH2_ACL:
+ {
+ struct tomoyo_path2_acl *entry
+ = container_of(acl, typeof(*entry), head);
+ tomoyo_put_name(entry->filename1);
+ tomoyo_put_name(entry->filename2);
+ }
+ break;
+ default:
+ printk(KERN_WARNING "Unknown type\n");
+ break;
+ }
+}
+
+static bool tomoyo_del_domain(struct tomoyo_domain_info *domain)
+{
+ struct tomoyo_acl_info *acl;
+ struct tomoyo_acl_info *tmp;
+ /*
+	 * Since we don't protect the whole execve() operation using SRCU,
+ * we need to recheck domain->users at this point.
+ *
+ * (1) Reader starts SRCU section upon execve().
+ * (2) Reader traverses tomoyo_domain_list and finds this domain.
+ * (3) Writer marks this domain as deleted.
+ * (4) Garbage collector removes this domain from tomoyo_domain_list
+ * because this domain is marked as deleted and used by nobody.
+ * (5) Reader saves reference to this domain into
+ * "struct linux_binprm"->cred->security .
+ * (6) Reader finishes SRCU section, although execve() operation has
+ * not finished yet.
+ * (7) Garbage collector waits for SRCU synchronization.
+	 * (8) Garbage collector kfree()s this domain because this domain is
+ * used by nobody.
+ * (9) Reader finishes execve() operation and restores this domain from
+ * "struct linux_binprm"->cred->security.
+ *
+	 * Since the reader updates domain->users at (5), we can close this
+	 * race by rechecking domain->users at (8).
+ */
+ if (atomic_read(&domain->users))
+ return false;
+ list_for_each_entry_safe(acl, tmp, &domain->acl_info_list, list) {
+ tomoyo_del_acl(acl);
+ tomoyo_memory_free(acl);
+ }
+ tomoyo_put_name(domain->domainname);
+ return true;
+}
+
+
+static void tomoyo_del_name(const struct tomoyo_name_entry *ptr)
+{
+}
+
+static void tomoyo_collect_entry(void)
+{
+ mutex_lock(&tomoyo_policy_lock);
+ {
+ struct tomoyo_globally_readable_file_entry *ptr;
+ list_for_each_entry_rcu(ptr, &tomoyo_globally_readable_list,
+ list) {
+ if (!ptr->is_deleted)
+ continue;
+ if (tomoyo_add_to_gc(TOMOYO_ID_GLOBALLY_READABLE, ptr))
+ list_del_rcu(&ptr->list);
+ else
+ break;
+ }
+ }
+ {
+ struct tomoyo_pattern_entry *ptr;
+ list_for_each_entry_rcu(ptr, &tomoyo_pattern_list, list) {
+ if (!ptr->is_deleted)
+ continue;
+ if (tomoyo_add_to_gc(TOMOYO_ID_PATTERN, ptr))
+ list_del_rcu(&ptr->list);
+ else
+ break;
+ }
+ }
+ {
+ struct tomoyo_no_rewrite_entry *ptr;
+ list_for_each_entry_rcu(ptr, &tomoyo_no_rewrite_list, list) {
+ if (!ptr->is_deleted)
+ continue;
+ if (tomoyo_add_to_gc(TOMOYO_ID_NO_REWRITE, ptr))
+ list_del_rcu(&ptr->list);
+ else
+ break;
+ }
+ }
+ {
+ struct tomoyo_domain_initializer_entry *ptr;
+ list_for_each_entry_rcu(ptr, &tomoyo_domain_initializer_list,
+ list) {
+ if (!ptr->is_deleted)
+ continue;
+ if (tomoyo_add_to_gc(TOMOYO_ID_DOMAIN_INITIALIZER, ptr))
+ list_del_rcu(&ptr->list);
+ else
+ break;
+ }
+ }
+ {
+ struct tomoyo_domain_keeper_entry *ptr;
+ list_for_each_entry_rcu(ptr, &tomoyo_domain_keeper_list, list) {
+ if (!ptr->is_deleted)
+ continue;
+ if (tomoyo_add_to_gc(TOMOYO_ID_DOMAIN_KEEPER, ptr))
+ list_del_rcu(&ptr->list);
+ else
+ break;
+ }
+ }
+ {
+ struct tomoyo_alias_entry *ptr;
+ list_for_each_entry_rcu(ptr, &tomoyo_alias_list, list) {
+ if (!ptr->is_deleted)
+ continue;
+ if (tomoyo_add_to_gc(TOMOYO_ID_ALIAS, ptr))
+ list_del_rcu(&ptr->list);
+ else
+ break;
+ }
+ }
+ {
+ struct tomoyo_policy_manager_entry *ptr;
+ list_for_each_entry_rcu(ptr, &tomoyo_policy_manager_list,
+ list) {
+ if (!ptr->is_deleted)
+ continue;
+ if (tomoyo_add_to_gc(TOMOYO_ID_MANAGER, ptr))
+ list_del_rcu(&ptr->list);
+ else
+ break;
+ }
+ }
+ {
+ struct tomoyo_domain_info *domain;
+ list_for_each_entry_rcu(domain, &tomoyo_domain_list, list) {
+ struct tomoyo_acl_info *acl;
+ list_for_each_entry_rcu(acl, &domain->acl_info_list,
+ list) {
+ switch (acl->type) {
+ case TOMOYO_TYPE_PATH_ACL:
+ if (container_of(acl,
+ struct tomoyo_path_acl,
+ head)->perm ||
+ container_of(acl,
+ struct tomoyo_path_acl,
+ head)->perm_high)
+ continue;
+ break;
+ case TOMOYO_TYPE_PATH2_ACL:
+ if (container_of(acl,
+ struct tomoyo_path2_acl,
+ head)->perm)
+ continue;
+ break;
+ default:
+ continue;
+ }
+ if (tomoyo_add_to_gc(TOMOYO_ID_ACL, acl))
+ list_del_rcu(&acl->list);
+ else
+ break;
+ }
+ if (!domain->is_deleted || atomic_read(&domain->users))
+ continue;
+ /*
+			 * Nobody is referring to this domain. But somebody may
+			 * start referring to it after a successful execve().
+ * We recheck domain->users after SRCU synchronization.
+ */
+ if (tomoyo_add_to_gc(TOMOYO_ID_DOMAIN, domain))
+ list_del_rcu(&domain->list);
+ else
+ break;
+ }
+ }
+ mutex_unlock(&tomoyo_policy_lock);
+ mutex_lock(&tomoyo_name_list_lock);
+ {
+ int i;
+ for (i = 0; i < TOMOYO_MAX_HASH; i++) {
+ struct tomoyo_name_entry *ptr;
+ list_for_each_entry_rcu(ptr, &tomoyo_name_list[i],
+ list) {
+ if (atomic_read(&ptr->users))
+ continue;
+ if (tomoyo_add_to_gc(TOMOYO_ID_NAME, ptr))
+ list_del_rcu(&ptr->list);
+ else {
+ i = TOMOYO_MAX_HASH;
+ break;
+ }
+ }
+ }
+ }
+ mutex_unlock(&tomoyo_name_list_lock);
+}
+
+static void tomoyo_kfree_entry(void)
+{
+ struct tomoyo_gc_entry *p;
+ struct tomoyo_gc_entry *tmp;
+
+ list_for_each_entry_safe(p, tmp, &tomoyo_gc_queue, list) {
+ switch (p->type) {
+ case TOMOYO_ID_DOMAIN_INITIALIZER:
+ tomoyo_del_domain_initializer(p->element);
+ break;
+ case TOMOYO_ID_DOMAIN_KEEPER:
+ tomoyo_del_domain_keeper(p->element);
+ break;
+ case TOMOYO_ID_ALIAS:
+ tomoyo_del_alias(p->element);
+ break;
+ case TOMOYO_ID_GLOBALLY_READABLE:
+ tomoyo_del_allow_read(p->element);
+ break;
+ case TOMOYO_ID_PATTERN:
+ tomoyo_del_file_pattern(p->element);
+ break;
+ case TOMOYO_ID_NO_REWRITE:
+ tomoyo_del_no_rewrite(p->element);
+ break;
+ case TOMOYO_ID_MANAGER:
+ tomoyo_del_manager(p->element);
+ break;
+ case TOMOYO_ID_NAME:
+ tomoyo_del_name(p->element);
+ break;
+ case TOMOYO_ID_ACL:
+ tomoyo_del_acl(p->element);
+ break;
+ case TOMOYO_ID_DOMAIN:
+ if (!tomoyo_del_domain(p->element))
+ continue;
+ break;
+ default:
+ printk(KERN_WARNING "Unknown type\n");
+ break;
+ }
+ tomoyo_memory_free(p->element);
+ list_del(&p->list);
+ kfree(p);
+ }
+}
+
+static int tomoyo_gc_thread(void *unused)
+{
+ daemonize("GC for TOMOYO");
+ if (mutex_trylock(&tomoyo_gc_mutex)) {
+ int i;
+ for (i = 0; i < 10; i++) {
+ tomoyo_collect_entry();
+ if (list_empty(&tomoyo_gc_queue))
+ break;
+ synchronize_srcu(&tomoyo_ss);
+ tomoyo_kfree_entry();
+ }
+ mutex_unlock(&tomoyo_gc_mutex);
+ }
+ do_exit(0);
+}
+
+void tomoyo_run_gc(void)
+{
+ struct task_struct *task = kthread_create(tomoyo_gc_thread, NULL,
+ "GC for TOMOYO");
+ if (!IS_ERR(task))
+ wake_up_process(task);
+}
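[Editor's note, not part of the patch] gc.c above never frees a policy element directly; it unlinks the element with list_del_rcu() under tomoyo_policy_lock, waits for every outstanding reader with synchronize_srcu(&tomoyo_ss), and only then releases it via tomoyo_memory_free(). A stripped-down sketch of that three-step sequence under hypothetical names (my_srcu, my_entry, my_gc_queue); it is not TOMOYO code.

#include <linux/mutex.h>
#include <linux/rculist.h>
#include <linux/slab.h>
#include <linux/srcu.h>

struct my_entry {
	struct list_head list;	/* linkage in the live list     */
	struct list_head gc;	/* linkage in the to-free queue */
	bool is_deleted;
};

static struct srcu_struct my_srcu;	/* init_srcu_struct() at boot */
static DEFINE_MUTEX(my_lock);
static LIST_HEAD(my_list);
static LIST_HEAD(my_gc_queue);

/* Reader side: traversal is protected only by the SRCU read section. */
static void my_reader(void)
{
	struct my_entry *ptr;
	const int idx = srcu_read_lock(&my_srcu);

	list_for_each_entry_rcu(ptr, &my_list, list) {
		if (ptr->is_deleted)
			continue;
		/* ... use ptr ... */
	}
	srcu_read_unlock(&my_srcu, idx);
}

/* GC side: unlink under the mutex, wait for readers, then free. */
static void my_collect_and_free(void)
{
	struct my_entry *ptr;
	struct my_entry *tmp;

	mutex_lock(&my_lock);
	list_for_each_entry_safe(ptr, tmp, &my_list, list) {
		if (!ptr->is_deleted)
			continue;
		list_del_rcu(&ptr->list);		/* (1) unlink  */
		list_add(&ptr->gc, &my_gc_queue);
	}
	mutex_unlock(&my_lock);
	synchronize_srcu(&my_srcu);			/* (2) wait    */
	list_for_each_entry_safe(ptr, tmp, &my_gc_queue, gc) {
		list_del(&ptr->gc);
		kfree(ptr);				/* (3) release */
	}
}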
diff --git a/security/tomoyo/realpath.c b/security/tomoyo/realpath.c
index 18369d4..c00df45 100644
--- a/security/tomoyo/realpath.c
+++ b/security/tomoyo/realpath.c
@@ -14,9 +14,8 @@
#include <linux/mnt_namespace.h>
#include <linux/fs_struct.h>
#include <linux/hash.h>
-
+#include <linux/magic.h>
#include "common.h"
-#include "realpath.h"
/**
* tomoyo_encode: Convert binary string to ascii string.
@@ -112,7 +111,7 @@
path_put(&ns_root);
/* Prepend "/proc" prefix if using internal proc vfs mount. */
if (!IS_ERR(sp) && (path->mnt->mnt_parent == path->mnt) &&
- (strcmp(path->mnt->mnt_sb->s_type->name, "proc") == 0)) {
+ (path->mnt->mnt_sb->s_magic == PROC_SUPER_MAGIC)) {
sp -= 5;
if (sp >= newname)
memcpy(sp, "/proc", 5);
@@ -149,12 +148,12 @@
*
* Returns the realpath of the given @path on success, NULL otherwise.
*
- * These functions use tomoyo_alloc(), so the caller must call tomoyo_free()
+ * These functions use kzalloc(), so the caller must call kfree()
* if these functions didn't return NULL.
*/
char *tomoyo_realpath_from_path(struct path *path)
{
- char *buf = tomoyo_alloc(sizeof(struct tomoyo_page_buffer));
+ char *buf = kzalloc(sizeof(struct tomoyo_page_buffer), GFP_KERNEL);
BUILD_BUG_ON(sizeof(struct tomoyo_page_buffer)
<= TOMOYO_MAX_PATHNAME_LEN - 1);
@@ -163,7 +162,7 @@
if (tomoyo_realpath_from_path2(path, buf,
TOMOYO_MAX_PATHNAME_LEN - 1) == 0)
return buf;
- tomoyo_free(buf);
+ kfree(buf);
return NULL;
}
@@ -206,98 +205,47 @@
}
/* Memory allocated for non-string data. */
-static unsigned int tomoyo_allocated_memory_for_elements;
-/* Quota for holding non-string data. */
-static unsigned int tomoyo_quota_for_elements;
+static atomic_t tomoyo_policy_memory_size;
+/* Quota for holding policy. */
+static unsigned int tomoyo_quota_for_policy;
/**
- * tomoyo_alloc_element - Allocate permanent memory for structures.
+ * tomoyo_memory_ok - Check memory quota.
*
- * @size: Size in bytes.
+ * @ptr: Pointer to allocated memory.
*
- * Returns pointer to allocated memory on success, NULL otherwise.
+ * Returns true on success, false otherwise.
*
- * Memory has to be zeroed.
- * The RAM is chunked, so NEVER try to kfree() the returned pointer.
+ * Caller holds tomoyo_policy_lock.
+ * Memory pointed to by @ptr will be zeroed on success.
*/
-void *tomoyo_alloc_element(const unsigned int size)
+bool tomoyo_memory_ok(void *ptr)
{
- static char *buf;
- static DEFINE_MUTEX(lock);
- static unsigned int buf_used_len = PATH_MAX;
- char *ptr = NULL;
- /*Assumes sizeof(void *) >= sizeof(long) is true. */
- const unsigned int word_aligned_size
- = roundup(size, max(sizeof(void *), sizeof(long)));
- if (word_aligned_size > PATH_MAX)
- return NULL;
- mutex_lock(&lock);
- if (buf_used_len + word_aligned_size > PATH_MAX) {
- if (!tomoyo_quota_for_elements ||
- tomoyo_allocated_memory_for_elements
- + PATH_MAX <= tomoyo_quota_for_elements)
- ptr = kzalloc(PATH_MAX, GFP_KERNEL);
- if (!ptr) {
- printk(KERN_WARNING "ERROR: Out of memory "
- "for tomoyo_alloc_element().\n");
- if (!tomoyo_policy_loaded)
- panic("MAC Initialization failed.\n");
- } else {
- buf = ptr;
- tomoyo_allocated_memory_for_elements += PATH_MAX;
- buf_used_len = word_aligned_size;
- ptr = buf;
- }
- } else if (word_aligned_size) {
- int i;
- ptr = buf + buf_used_len;
- buf_used_len += word_aligned_size;
- for (i = 0; i < word_aligned_size; i++) {
- if (!ptr[i])
- continue;
- printk(KERN_ERR "WARNING: Reserved memory was tainted! "
- "The system might go wrong.\n");
- ptr[i] = '\0';
- }
+ int allocated_len = ptr ? ksize(ptr) : 0;
+ atomic_add(allocated_len, &tomoyo_policy_memory_size);
+ if (ptr && (!tomoyo_quota_for_policy ||
+ atomic_read(&tomoyo_policy_memory_size)
+ <= tomoyo_quota_for_policy)) {
+ memset(ptr, 0, allocated_len);
+ return true;
}
- mutex_unlock(&lock);
- return ptr;
+ printk(KERN_WARNING "ERROR: Out of memory "
+ "for tomoyo_alloc_element().\n");
+ if (!tomoyo_policy_loaded)
+ panic("MAC Initialization failed.\n");
+ return false;
}
-/* Memory allocated for string data in bytes. */
-static unsigned int tomoyo_allocated_memory_for_savename;
-/* Quota for holding string data in bytes. */
-static unsigned int tomoyo_quota_for_savename;
-
-/*
- * TOMOYO uses this hash only when appending a string into the string
- * table. Frequency of appending strings is very low. So we don't need
- * large (e.g. 64k) hash size. 256 will be sufficient.
- */
-#define TOMOYO_HASH_BITS 8
-#define TOMOYO_MAX_HASH (1u<<TOMOYO_HASH_BITS)
-
-/*
- * tomoyo_name_entry is a structure which is used for linking
- * "struct tomoyo_path_info" into tomoyo_name_list .
+/**
+ * tomoyo_memory_free - Free memory for elements.
*
- * Since tomoyo_name_list manages a list of strings which are shared by
- * multiple processes (whereas "struct tomoyo_path_info" inside
- * "struct tomoyo_path_info_with_data" is not shared), a reference counter will
- * be added to "struct tomoyo_name_entry" rather than "struct tomoyo_path_info"
- * when TOMOYO starts supporting garbage collector.
+ * @ptr: Pointer to allocated memory.
*/
-struct tomoyo_name_entry {
- struct list_head list;
- struct tomoyo_path_info entry;
-};
-
-/* Structure for available memory region. */
-struct tomoyo_free_memory_block_list {
- struct list_head list;
- char *ptr; /* Pointer to a free area. */
- int len; /* Length of the area. */
-};
+void tomoyo_memory_free(void *ptr)
+{
+ atomic_sub(ksize(ptr), &tomoyo_policy_memory_size);
+ kfree(ptr);
+}
/*
* tomoyo_name_list is used for holding string data used by TOMOYO.
@@ -305,87 +253,58 @@
* "/lib/libc-2.5.so"), TOMOYO shares string data in the form of
* "const struct tomoyo_path_info *".
*/
-static struct list_head tomoyo_name_list[TOMOYO_MAX_HASH];
+struct list_head tomoyo_name_list[TOMOYO_MAX_HASH];
+/* Lock for protecting tomoyo_name_list . */
+DEFINE_MUTEX(tomoyo_name_list_lock);
/**
- * tomoyo_save_name - Allocate permanent memory for string data.
+ * tomoyo_get_name - Allocate permanent memory for string data.
*
 * @name: The string to store into the permanent memory.
*
* Returns pointer to "struct tomoyo_path_info" on success, NULL otherwise.
- *
- * The RAM is shared, so NEVER try to modify or kfree() the returned name.
*/
-const struct tomoyo_path_info *tomoyo_save_name(const char *name)
+const struct tomoyo_path_info *tomoyo_get_name(const char *name)
{
- static LIST_HEAD(fmb_list);
- static DEFINE_MUTEX(lock);
struct tomoyo_name_entry *ptr;
unsigned int hash;
- /* fmb contains available size in bytes.
- fmb is removed from the fmb_list when fmb->len becomes 0. */
- struct tomoyo_free_memory_block_list *fmb;
int len;
- char *cp;
+ int allocated_len;
struct list_head *head;
if (!name)
return NULL;
len = strlen(name) + 1;
- if (len > TOMOYO_MAX_PATHNAME_LEN) {
- printk(KERN_WARNING "ERROR: Name too long "
- "for tomoyo_save_name().\n");
- return NULL;
- }
hash = full_name_hash((const unsigned char *) name, len - 1);
head = &tomoyo_name_list[hash_long(hash, TOMOYO_HASH_BITS)];
-
- mutex_lock(&lock);
+ mutex_lock(&tomoyo_name_list_lock);
list_for_each_entry(ptr, head, list) {
- if (hash == ptr->entry.hash && !strcmp(name, ptr->entry.name))
- goto out;
+ if (hash != ptr->entry.hash || strcmp(name, ptr->entry.name))
+ continue;
+ atomic_inc(&ptr->users);
+ goto out;
}
- list_for_each_entry(fmb, &fmb_list, list) {
- if (len <= fmb->len)
- goto ready;
- }
- if (!tomoyo_quota_for_savename ||
- tomoyo_allocated_memory_for_savename + PATH_MAX
- <= tomoyo_quota_for_savename)
- cp = kzalloc(PATH_MAX, GFP_KERNEL);
- else
- cp = NULL;
- fmb = kzalloc(sizeof(*fmb), GFP_KERNEL);
- if (!cp || !fmb) {
- kfree(cp);
- kfree(fmb);
+ ptr = kzalloc(sizeof(*ptr) + len, GFP_KERNEL);
+ allocated_len = ptr ? ksize(ptr) : 0;
+ if (!ptr || (tomoyo_quota_for_policy &&
+ atomic_read(&tomoyo_policy_memory_size) + allocated_len
+ > tomoyo_quota_for_policy)) {
+ kfree(ptr);
printk(KERN_WARNING "ERROR: Out of memory "
- "for tomoyo_save_name().\n");
+ "for tomoyo_get_name().\n");
if (!tomoyo_policy_loaded)
panic("MAC Initialization failed.\n");
ptr = NULL;
goto out;
}
- tomoyo_allocated_memory_for_savename += PATH_MAX;
- list_add(&fmb->list, &fmb_list);
- fmb->ptr = cp;
- fmb->len = PATH_MAX;
- ready:
- ptr = tomoyo_alloc_element(sizeof(*ptr));
- if (!ptr)
- goto out;
- ptr->entry.name = fmb->ptr;
- memmove(fmb->ptr, name, len);
+ atomic_add(allocated_len, &tomoyo_policy_memory_size);
+ ptr->entry.name = ((char *) ptr) + sizeof(*ptr);
+ memmove((char *) ptr->entry.name, name, len);
+ atomic_set(&ptr->users, 1);
tomoyo_fill_path_info(&ptr->entry);
- fmb->ptr += len;
- fmb->len -= len;
list_add_tail(&ptr->list, head);
- if (fmb->len == 0) {
- list_del(&fmb->list);
- kfree(fmb);
- }
out:
- mutex_unlock(&lock);
+ mutex_unlock(&tomoyo_name_list_lock);
return ptr ? &ptr->entry : NULL;
}
@@ -400,45 +319,14 @@
for (i = 0; i < TOMOYO_MAX_HASH; i++)
INIT_LIST_HEAD(&tomoyo_name_list[i]);
INIT_LIST_HEAD(&tomoyo_kernel_domain.acl_info_list);
- tomoyo_kernel_domain.domainname = tomoyo_save_name(TOMOYO_ROOT_NAME);
- list_add_tail(&tomoyo_kernel_domain.list, &tomoyo_domain_list);
- down_read(&tomoyo_domain_list_lock);
+ tomoyo_kernel_domain.domainname = tomoyo_get_name(TOMOYO_ROOT_NAME);
+ /*
+ * tomoyo_read_lock() is not needed because this function is
+ * called before the first "delete" request.
+ */
+ list_add_tail_rcu(&tomoyo_kernel_domain.list, &tomoyo_domain_list);
if (tomoyo_find_domain(TOMOYO_ROOT_NAME) != &tomoyo_kernel_domain)
panic("Can't register tomoyo_kernel_domain");
- up_read(&tomoyo_domain_list_lock);
-}
-
-/* Memory allocated for temporary purpose. */
-static atomic_t tomoyo_dynamic_memory_size;
-
-/**
- * tomoyo_alloc - Allocate memory for temporary purpose.
- *
- * @size: Size in bytes.
- *
- * Returns pointer to allocated memory on success, NULL otherwise.
- */
-void *tomoyo_alloc(const size_t size)
-{
- void *p = kzalloc(size, GFP_KERNEL);
- if (p)
- atomic_add(ksize(p), &tomoyo_dynamic_memory_size);
- return p;
-}
-
-/**
- * tomoyo_free - Release memory allocated by tomoyo_alloc().
- *
- * @p: Pointer returned by tomoyo_alloc(). May be NULL.
- *
- * Returns nothing.
- */
-void tomoyo_free(const void *p)
-{
- if (p) {
- atomic_sub(ksize(p), &tomoyo_dynamic_memory_size);
- kfree(p);
- }
}
/**
@@ -451,32 +339,19 @@
int tomoyo_read_memory_counter(struct tomoyo_io_buffer *head)
{
if (!head->read_eof) {
- const unsigned int shared
- = tomoyo_allocated_memory_for_savename;
- const unsigned int private
- = tomoyo_allocated_memory_for_elements;
- const unsigned int dynamic
- = atomic_read(&tomoyo_dynamic_memory_size);
+ const unsigned int policy
+ = atomic_read(&tomoyo_policy_memory_size);
char buffer[64];
memset(buffer, 0, sizeof(buffer));
- if (tomoyo_quota_for_savename)
+ if (tomoyo_quota_for_policy)
snprintf(buffer, sizeof(buffer) - 1,
" (Quota: %10u)",
- tomoyo_quota_for_savename);
+ tomoyo_quota_for_policy);
else
buffer[0] = '\0';
- tomoyo_io_printf(head, "Shared: %10u%s\n", shared, buffer);
- if (tomoyo_quota_for_elements)
- snprintf(buffer, sizeof(buffer) - 1,
- " (Quota: %10u)",
- tomoyo_quota_for_elements);
- else
- buffer[0] = '\0';
- tomoyo_io_printf(head, "Private: %10u%s\n", private, buffer);
- tomoyo_io_printf(head, "Dynamic: %10u\n", dynamic);
- tomoyo_io_printf(head, "Total: %10u\n",
- shared + private + dynamic);
+ tomoyo_io_printf(head, "Policy: %10u%s\n", policy, buffer);
+ tomoyo_io_printf(head, "Total: %10u\n", policy);
head->read_eof = true;
}
return 0;
@@ -494,9 +369,7 @@
char *data = head->write_buf;
unsigned int size;
- if (sscanf(data, "Shared: %u", &size) == 1)
- tomoyo_quota_for_savename = size;
- else if (sscanf(data, "Private: %u", &size) == 1)
- tomoyo_quota_for_elements = size;
+ if (sscanf(data, "Policy: %u", &size) == 1)
+ tomoyo_quota_for_policy = size;
return 0;
}
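[Editor's note, not part of the patch] The rewritten accounting above charges whatever the slab allocator really hands out, ksize(ptr), against one atomic counter instead of carving fixed PATH_MAX chunks. A minimal sketch of such a quota check under hypothetical names; unlike tomoyo_memory_ok() above, this variant also undoes the charge when the quota is exceeded.

#include <linux/atomic.h>
#include <linux/slab.h>
#include <linux/string.h>

static atomic_t my_policy_memory_size;
static unsigned int my_quota;	/* 0 means "no limit" */

/* Charge @ptr against the quota; back the charge out if it does not fit. */
static bool my_memory_ok(void *ptr)
{
	const int len = ptr ? ksize(ptr) : 0;

	atomic_add(len, &my_policy_memory_size);
	if (ptr && (!my_quota ||
		    atomic_read(&my_policy_memory_size) <= my_quota)) {
		memset(ptr, 0, len);	/* hand back zeroed memory */
		return true;
	}
	atomic_sub(len, &my_policy_memory_size);
	return false;
}

/* Undo the charge made by my_memory_ok() and release the object. */
static void my_memory_free(void *ptr)
{
	atomic_sub(ksize(ptr), &my_policy_memory_size);
	kfree(ptr);
}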
diff --git a/security/tomoyo/realpath.h b/security/tomoyo/realpath.h
deleted file mode 100644
index 78217a3..0000000
--- a/security/tomoyo/realpath.h
+++ /dev/null
@@ -1,66 +0,0 @@
-/*
- * security/tomoyo/realpath.h
- *
- * Get the canonicalized absolute pathnames. The basis for TOMOYO.
- *
- * Copyright (C) 2005-2009 NTT DATA CORPORATION
- *
- * Version: 2.2.0 2009/04/01
- *
- */
-
-#ifndef _SECURITY_TOMOYO_REALPATH_H
-#define _SECURITY_TOMOYO_REALPATH_H
-
-struct path;
-struct tomoyo_path_info;
-struct tomoyo_io_buffer;
-
-/* Convert binary string to ascii string. */
-int tomoyo_encode(char *buffer, int buflen, const char *str);
-
-/* Returns realpath(3) of the given pathname but ignores chroot'ed root. */
-int tomoyo_realpath_from_path2(struct path *path, char *newname,
- int newname_len);
-
-/*
- * Returns realpath(3) of the given pathname but ignores chroot'ed root.
- * These functions use tomoyo_alloc(), so the caller must call tomoyo_free()
- * if these functions didn't return NULL.
- */
-char *tomoyo_realpath(const char *pathname);
-/*
- * Same with tomoyo_realpath() except that it doesn't follow the final symlink.
- */
-char *tomoyo_realpath_nofollow(const char *pathname);
-/* Same with tomoyo_realpath() except that the pathname is already solved. */
-char *tomoyo_realpath_from_path(struct path *path);
-
-/*
- * Allocate memory for ACL entry.
- * The RAM is chunked, so NEVER try to kfree() the returned pointer.
- */
-void *tomoyo_alloc_element(const unsigned int size);
-
-/*
- * Keep the given name on the RAM.
- * The RAM is shared, so NEVER try to modify or kfree() the returned name.
- */
-const struct tomoyo_path_info *tomoyo_save_name(const char *name);
-
-/* Allocate memory for temporary use (e.g. permission checks). */
-void *tomoyo_alloc(const size_t size);
-
-/* Free memory allocated by tomoyo_alloc(). */
-void tomoyo_free(const void *p);
-
-/* Check for memory usage. */
-int tomoyo_read_memory_counter(struct tomoyo_io_buffer *head);
-
-/* Set memory quota. */
-int tomoyo_write_memory_quota(struct tomoyo_io_buffer *head);
-
-/* Initialize realpath related code. */
-void __init tomoyo_realpath_init(void);
-
-#endif /* !defined(_SECURITY_TOMOYO_REALPATH_H) */
diff --git a/security/tomoyo/tomoyo.c b/security/tomoyo/tomoyo.c
index 2aceebf..dedd97d 100644
--- a/security/tomoyo/tomoyo.c
+++ b/security/tomoyo/tomoyo.c
@@ -11,8 +11,6 @@
#include <linux/security.h>
#include "common.h"
-#include "tomoyo.h"
-#include "realpath.h"
static int tomoyo_cred_alloc_blank(struct cred *new, gfp_t gfp)
{
@@ -23,21 +21,23 @@
static int tomoyo_cred_prepare(struct cred *new, const struct cred *old,
gfp_t gfp)
{
- /*
- * Since "struct tomoyo_domain_info *" is a sharable pointer,
- * we don't need to duplicate.
- */
- new->security = old->security;
+ struct tomoyo_domain_info *domain = old->security;
+ new->security = domain;
+ if (domain)
+ atomic_inc(&domain->users);
return 0;
}
static void tomoyo_cred_transfer(struct cred *new, const struct cred *old)
{
- /*
- * Since "struct tomoyo_domain_info *" is a sharable pointer,
- * we don't need to duplicate.
- */
- new->security = old->security;
+ tomoyo_cred_prepare(new, old, 0);
+}
+
+static void tomoyo_cred_free(struct cred *cred)
+{
+ struct tomoyo_domain_info *domain = cred->security;
+ if (domain)
+ atomic_dec(&domain->users);
}
static int tomoyo_bprm_set_creds(struct linux_binprm *bprm)
@@ -61,6 +61,14 @@
if (!tomoyo_policy_loaded)
tomoyo_load_policy(bprm->filename);
/*
+ * Release reference to "struct tomoyo_domain_info" stored inside
+ * "bprm->cred->security". New reference to "struct tomoyo_domain_info"
+ * stored inside "bprm->cred->security" will be acquired later inside
+ * tomoyo_find_next_domain().
+ */
+ atomic_dec(&((struct tomoyo_domain_info *)
+ bprm->cred->security)->users);
+ /*
* Tell tomoyo_bprm_check_security() is called for the first time of an
* execve operation.
*/
@@ -76,8 +84,12 @@
* Execute permission is checked against pathname passed to do_execve()
* using current domain.
*/
- if (!domain)
- return tomoyo_find_next_domain(bprm);
+ if (!domain) {
+ const int idx = tomoyo_read_lock();
+ const int err = tomoyo_find_next_domain(bprm);
+ tomoyo_read_unlock(idx);
+ return err;
+ }
/*
* Read permission is checked against interpreters using next domain.
*/
@@ -87,67 +99,56 @@
static int tomoyo_path_truncate(struct path *path, loff_t length,
unsigned int time_attrs)
{
- return tomoyo_check_1path_perm(tomoyo_domain(),
- TOMOYO_TYPE_TRUNCATE_ACL,
- path);
+ return tomoyo_path_perm(TOMOYO_TYPE_TRUNCATE, path);
}
static int tomoyo_path_unlink(struct path *parent, struct dentry *dentry)
{
struct path path = { parent->mnt, dentry };
- return tomoyo_check_1path_perm(tomoyo_domain(),
- TOMOYO_TYPE_UNLINK_ACL,
- &path);
+ return tomoyo_path_perm(TOMOYO_TYPE_UNLINK, &path);
}
static int tomoyo_path_mkdir(struct path *parent, struct dentry *dentry,
int mode)
{
struct path path = { parent->mnt, dentry };
- return tomoyo_check_1path_perm(tomoyo_domain(),
- TOMOYO_TYPE_MKDIR_ACL,
- &path);
+ return tomoyo_path_perm(TOMOYO_TYPE_MKDIR, &path);
}
static int tomoyo_path_rmdir(struct path *parent, struct dentry *dentry)
{
struct path path = { parent->mnt, dentry };
- return tomoyo_check_1path_perm(tomoyo_domain(),
- TOMOYO_TYPE_RMDIR_ACL,
- &path);
+ return tomoyo_path_perm(TOMOYO_TYPE_RMDIR, &path);
}
static int tomoyo_path_symlink(struct path *parent, struct dentry *dentry,
const char *old_name)
{
struct path path = { parent->mnt, dentry };
- return tomoyo_check_1path_perm(tomoyo_domain(),
- TOMOYO_TYPE_SYMLINK_ACL,
- &path);
+ return tomoyo_path_perm(TOMOYO_TYPE_SYMLINK, &path);
}
static int tomoyo_path_mknod(struct path *parent, struct dentry *dentry,
int mode, unsigned int dev)
{
struct path path = { parent->mnt, dentry };
- int type = TOMOYO_TYPE_CREATE_ACL;
+ int type = TOMOYO_TYPE_CREATE;
switch (mode & S_IFMT) {
case S_IFCHR:
- type = TOMOYO_TYPE_MKCHAR_ACL;
+ type = TOMOYO_TYPE_MKCHAR;
break;
case S_IFBLK:
- type = TOMOYO_TYPE_MKBLOCK_ACL;
+ type = TOMOYO_TYPE_MKBLOCK;
break;
case S_IFIFO:
- type = TOMOYO_TYPE_MKFIFO_ACL;
+ type = TOMOYO_TYPE_MKFIFO;
break;
case S_IFSOCK:
- type = TOMOYO_TYPE_MKSOCK_ACL;
+ type = TOMOYO_TYPE_MKSOCK;
break;
}
- return tomoyo_check_1path_perm(tomoyo_domain(),
- type, &path);
+ return tomoyo_path_perm(type, &path);
}
static int tomoyo_path_link(struct dentry *old_dentry, struct path *new_dir,
@@ -155,9 +156,7 @@
{
struct path path1 = { new_dir->mnt, old_dentry };
struct path path2 = { new_dir->mnt, new_dentry };
- return tomoyo_check_2path_perm(tomoyo_domain(),
- TOMOYO_TYPE_LINK_ACL,
- &path1, &path2);
+ return tomoyo_path2_perm(TOMOYO_TYPE_LINK, &path1, &path2);
}
static int tomoyo_path_rename(struct path *old_parent,
@@ -167,16 +166,14 @@
{
struct path path1 = { old_parent->mnt, old_dentry };
struct path path2 = { new_parent->mnt, new_dentry };
- return tomoyo_check_2path_perm(tomoyo_domain(),
- TOMOYO_TYPE_RENAME_ACL,
- &path1, &path2);
+ return tomoyo_path2_perm(TOMOYO_TYPE_RENAME, &path1, &path2);
}
static int tomoyo_file_fcntl(struct file *file, unsigned int cmd,
unsigned long arg)
{
if (cmd == F_SETFL && ((arg ^ file->f_flags) & O_APPEND))
- return tomoyo_check_rewrite_permission(tomoyo_domain(), file);
+ return tomoyo_check_rewrite_permission(file);
return 0;
}
@@ -189,6 +186,51 @@
return tomoyo_check_open_permission(tomoyo_domain(), &f->f_path, flags);
}
+static int tomoyo_file_ioctl(struct file *file, unsigned int cmd,
+ unsigned long arg)
+{
+ return tomoyo_path_perm(TOMOYO_TYPE_IOCTL, &file->f_path);
+}
+
+static int tomoyo_path_chmod(struct dentry *dentry, struct vfsmount *mnt,
+ mode_t mode)
+{
+ struct path path = { mnt, dentry };
+ return tomoyo_path_perm(TOMOYO_TYPE_CHMOD, &path);
+}
+
+static int tomoyo_path_chown(struct path *path, uid_t uid, gid_t gid)
+{
+ int error = 0;
+ if (uid != (uid_t) -1)
+ error = tomoyo_path_perm(TOMOYO_TYPE_CHOWN, path);
+ if (!error && gid != (gid_t) -1)
+ error = tomoyo_path_perm(TOMOYO_TYPE_CHGRP, path);
+ return error;
+}
+
+static int tomoyo_path_chroot(struct path *path)
+{
+ return tomoyo_path_perm(TOMOYO_TYPE_CHROOT, path);
+}
+
+static int tomoyo_sb_mount(char *dev_name, struct path *path,
+ char *type, unsigned long flags, void *data)
+{
+ return tomoyo_path_perm(TOMOYO_TYPE_MOUNT, path);
+}
+
+static int tomoyo_sb_umount(struct vfsmount *mnt, int flags)
+{
+ struct path path = { mnt, mnt->mnt_root };
+ return tomoyo_path_perm(TOMOYO_TYPE_UMOUNT, &path);
+}
+
+static int tomoyo_sb_pivotroot(struct path *old_path, struct path *new_path)
+{
+ return tomoyo_path2_perm(TOMOYO_TYPE_PIVOT_ROOT, new_path, old_path);
+}
+
/*
* tomoyo_security_ops is a "struct security_operations" which is used for
* registering TOMOYO.
@@ -198,6 +240,7 @@
.cred_alloc_blank = tomoyo_cred_alloc_blank,
.cred_prepare = tomoyo_cred_prepare,
.cred_transfer = tomoyo_cred_transfer,
+ .cred_free = tomoyo_cred_free,
.bprm_set_creds = tomoyo_bprm_set_creds,
.bprm_check_security = tomoyo_bprm_check_security,
.file_fcntl = tomoyo_file_fcntl,
@@ -210,8 +253,18 @@
.path_mknod = tomoyo_path_mknod,
.path_link = tomoyo_path_link,
.path_rename = tomoyo_path_rename,
+ .file_ioctl = tomoyo_file_ioctl,
+ .path_chmod = tomoyo_path_chmod,
+ .path_chown = tomoyo_path_chown,
+ .path_chroot = tomoyo_path_chroot,
+ .sb_mount = tomoyo_sb_mount,
+ .sb_umount = tomoyo_sb_umount,
+ .sb_pivotroot = tomoyo_sb_pivotroot,
};
+/* Lock for GC. */
+struct srcu_struct tomoyo_ss;
+
static int __init tomoyo_init(void)
{
struct cred *cred = (struct cred *) current_cred();
@@ -219,7 +272,8 @@
if (!security_module_enable(&tomoyo_security_ops))
return 0;
/* register ourselves with the security framework */
- if (register_security(&tomoyo_security_ops))
+ if (register_security(&tomoyo_security_ops) ||
+ init_srcu_struct(&tomoyo_ss))
panic("Failure registering TOMOYO Linux");
printk(KERN_INFO "TOMOYO Linux initialized\n");
cred->security = &tomoyo_kernel_domain;
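
A sketch of the SRCU pattern the hunks above rely on: tomoyo_ss is initialized in tomoyo_init() and the bprm hook brackets tomoyo_find_next_domain() with tomoyo_read_lock()/tomoyo_read_unlock(). Assuming those helpers are thin wrappers around the SRCU read-side primitives (their definitions are not part of this hunk), the pattern looks roughly like this kernel-style fragment, not a standalone program:

	#include <linux/srcu.h>

	extern struct srcu_struct tomoyo_ss;	/* defined in tomoyo.c above */

	/* Assumed wrapper: enter an SRCU read-side critical section and
	 * return the index that must be passed back to the unlock. */
	static inline int tomoyo_read_lock_sketch(void)
	{
		return srcu_read_lock(&tomoyo_ss);
	}

	static inline void tomoyo_read_unlock_sketch(int idx)
	{
		srcu_read_unlock(&tomoyo_ss, idx);
	}

	/* The garbage collector would then wait out all readers with
	 * synchronize_srcu(&tomoyo_ss) before freeing policy elements. */
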
diff --git a/security/tomoyo/tomoyo.h b/security/tomoyo/tomoyo.h
deleted file mode 100644
index ed75832..0000000
--- a/security/tomoyo/tomoyo.h
+++ /dev/null
@@ -1,94 +0,0 @@
-/*
- * security/tomoyo/tomoyo.h
- *
- * Implementation of the Domain-Based Mandatory Access Control.
- *
- * Copyright (C) 2005-2009 NTT DATA CORPORATION
- *
- * Version: 2.2.0 2009/04/01
- *
- */
-
-#ifndef _SECURITY_TOMOYO_TOMOYO_H
-#define _SECURITY_TOMOYO_TOMOYO_H
-
-struct tomoyo_path_info;
-struct path;
-struct inode;
-struct linux_binprm;
-struct pt_regs;
-
-int tomoyo_check_exec_perm(struct tomoyo_domain_info *domain,
- const struct tomoyo_path_info *filename);
-int tomoyo_check_open_permission(struct tomoyo_domain_info *domain,
- struct path *path, const int flag);
-int tomoyo_check_1path_perm(struct tomoyo_domain_info *domain,
- const u8 operation, struct path *path);
-int tomoyo_check_2path_perm(struct tomoyo_domain_info *domain,
- const u8 operation, struct path *path1,
- struct path *path2);
-int tomoyo_check_rewrite_permission(struct tomoyo_domain_info *domain,
- struct file *filp);
-int tomoyo_find_next_domain(struct linux_binprm *bprm);
-
-/* Index numbers for Access Controls. */
-
-#define TOMOYO_TYPE_SINGLE_PATH_ACL 0
-#define TOMOYO_TYPE_DOUBLE_PATH_ACL 1
-
-/* Index numbers for File Controls. */
-
-/*
- * TYPE_READ_WRITE_ACL is special. TYPE_READ_WRITE_ACL is automatically set
- * if both TYPE_READ_ACL and TYPE_WRITE_ACL are set. Both TYPE_READ_ACL and
- * TYPE_WRITE_ACL are automatically set if TYPE_READ_WRITE_ACL is set.
- * TYPE_READ_WRITE_ACL is automatically cleared if either TYPE_READ_ACL or
- * TYPE_WRITE_ACL is cleared. Both TYPE_READ_ACL and TYPE_WRITE_ACL are
- * automatically cleared if TYPE_READ_WRITE_ACL is cleared.
- */
-
-#define TOMOYO_TYPE_READ_WRITE_ACL 0
-#define TOMOYO_TYPE_EXECUTE_ACL 1
-#define TOMOYO_TYPE_READ_ACL 2
-#define TOMOYO_TYPE_WRITE_ACL 3
-#define TOMOYO_TYPE_CREATE_ACL 4
-#define TOMOYO_TYPE_UNLINK_ACL 5
-#define TOMOYO_TYPE_MKDIR_ACL 6
-#define TOMOYO_TYPE_RMDIR_ACL 7
-#define TOMOYO_TYPE_MKFIFO_ACL 8
-#define TOMOYO_TYPE_MKSOCK_ACL 9
-#define TOMOYO_TYPE_MKBLOCK_ACL 10
-#define TOMOYO_TYPE_MKCHAR_ACL 11
-#define TOMOYO_TYPE_TRUNCATE_ACL 12
-#define TOMOYO_TYPE_SYMLINK_ACL 13
-#define TOMOYO_TYPE_REWRITE_ACL 14
-#define TOMOYO_MAX_SINGLE_PATH_OPERATION 15
-
-#define TOMOYO_TYPE_LINK_ACL 0
-#define TOMOYO_TYPE_RENAME_ACL 1
-#define TOMOYO_MAX_DOUBLE_PATH_OPERATION 2
-
-#define TOMOYO_DOMAINPOLICY 0
-#define TOMOYO_EXCEPTIONPOLICY 1
-#define TOMOYO_DOMAIN_STATUS 2
-#define TOMOYO_PROCESS_STATUS 3
-#define TOMOYO_MEMINFO 4
-#define TOMOYO_SELFDOMAIN 5
-#define TOMOYO_VERSION 6
-#define TOMOYO_PROFILE 7
-#define TOMOYO_MANAGER 8
-
-extern struct tomoyo_domain_info tomoyo_kernel_domain;
-
-static inline struct tomoyo_domain_info *tomoyo_domain(void)
-{
- return current_cred()->security;
-}
-
-static inline struct tomoyo_domain_info *tomoyo_real_domain(struct task_struct
- *task)
-{
- return task_cred_xxx(task, security);
-}
-
-#endif /* !defined(_SECURITY_TOMOYO_TOMOYO_H) */
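
The comment block removed with tomoyo.h above describes how TYPE_READ_WRITE_ACL mirrors the conjunction of TYPE_READ_ACL and TYPE_WRITE_ACL. An illustrative userspace sketch of that coupling, with hypothetical names and bit positions derived from the index numbers in the deleted header:

	#include <stdio.h>

	/* Illustrative bits only: READ_WRITE = index 0, READ = index 2, WRITE = index 3. */
	enum { PERM_READ_WRITE = 1 << 0, PERM_READ = 1 << 2, PERM_WRITE = 1 << 3 };

	static unsigned int set_perm(unsigned int perms, unsigned int bit)
	{
		perms |= bit;
		if (bit == PERM_READ_WRITE)
			perms |= PERM_READ | PERM_WRITE;    /* READ_WRITE implies both */
		if ((perms & PERM_READ) && (perms & PERM_WRITE))
			perms |= PERM_READ_WRITE;           /* both imply READ_WRITE */
		return perms;
	}

	static unsigned int clear_perm(unsigned int perms, unsigned int bit)
	{
		perms &= ~bit;
		if (bit == PERM_READ || bit == PERM_WRITE)
			perms &= ~PERM_READ_WRITE;          /* either cleared => READ_WRITE cleared */
		if (bit == PERM_READ_WRITE)
			perms &= ~(PERM_READ | PERM_WRITE); /* READ_WRITE cleared => both cleared */
		return perms;
	}

	int main(void)
	{
		unsigned int p = 0;
		p = set_perm(p, PERM_READ);
		p = set_perm(p, PERM_WRITE);   /* READ_WRITE becomes set automatically */
		printf("0x%x\n", p);
		p = clear_perm(p, PERM_READ);  /* READ_WRITE is cleared with it */
		printf("0x%x\n", p);
		return 0;
	}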