Merge branch 'master' of master.kernel.org:/pub/scm/linux/kernel/git/torvalds/linux-2.6

This commit is contained in:
David S. Miller 2008-07-27 16:51:21 -07:00
commit 281c7413ed
243 changed files with 5975 additions and 2724 deletions


@ -138,14 +138,8 @@ So, what's changed?
Set active the IRQ edge(s)/level. This replaces the
SA1111 INTPOL manipulation, and the set_GPIO_IRQ_edge()
function. Type should be one of the following:
#define IRQT_NOEDGE (0)
#define IRQT_RISING (__IRQT_RISEDGE)
#define IRQT_FALLING (__IRQT_FALEDGE)
#define IRQT_BOTHEDGE (__IRQT_RISEDGE|__IRQT_FALEDGE)
#define IRQT_LOW (__IRQT_LOWLVL)
#define IRQT_HIGH (__IRQT_HIGHLVL)
function. Type should be one of IRQ_TYPE_xxx defined in
<linux/irq.h>
3. set_GPIO_IRQ_edge() is obsolete, and should be replaced by set_irq_type.
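The conversion described above is purely mechanical: callers stop using the SA1111-era IRQT_* constants and pass the generic IRQ_TYPE_* flags from <linux/irq.h> to set_irq_type() instead. A minimal illustrative sketch (EXAMPLE_ETH_IRQ is a made-up placeholder, not a constant from this tree):

#include <linux/init.h>
#include <linux/irq.h>

/* Illustrative only: EXAMPLE_ETH_IRQ is a placeholder IRQ number. */
#define EXAMPLE_ETH_IRQ 42

static void __init example_init_irq(void)
{
	/* was: set_irq_type(EXAMPLE_ETH_IRQ, IRQT_FALLING); */
	set_irq_type(EXAMPLE_ETH_IRQ, IRQ_TYPE_EDGE_FALLING);
}

The board and platform hunks further down in this commit follow exactly this pattern.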


@ -1024,6 +1024,7 @@ Prior to version 0.9.0rc4 options had a 'snd_' prefix. This was removed.
intel-mac-v3 Intel Mac Type 3
intel-mac-v4 Intel Mac Type 4
intel-mac-v5 Intel Mac Type 5
intel-mac-auto Intel Mac (detect type according to subsystem id)
macmini Intel Mac Mini (equivalent with type 3)
macbook Intel Mac Book (eq. type 5)
macbook-pro-v1 Intel Mac Book Pro 1st generation (eq. type 3)


@ -73,10 +73,10 @@ recompiled, or use "make C=2" to run sparse on the files whether they need to
be recompiled or not. The latter is a fast way to check the whole tree if you
have already built it.
The optional make variable CHECKFLAGS can be used to pass arguments to sparse.
The build system passes -Wbitwise to sparse automatically. To perform
endianness checks, you may define __CHECK_ENDIAN__:
The optional make variable CF can be used to pass arguments to sparse. The
build system passes -Wbitwise to sparse automatically. To perform endianness
checks, you may define __CHECK_ENDIAN__:
make C=2 CHECKFLAGS="-D__CHECK_ENDIAN__"
make C=2 CF="-D__CHECK_ENDIAN__"
These checks are disabled by default as they generate a host of warnings.
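As an aside, the endianness checks work through sparse's __bitwise typedefs (__be32, __le32, and friends): mixing them with plain integer types without an explicit conversion triggers a warning. A minimal sketch (illustrative, not taken from this commit) of code that make C=2 CF="-D__CHECK_ENDIAN__" would flag:

#include <linux/types.h>
#include <asm/byteorder.h>

static u32 example_read_len(const void *hdr)
{
	__be32 raw = *(const __be32 *)hdr;
	u32 ok, bad;

	ok = be32_to_cpu(raw);	/* correct: explicit big-endian to CPU conversion */
	bad = raw;		/* sparse warns: incorrect type in assignment */

	return ok + bad;
}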

Kbuild (3 changed lines)

@ -43,7 +43,7 @@ $(obj)/$(bounds-file): kernel/bounds.s Kbuild
# 2) Generate asm-offsets.h
#
offsets-file := include/asm-$(SRCARCH)/asm-offsets.h
offsets-file := include/asm/asm-offsets.h
always += $(offsets-file)
targets += $(offsets-file)
@ -81,7 +81,6 @@ arch/$(SRCARCH)/kernel/asm-offsets.s: arch/$(SRCARCH)/kernel/asm-offsets.c \
$(call if_changed_dep,cc_s_c)
$(obj)/$(offsets-file): arch/$(SRCARCH)/kernel/asm-offsets.s Kbuild
$(Q)mkdir -p $(dir $@)
$(call cmd,offsets)
#####

Makefile (115 changed lines)

@ -205,6 +205,9 @@ ifeq ($(ARCH),x86_64)
SRCARCH := x86
endif
# Where to locate arch specific headers
hdr-arch := $(SRCARCH)
KCONFIG_CONFIG ?= .config
# SHELL used by kbuild
@ -326,7 +329,8 @@ AFLAGS_KERNEL =
# Needed to be compatible with the O= option
LINUXINCLUDE := -Iinclude \
$(if $(KBUILD_SRC),-Iinclude2 -I$(srctree)/include) \
-include include/linux/autoconf.h
-I$(srctree)/arch/$(hdr-arch)/include \
-include include/linux/autoconf.h
KBUILD_CPPFLAGS := -D__KERNEL__ $(LINUXINCLUDE)
@ -922,7 +926,9 @@ ifneq ($(KBUILD_SRC),)
/bin/false; \
fi;
$(Q)if [ ! -d include2 ]; then mkdir -p include2; fi;
$(Q)ln -fsn $(srctree)/include/asm-$(SRCARCH) include2/asm
$(Q)if [ -e $(srctree)/include/asm-$(SRCARCH)/system.h ]; then \
ln -fsn $(srctree)/include/asm-$(SRCARCH) include2/asm; \
fi
endif
# prepare2 creates a makefile if using a separate output directory
@ -948,22 +954,34 @@ export CPPFLAGS_vmlinux.lds += -P -C -U$(ARCH)
# The asm symlink changes when $(ARCH) changes.
# Detect this and ask user to run make mrproper
include/asm: FORCE
$(Q)set -e; asmlink=`readlink include/asm | cut -d '-' -f 2`; \
if [ -L include/asm ]; then \
if [ "$$asmlink" != "$(SRCARCH)" ]; then \
define check-symlink
set -e; \
if [ -L include/asm ]; then \
asmlink=`readlink include/asm | cut -d '-' -f 2`; \
if [ "$$asmlink" != "$(SRCARCH)" ]; then \
echo "ERROR: the symlink $@ points to asm-$$asmlink but asm-$(SRCARCH) was expected"; \
echo " set ARCH or save .config and run 'make mrproper' to fix it"; \
exit 1; \
fi; \
else \
echo ' SYMLINK $@ -> include/asm-$(SRCARCH)'; \
if [ ! -d include ]; then \
mkdir -p include; \
fi; \
ln -fsn asm-$(SRCARCH) $@; \
exit 1; \
fi; \
fi
endef
# We create the target directory of the symlink if it does
# not exist so the test in check-symlink works and we have a
# directory for generated files as used by some architectures.
define create-symlink
if [ ! -L include/asm ]; then \
echo ' SYMLINK $@ -> include/asm-$(SRCARCH)'; \
if [ ! -d include/asm-$(SRCARCH) ]; then \
mkdir -p include/asm-$(SRCARCH); \
fi; \
ln -fsn asm-$(SRCARCH) $@; \
fi
endef
include/asm: FORCE
$(Q)$(check-symlink)
$(Q)$(create-symlink)
# Generate some files
# ---------------------------------------------------------------------------
@ -1010,36 +1028,43 @@ firmware_install: FORCE
# ---------------------------------------------------------------------------
# Kernel headers
INSTALL_HDR_PATH=$(objtree)/usr
export INSTALL_HDR_PATH
HDRFILTER=generic i386 x86_64
HDRARCHES=$(filter-out $(HDRFILTER),$(patsubst $(srctree)/include/asm-%/Kbuild,%,$(wildcard $(srctree)/include/asm-*/Kbuild)))
#Default location for installed headers
export INSTALL_HDR_PATH = $(objtree)/usr
hdr-inst := -rR -f $(srctree)/scripts/Makefile.headersinst obj
# Find out where the Kbuild file is located to support
# arch/$(ARCH)/include/asm
hdr-dir = $(strip \
$(if $(wildcard $(srctree)/arch/$(hdr-arch)/include/asm/Kbuild), \
arch/$(hdr-arch)/include/asm, include/asm-$(hdr-arch)))
# If we do an all arch process set dst to asm-$(hdr-arch)
hdr-dst = $(if $(KBUILD_HEADERS), dst=include/asm-$(hdr-arch), dst=include/asm)
PHONY += __headers
__headers: include/linux/version.h scripts_basic FORCE
$(Q)$(MAKE) $(build)=scripts scripts/unifdef
PHONY += headers_install_all
headers_install_all: include/linux/version.h scripts_basic FORCE
$(Q)$(MAKE) $(build)=scripts scripts/unifdef
$(Q)for arch in $(HDRARCHES); do \
$(MAKE) ARCH=$$arch -f $(srctree)/scripts/Makefile.headersinst obj=include BIASMDIR=-bi-$$arch ;\
done
headers_install_all:
$(Q)$(CONFIG_SHELL) $(srctree)/scripts/headers.sh install
PHONY += headers_install
headers_install: include/linux/version.h scripts_basic FORCE
@if [ ! -r $(srctree)/include/asm-$(SRCARCH)/Kbuild ]; then \
echo '*** Error: Headers not exportable for this architecture ($(SRCARCH))'; \
exit 1 ; fi
$(Q)$(MAKE) $(build)=scripts scripts/unifdef
$(Q)$(MAKE) -f $(srctree)/scripts/Makefile.headersinst ARCH=$(SRCARCH) obj=include
headers_install: __headers
$(if $(wildcard $(srctree)/$(hdr-dir)/Kbuild),, \
$(error Headers not exportable for the $(SRCARCH) architecture))
$(Q)$(MAKE) $(hdr-inst)=include
$(Q)$(MAKE) $(hdr-inst)=$(hdr-dir) $(hdr-dst)
PHONY += headers_check_all
headers_check_all: headers_install_all
$(Q)for arch in $(HDRARCHES); do \
$(MAKE) ARCH=$$arch -f $(srctree)/scripts/Makefile.headersinst obj=include BIASMDIR=-bi-$$arch HDRCHECK=1 ;\
done
$(Q)$(CONFIG_SHELL) $(srctree)/scripts/headers.sh check
PHONY += headers_check
headers_check: headers_install
$(Q)$(MAKE) -f $(srctree)/scripts/Makefile.headersinst ARCH=$(SRCARCH) obj=include HDRCHECK=1
$(Q)$(MAKE) $(hdr-inst)=include HDRCHECK=1
$(Q)$(MAKE) $(hdr-inst)=$(hdr-dir) $(hdr-dst) HDRCHECK=1
# ---------------------------------------------------------------------------
# Modules
@ -1131,7 +1156,7 @@ MRPROPER_FILES += .config .config.old include/asm .version .old_version \
include/linux/autoconf.h include/linux/version.h \
include/linux/utsrelease.h \
include/linux/bounds.h include/asm*/asm-offsets.h \
Module.symvers tags TAGS cscope*
Module.symvers Module.markers tags TAGS cscope*
# clean - Delete most, but leave enough to build external modules
#
@ -1150,7 +1175,7 @@ clean: archclean $(clean-dirs)
\( -name '*.[oas]' -o -name '*.ko' -o -name '.*.cmd' \
-o -name '.*.d' -o -name '.*.tmp' -o -name '*.mod.c' \
-o -name '*.symtypes' -o -name 'modules.order' \
-o -name 'Module.markers' \) \
-o -name 'Module.markers' -o -name '.tmp_*.o.*' \) \
-type f -print | xargs rm -f
# mrproper - Delete all generated files, including .config
@ -1224,21 +1249,17 @@ help:
@echo ' cscope - Generate cscope index'
@echo ' kernelrelease - Output the release version string'
@echo ' kernelversion - Output the version stored in Makefile'
@if [ -r $(srctree)/include/asm-$(SRCARCH)/Kbuild ]; then \
echo ' headers_install - Install sanitised kernel headers to INSTALL_HDR_PATH'; \
@echo ' headers_install - Install sanitised kernel headers to INSTALL_HDR_PATH'; \
echo ' (default: $(INSTALL_HDR_PATH))'; \
fi
@echo ''
echo ''
@echo 'Static analysers'
@echo ' checkstack - Generate a list of stack hogs'
@echo ' namespacecheck - Name space analysis on compiled kernel'
@echo ' versioncheck - Sanity check on version.h usage'
@echo ' includecheck - Check for duplicate included header files'
@echo ' export_report - List the usages of all exported symbols'
@if [ -r $(srctree)/include/asm-$(SRCARCH)/Kbuild ]; then \
echo ' headers_check - Sanity check on exported headers'; \
fi
@echo ''
@echo ' headers_check - Sanity check on exported headers'; \
echo ''
@echo 'Kernel packaging:'
@$(MAKE) $(build)=$(package-dir) help
@echo ''
@ -1411,7 +1432,11 @@ define find-sources
\( -name config -o -name 'asm-*' \) -prune \
-o -name $1 -print; \
for arch in $(ALLINCLUDE_ARCHS) ; do \
find $(__srctree)include/asm-$${arch} $(RCS_FIND_IGNORE) \
test -e $(__srctree)include/asm-$${arch} && \
find $(__srctree)include/asm-$${arch} $(RCS_FIND_IGNORE) \
-name $1 -print; \
test -e $(__srctree)arch/$${arch}/include/asm && \
find $(__srctree)arch/$${arch}/include/asm $(RCS_FIND_IGNORE) \
-name $1 -print; \
done ; \
find $(__srctree)include/asm-generic $(RCS_FIND_IGNORE) \


@ -314,7 +314,7 @@ config ARCH_IOP32X
select PLAT_IOP
select PCI
select GENERIC_GPIO
select HAVE_GPIO_LIB
select ARCH_REQUIRE_GPIOLIB
help
Support for Intel's 80219 and IOP32X (XScale) family of
processors.
@ -325,7 +325,7 @@ config ARCH_IOP33X
select PLAT_IOP
select PCI
select GENERIC_GPIO
select HAVE_GPIO_LIB
select ARCH_REQUIRE_GPIOLIB
help
Support for Intel's IOP33X (XScale) family of processors.
@ -418,7 +418,7 @@ config ARCH_MXC
select GENERIC_CLOCKEVENTS
select ARCH_MTD_XIP
select GENERIC_GPIO
select HAVE_GPIO_LIB
select ARCH_REQUIRE_GPIOLIB
help
Support for Freescale MXC/iMX-based family of processors


@ -67,7 +67,7 @@ tune-$(CONFIG_CPU_ARM720T) :=-mtune=arm7tdmi
tune-$(CONFIG_CPU_ARM740T) :=-mtune=arm7tdmi
tune-$(CONFIG_CPU_ARM9TDMI) :=-mtune=arm9tdmi
tune-$(CONFIG_CPU_ARM940T) :=-mtune=arm9tdmi
tune-$(CONFIG_CPU_ARM946T) :=$(call cc-option,-mtune=arm9e,-mtune=arm9tdmi)
tune-$(CONFIG_CPU_ARM946E) :=$(call cc-option,-mtune=arm9e,-mtune=arm9tdmi)
tune-$(CONFIG_CPU_ARM920T) :=-mtune=arm9tdmi
tune-$(CONFIG_CPU_ARM922T) :=-mtune=arm9tdmi
tune-$(CONFIG_CPU_ARM925T) :=-mtune=arm9tdmi


@ -331,17 +331,17 @@ static int locomo_gpio_type(unsigned int irq, unsigned int type)
mask = 1 << (irq - LOCOMO_IRQ_GPIO_START);
if (type == IRQT_PROBE) {
if (type == IRQ_TYPE_PROBE) {
if ((GPIO_IRQ_rising_edge | GPIO_IRQ_falling_edge) & mask)
return 0;
type = __IRQT_RISEDGE | __IRQT_FALEDGE;
type = IRQ_TYPE_EDGE_RISING | IRQ_TYPE_EDGE_FALLING;
}
if (type & __IRQT_RISEDGE)
if (type & IRQ_TYPE_EDGE_RISING)
GPIO_IRQ_rising_edge |= mask;
else
GPIO_IRQ_rising_edge &= ~mask;
if (type & __IRQT_FALEDGE)
if (type & IRQ_TYPE_EDGE_FALLING)
GPIO_IRQ_falling_edge |= mask;
else
GPIO_IRQ_falling_edge &= ~mask;
@ -473,7 +473,7 @@ static void locomo_setup_irq(struct locomo *lchip)
/*
* Install handler for IRQ_LOCOMO_HW.
*/
set_irq_type(lchip->irq, IRQT_FALLING);
set_irq_type(lchip->irq, IRQ_TYPE_EDGE_FALLING);
set_irq_chip_data(lchip->irq, irqbase);
set_irq_chained_handler(lchip->irq, locomo_handler);


@ -241,14 +241,14 @@ static int sa1111_type_lowirq(unsigned int irq, unsigned int flags)
void __iomem *mapbase = get_irq_chip_data(irq);
unsigned long ip0;
if (flags == IRQT_PROBE)
if (flags == IRQ_TYPE_PROBE)
return 0;
if ((!(flags & __IRQT_RISEDGE) ^ !(flags & __IRQT_FALEDGE)) == 0)
if ((!(flags & IRQ_TYPE_EDGE_RISING) ^ !(flags & IRQ_TYPE_EDGE_FALLING)) == 0)
return -EINVAL;
ip0 = sa1111_readl(mapbase + SA1111_INTPOL0);
if (flags & __IRQT_RISEDGE)
if (flags & IRQ_TYPE_EDGE_RISING)
ip0 &= ~mask;
else
ip0 |= mask;
@ -338,14 +338,14 @@ static int sa1111_type_highirq(unsigned int irq, unsigned int flags)
void __iomem *mapbase = get_irq_chip_data(irq);
unsigned long ip1;
if (flags == IRQT_PROBE)
if (flags == IRQ_TYPE_PROBE)
return 0;
if ((!(flags & __IRQT_RISEDGE) ^ !(flags & __IRQT_FALEDGE)) == 0)
if ((!(flags & IRQ_TYPE_EDGE_RISING) ^ !(flags & IRQ_TYPE_EDGE_FALLING)) == 0)
return -EINVAL;
ip1 = sa1111_readl(mapbase + SA1111_INTPOL1);
if (flags & __IRQT_RISEDGE)
if (flags & IRQ_TYPE_EDGE_RISING)
ip1 &= ~mask;
else
ip1 |= mask;
@ -427,7 +427,7 @@ static void sa1111_setup_irq(struct sa1111 *sachip)
/*
* Register SA1111 interrupt
*/
set_irq_type(sachip->irq, IRQT_RISING);
set_irq_type(sachip->irq, IRQ_TYPE_EDGE_RISING);
set_irq_data(sachip->irq, irqbase);
set_irq_chained_handler(sachip->irq, sa1111_irq_handler);
}

File diff suppressed because it is too large.


@ -330,10 +330,10 @@ static void __init cap9adk_board_init(void)
/* Serial */
at91_add_device_serial();
/* USB Host */
set_irq_type(AT91CAP9_ID_UHP, IRQT_HIGH);
set_irq_type(AT91CAP9_ID_UHP, IRQ_TYPE_LEVEL_HIGH);
at91_add_device_usbh(&cap9adk_usbh_data);
/* USB HS */
set_irq_type(AT91CAP9_ID_UDPHS, IRQT_HIGH);
set_irq_type(AT91CAP9_ID_UDPHS, IRQ_TYPE_LEVEL_HIGH);
at91_add_device_usba(&cap9adk_usba_udc_data);
/* SPI */
at91_add_device_spi(cap9adk_spi_devices, ARRAY_SIZE(cap9adk_spi_devices));
@ -350,7 +350,7 @@ static void __init cap9adk_board_init(void)
/* I2C */
at91_add_device_i2c(NULL, 0);
/* LCD Controller */
set_irq_type(AT91CAP9_ID_LCDC, IRQT_HIGH);
set_irq_type(AT91CAP9_ID_LCDC, IRQ_TYPE_LEVEL_HIGH);
at91_add_device_lcdc(&cap9adk_lcdc_data);
/* AC97 */
at91_add_device_ac97(&cap9adk_ac97_data);

arch/arm/mach-at91/board-yl-9200.c (0 changed lines; mode changed from executable file to normal file)

@ -56,19 +56,19 @@ static int at91_aic_set_type(unsigned irq, unsigned type)
unsigned int smr, srctype;
switch (type) {
case IRQT_HIGH:
case IRQ_TYPE_LEVEL_HIGH:
srctype = AT91_AIC_SRCTYPE_HIGH;
break;
case IRQT_RISING:
case IRQ_TYPE_EDGE_RISING:
srctype = AT91_AIC_SRCTYPE_RISING;
break;
case IRQT_LOW:
case IRQ_TYPE_LEVEL_LOW:
if ((irq == AT91_ID_FIQ) || is_extern_irq(irq)) /* only supported on external interrupts */
srctype = AT91_AIC_SRCTYPE_LOW;
else
return -EINVAL;
break;
case IRQT_FALLING:
case IRQ_TYPE_EDGE_FALLING:
if ((irq == AT91_ID_FIQ) || is_extern_irq(irq)) /* only supported on external interrupts */
srctype = AT91_AIC_SRCTYPE_FALLING;
else


@ -226,7 +226,7 @@ static void ep93xx_gpio_irq_ack(unsigned int irq)
int port = line >> 3;
int port_mask = 1 << (line & 7);
if ((irq_desc[irq].status & IRQ_TYPE_SENSE_MASK) == IRQT_BOTHEDGE) {
if ((irq_desc[irq].status & IRQ_TYPE_SENSE_MASK) == IRQ_TYPE_EDGE_BOTH) {
gpio_int_type2[port] ^= port_mask; /* switch edge direction */
ep93xx_gpio_update_int_params(port);
}
@ -240,7 +240,7 @@ static void ep93xx_gpio_irq_mask_ack(unsigned int irq)
int port = line >> 3;
int port_mask = 1 << (line & 7);
if ((irq_desc[irq].status & IRQ_TYPE_SENSE_MASK) == IRQT_BOTHEDGE)
if ((irq_desc[irq].status & IRQ_TYPE_SENSE_MASK) == IRQ_TYPE_EDGE_BOTH)
gpio_int_type2[port] ^= port_mask; /* switch edge direction */
gpio_int_unmasked[port] &= ~port_mask;
@ -283,27 +283,27 @@ static int ep93xx_gpio_irq_type(unsigned int irq, unsigned int type)
gpio_direction_input(gpio);
switch (type) {
case IRQT_RISING:
case IRQ_TYPE_EDGE_RISING:
gpio_int_type1[port] |= port_mask;
gpio_int_type2[port] |= port_mask;
desc->handle_irq = handle_edge_irq;
break;
case IRQT_FALLING:
case IRQ_TYPE_EDGE_FALLING:
gpio_int_type1[port] |= port_mask;
gpio_int_type2[port] &= ~port_mask;
desc->handle_irq = handle_edge_irq;
break;
case IRQT_HIGH:
case IRQ_TYPE_LEVEL_HIGH:
gpio_int_type1[port] &= ~port_mask;
gpio_int_type2[port] |= port_mask;
desc->handle_irq = handle_level_irq;
break;
case IRQT_LOW:
case IRQ_TYPE_LEVEL_LOW:
gpio_int_type1[port] &= ~port_mask;
gpio_int_type2[port] &= ~port_mask;
desc->handle_irq = handle_level_irq;
break;
case IRQT_BOTHEDGE:
case IRQ_TYPE_EDGE_BOTH:
gpio_int_type1[port] |= port_mask;
/* set initial polarity based on current input level */
if (gpio_get_value(gpio))


@ -111,7 +111,7 @@ imx_gpio_irq_type(unsigned int _irq, unsigned int type)
reg = irq >> 5;
bit = 1 << (irq % 32);
if (type == IRQT_PROBE) {
if (type == IRQ_TYPE_PROBE) {
/* Don't mess with enabled GPIOs using preconfigured edges or
GPIOs set to alternate function during probe */
/* TODO: support probe */
@ -120,7 +120,7 @@ imx_gpio_irq_type(unsigned int _irq, unsigned int type)
// return 0;
// if (GAFR(gpio) & (0x3 << (((gpio) & 0xf)*2)))
// return 0;
// type = __IRQT_RISEDGE | __IRQT_FALEDGE;
// type = IRQ_TYPE_EDGE_RISING | IRQ_TYPE_EDGE_FALLING;
}
GIUS(reg) |= bit;
@ -128,19 +128,19 @@ imx_gpio_irq_type(unsigned int _irq, unsigned int type)
DEBUG_IRQ("setting type of irq %d to ", _irq);
if (type & __IRQT_RISEDGE) {
if (type & IRQ_TYPE_EDGE_RISING) {
DEBUG_IRQ("rising edges\n");
irq_type = 0x0;
}
if (type & __IRQT_FALEDGE) {
if (type & IRQ_TYPE_EDGE_FALLING) {
DEBUG_IRQ("falling edges\n");
irq_type = 0x1;
}
if (type & __IRQT_LOWLVL) {
if (type & IRQ_TYPE_LEVEL_LOW) {
DEBUG_IRQ("low level\n");
irq_type = 0x3;
}
if (type & __IRQT_HIGHLVL) {
if (type & IRQ_TYPE_LEVEL_HIGH) {
DEBUG_IRQ("high level\n");
irq_type = 0x2;
}


@ -329,19 +329,19 @@ static int ixp2000_GPIO_irq_type(unsigned int irq, unsigned int type)
/*
* Then, set the proper trigger type.
*/
if (type & IRQT_FALLING)
if (type & IRQ_TYPE_EDGE_FALLING)
GPIO_IRQ_falling_edge |= 1 << line;
else
GPIO_IRQ_falling_edge &= ~(1 << line);
if (type & IRQT_RISING)
if (type & IRQ_TYPE_EDGE_RISING)
GPIO_IRQ_rising_edge |= 1 << line;
else
GPIO_IRQ_rising_edge &= ~(1 << line);
if (type & IRQT_LOW)
if (type & IRQ_TYPE_LEVEL_LOW)
GPIO_IRQ_level_low |= 1 << line;
else
GPIO_IRQ_level_low &= ~(1 << line);
if (type & IRQT_HIGH)
if (type & IRQ_TYPE_LEVEL_HIGH)
GPIO_IRQ_level_high |= 1 << line;
else
GPIO_IRQ_level_high &= ~(1 << line);


@ -126,23 +126,23 @@ static int ixp23xx_irq_set_type(unsigned int irq, unsigned int type)
return -EINVAL;
switch (type) {
case IRQT_BOTHEDGE:
case IRQ_TYPE_EDGE_BOTH:
int_style = IXP23XX_GPIO_STYLE_TRANSITIONAL;
irq_type = IXP23XX_IRQ_EDGE;
break;
case IRQT_RISING:
case IRQ_TYPE_EDGE_RISING:
int_style = IXP23XX_GPIO_STYLE_RISING_EDGE;
irq_type = IXP23XX_IRQ_EDGE;
break;
case IRQT_FALLING:
case IRQ_TYPE_EDGE_FALLING:
int_style = IXP23XX_GPIO_STYLE_FALLING_EDGE;
irq_type = IXP23XX_IRQ_EDGE;
break;
case IRQT_HIGH:
case IRQ_TYPE_LEVEL_HIGH:
int_style = IXP23XX_GPIO_STYLE_ACTIVE_HIGH;
irq_type = IXP23XX_IRQ_LEVEL;
break;
case IRQT_LOW:
case IRQ_TYPE_LEVEL_LOW:
int_style = IXP23XX_GPIO_STYLE_ACTIVE_LOW;
irq_type = IXP23XX_IRQ_LEVEL;
break;


@ -110,8 +110,8 @@ static int __init roadrunner_map_irq(struct pci_dev *dev, u8 idsel, u8 pin)
static void __init roadrunner_pci_preinit(void)
{
set_irq_type(IRQ_ROADRUNNER_PCI_INTC, IRQT_LOW);
set_irq_type(IRQ_ROADRUNNER_PCI_INTD, IRQT_LOW);
set_irq_type(IRQ_ROADRUNNER_PCI_INTC, IRQ_TYPE_LEVEL_LOW);
set_irq_type(IRQ_ROADRUNNER_PCI_INTD, IRQ_TYPE_LEVEL_LOW);
ixp23xx_pci_preinit();
}


@ -30,10 +30,10 @@
void __init avila_pci_preinit(void)
{
set_irq_type(IRQ_AVILA_PCI_INTA, IRQT_LOW);
set_irq_type(IRQ_AVILA_PCI_INTB, IRQT_LOW);
set_irq_type(IRQ_AVILA_PCI_INTC, IRQT_LOW);
set_irq_type(IRQ_AVILA_PCI_INTD, IRQT_LOW);
set_irq_type(IRQ_AVILA_PCI_INTA, IRQ_TYPE_LEVEL_LOW);
set_irq_type(IRQ_AVILA_PCI_INTB, IRQ_TYPE_LEVEL_LOW);
set_irq_type(IRQ_AVILA_PCI_INTC, IRQ_TYPE_LEVEL_LOW);
set_irq_type(IRQ_AVILA_PCI_INTD, IRQ_TYPE_LEVEL_LOW);
ixp4xx_pci_preinit();
}


@ -142,23 +142,23 @@ static int ixp4xx_set_irq_type(unsigned int irq, unsigned int type)
return -EINVAL;
switch (type){
case IRQT_BOTHEDGE:
case IRQ_TYPE_EDGE_BOTH:
int_style = IXP4XX_GPIO_STYLE_TRANSITIONAL;
irq_type = IXP4XX_IRQ_EDGE;
break;
case IRQT_RISING:
case IRQ_TYPE_EDGE_RISING:
int_style = IXP4XX_GPIO_STYLE_RISING_EDGE;
irq_type = IXP4XX_IRQ_EDGE;
break;
case IRQT_FALLING:
case IRQ_TYPE_EDGE_FALLING:
int_style = IXP4XX_GPIO_STYLE_FALLING_EDGE;
irq_type = IXP4XX_IRQ_EDGE;
break;
case IRQT_HIGH:
case IRQ_TYPE_LEVEL_HIGH:
int_style = IXP4XX_GPIO_STYLE_ACTIVE_HIGH;
irq_type = IXP4XX_IRQ_LEVEL;
break;
case IRQT_LOW:
case IRQ_TYPE_LEVEL_LOW:
int_style = IXP4XX_GPIO_STYLE_ACTIVE_LOW;
irq_type = IXP4XX_IRQ_LEVEL;
break;


@ -27,8 +27,8 @@
void __init coyote_pci_preinit(void)
{
set_irq_type(IRQ_COYOTE_PCI_SLOT0, IRQT_LOW);
set_irq_type(IRQ_COYOTE_PCI_SLOT1, IRQT_LOW);
set_irq_type(IRQ_COYOTE_PCI_SLOT0, IRQ_TYPE_LEVEL_LOW);
set_irq_type(IRQ_COYOTE_PCI_SLOT1, IRQ_TYPE_LEVEL_LOW);
ixp4xx_pci_preinit();
}


@ -25,12 +25,12 @@
void __init dsmg600_pci_preinit(void)
{
set_irq_type(IRQ_DSMG600_PCI_INTA, IRQT_LOW);
set_irq_type(IRQ_DSMG600_PCI_INTB, IRQT_LOW);
set_irq_type(IRQ_DSMG600_PCI_INTC, IRQT_LOW);
set_irq_type(IRQ_DSMG600_PCI_INTD, IRQT_LOW);
set_irq_type(IRQ_DSMG600_PCI_INTE, IRQT_LOW);
set_irq_type(IRQ_DSMG600_PCI_INTF, IRQT_LOW);
set_irq_type(IRQ_DSMG600_PCI_INTA, IRQ_TYPE_LEVEL_LOW);
set_irq_type(IRQ_DSMG600_PCI_INTB, IRQ_TYPE_LEVEL_LOW);
set_irq_type(IRQ_DSMG600_PCI_INTC, IRQ_TYPE_LEVEL_LOW);
set_irq_type(IRQ_DSMG600_PCI_INTD, IRQ_TYPE_LEVEL_LOW);
set_irq_type(IRQ_DSMG600_PCI_INTE, IRQ_TYPE_LEVEL_LOW);
set_irq_type(IRQ_DSMG600_PCI_INTF, IRQ_TYPE_LEVEL_LOW);
ixp4xx_pci_preinit();
}


@ -25,9 +25,9 @@
void __init fsg_pci_preinit(void)
{
set_irq_type(IRQ_FSG_PCI_INTA, IRQT_LOW);
set_irq_type(IRQ_FSG_PCI_INTB, IRQT_LOW);
set_irq_type(IRQ_FSG_PCI_INTC, IRQT_LOW);
set_irq_type(IRQ_FSG_PCI_INTA, IRQ_TYPE_LEVEL_LOW);
set_irq_type(IRQ_FSG_PCI_INTB, IRQ_TYPE_LEVEL_LOW);
set_irq_type(IRQ_FSG_PCI_INTC, IRQ_TYPE_LEVEL_LOW);
ixp4xx_pci_preinit();
}


@ -29,8 +29,8 @@
void __init gateway7001_pci_preinit(void)
{
set_irq_type(IRQ_IXP4XX_GPIO10, IRQT_LOW);
set_irq_type(IRQ_IXP4XX_GPIO11, IRQT_LOW);
set_irq_type(IRQ_IXP4XX_GPIO10, IRQ_TYPE_LEVEL_LOW);
set_irq_type(IRQ_IXP4XX_GPIO11, IRQ_TYPE_LEVEL_LOW);
ixp4xx_pci_preinit();
}


@ -41,10 +41,10 @@
*/
void __init gtwx5715_pci_preinit(void)
{
set_irq_type(GTWX5715_PCI_SLOT0_INTA_IRQ, IRQT_LOW);
set_irq_type(GTWX5715_PCI_SLOT0_INTB_IRQ, IRQT_LOW);
set_irq_type(GTWX5715_PCI_SLOT1_INTA_IRQ, IRQT_LOW);
set_irq_type(GTWX5715_PCI_SLOT1_INTB_IRQ, IRQT_LOW);
set_irq_type(GTWX5715_PCI_SLOT0_INTA_IRQ, IRQ_TYPE_LEVEL_LOW);
set_irq_type(GTWX5715_PCI_SLOT0_INTB_IRQ, IRQ_TYPE_LEVEL_LOW);
set_irq_type(GTWX5715_PCI_SLOT1_INTA_IRQ, IRQ_TYPE_LEVEL_LOW);
set_irq_type(GTWX5715_PCI_SLOT1_INTB_IRQ, IRQ_TYPE_LEVEL_LOW);
ixp4xx_pci_preinit();
}


@ -27,10 +27,10 @@
void __init ixdp425_pci_preinit(void)
{
set_irq_type(IRQ_IXDP425_PCI_INTA, IRQT_LOW);
set_irq_type(IRQ_IXDP425_PCI_INTB, IRQT_LOW);
set_irq_type(IRQ_IXDP425_PCI_INTC, IRQT_LOW);
set_irq_type(IRQ_IXDP425_PCI_INTD, IRQT_LOW);
set_irq_type(IRQ_IXDP425_PCI_INTA, IRQ_TYPE_LEVEL_LOW);
set_irq_type(IRQ_IXDP425_PCI_INTB, IRQ_TYPE_LEVEL_LOW);
set_irq_type(IRQ_IXDP425_PCI_INTC, IRQ_TYPE_LEVEL_LOW);
set_irq_type(IRQ_IXDP425_PCI_INTD, IRQ_TYPE_LEVEL_LOW);
ixp4xx_pci_preinit();
}


@ -25,8 +25,8 @@
void __init ixdpg425_pci_preinit(void)
{
set_irq_type(IRQ_IXP4XX_GPIO6, IRQT_LOW);
set_irq_type(IRQ_IXP4XX_GPIO7, IRQT_LOW);
set_irq_type(IRQ_IXP4XX_GPIO6, IRQ_TYPE_LEVEL_LOW);
set_irq_type(IRQ_IXP4XX_GPIO7, IRQ_TYPE_LEVEL_LOW);
ixp4xx_pci_preinit();
}


@ -24,11 +24,11 @@
void __init nas100d_pci_preinit(void)
{
set_irq_type(IRQ_NAS100D_PCI_INTA, IRQT_LOW);
set_irq_type(IRQ_NAS100D_PCI_INTB, IRQT_LOW);
set_irq_type(IRQ_NAS100D_PCI_INTC, IRQT_LOW);
set_irq_type(IRQ_NAS100D_PCI_INTD, IRQT_LOW);
set_irq_type(IRQ_NAS100D_PCI_INTE, IRQT_LOW);
set_irq_type(IRQ_NAS100D_PCI_INTA, IRQ_TYPE_LEVEL_LOW);
set_irq_type(IRQ_NAS100D_PCI_INTB, IRQ_TYPE_LEVEL_LOW);
set_irq_type(IRQ_NAS100D_PCI_INTC, IRQ_TYPE_LEVEL_LOW);
set_irq_type(IRQ_NAS100D_PCI_INTD, IRQ_TYPE_LEVEL_LOW);
set_irq_type(IRQ_NAS100D_PCI_INTE, IRQ_TYPE_LEVEL_LOW);
ixp4xx_pci_preinit();
}


@ -24,9 +24,9 @@
void __init nslu2_pci_preinit(void)
{
set_irq_type(IRQ_NSLU2_PCI_INTA, IRQT_LOW);
set_irq_type(IRQ_NSLU2_PCI_INTB, IRQT_LOW);
set_irq_type(IRQ_NSLU2_PCI_INTC, IRQT_LOW);
set_irq_type(IRQ_NSLU2_PCI_INTA, IRQ_TYPE_LEVEL_LOW);
set_irq_type(IRQ_NSLU2_PCI_INTB, IRQ_TYPE_LEVEL_LOW);
set_irq_type(IRQ_NSLU2_PCI_INTC, IRQ_TYPE_LEVEL_LOW);
ixp4xx_pci_preinit();
}


@ -29,8 +29,8 @@
void __init wg302v2_pci_preinit(void)
{
set_irq_type(IRQ_IXP4XX_GPIO8, IRQT_LOW);
set_irq_type(IRQ_IXP4XX_GPIO9, IRQT_LOW);
set_irq_type(IRQ_IXP4XX_GPIO8, IRQ_TYPE_LEVEL_LOW);
set_irq_type(IRQ_IXP4XX_GPIO9, IRQ_TYPE_LEVEL_LOW);
ixp4xx_pci_preinit();
}


@ -72,21 +72,21 @@ static int ks8695_irq_set_type(unsigned int irqno, unsigned int type)
ctrl = __raw_readl(KS8695_GPIO_VA + KS8695_IOPC);
switch (type) {
case IRQT_HIGH:
case IRQ_TYPE_LEVEL_HIGH:
mode = IOPC_TM_HIGH;
level_triggered = 1;
break;
case IRQT_LOW:
case IRQ_TYPE_LEVEL_LOW:
mode = IOPC_TM_LOW;
level_triggered = 1;
break;
case IRQT_RISING:
case IRQ_TYPE_EDGE_RISING:
mode = IOPC_TM_RISING;
break;
case IRQT_FALLING:
case IRQ_TYPE_EDGE_FALLING:
mode = IOPC_TM_FALLING;
break;
case IRQT_BOTHEDGE:
case IRQ_TYPE_EDGE_BOTH:
mode = IOPC_TM_EDGE;
break;
default:


@ -99,19 +99,19 @@ netx_hif_irq_type(unsigned int _irq, unsigned int type)
irq = _irq - NETX_IRQ_HIF_CHAINED(0);
if (type & __IRQT_RISEDGE) {
if (type & IRQ_TYPE_EDGE_RISING) {
DEBUG_IRQ("rising edges\n");
val |= (1 << 26) << irq;
}
if (type & __IRQT_FALEDGE) {
if (type & IRQ_TYPE_EDGE_FALLING) {
DEBUG_IRQ("falling edges\n");
val &= ~((1 << 26) << irq);
}
if (type & __IRQT_LOWLVL) {
if (type & IRQ_TYPE_LEVEL_LOW) {
DEBUG_IRQ("low level\n");
val &= ~((1 << 26) << irq);
}
if (type & __IRQT_HIGHLVL) {
if (type & IRQ_TYPE_LEVEL_HIGH) {
DEBUG_IRQ("high level\n");
val |= (1 << 26) << irq;
}


@ -288,7 +288,7 @@ static void __init osk_init_cf(void)
return;
}
/* the CF I/O IRQ is really active-low */
set_irq_type(OMAP_GPIO_IRQ(62), IRQT_FALLING);
set_irq_type(OMAP_GPIO_IRQ(62), IRQ_TYPE_EDGE_FALLING);
}
static void __init osk_init_irq(void)
@ -483,7 +483,7 @@ static void __init osk_mistral_init(void)
omap_cfg_reg(P20_1610_GPIO4); /* PENIRQ */
gpio_request(4, "ts_int");
gpio_direction_input(4);
set_irq_type(OMAP_GPIO_IRQ(4), IRQT_FALLING);
set_irq_type(OMAP_GPIO_IRQ(4), IRQ_TYPE_EDGE_FALLING);
spi_register_board_info(mistral_boardinfo,
ARRAY_SIZE(mistral_boardinfo));
@ -494,7 +494,7 @@ static void __init osk_mistral_init(void)
int ret = 0;
gpio_direction_input(OMAP_MPUIO(2));
set_irq_type(OMAP_GPIO_IRQ(OMAP_MPUIO(2)), IRQT_RISING);
set_irq_type(OMAP_GPIO_IRQ(OMAP_MPUIO(2)), IRQ_TYPE_EDGE_RISING);
#ifdef CONFIG_PM
/* share the IRQ in case someone wants to use the
* button for more than wakeup from system sleep.


@ -298,11 +298,11 @@ palmz71_powercable(int irq, void *dev_id)
if (omap_get_gpio_datain(PALMZ71_USBDETECT_GPIO)) {
printk(KERN_INFO "PM: Power cable connected\n");
set_irq_type(OMAP_GPIO_IRQ(PALMZ71_USBDETECT_GPIO),
IRQT_FALLING);
IRQ_TYPE_EDGE_FALLING);
} else {
printk(KERN_INFO "PM: Power cable disconnected\n");
set_irq_type(OMAP_GPIO_IRQ(PALMZ71_USBDETECT_GPIO),
IRQT_RISING);
IRQ_TYPE_EDGE_RISING);
}
return IRQ_HANDLED;
}


@ -186,10 +186,10 @@ static void __init voiceblue_init(void)
omap_request_gpio(13);
omap_request_gpio(14);
omap_request_gpio(15);
set_irq_type(OMAP_GPIO_IRQ(12), IRQT_RISING);
set_irq_type(OMAP_GPIO_IRQ(13), IRQT_RISING);
set_irq_type(OMAP_GPIO_IRQ(14), IRQT_RISING);
set_irq_type(OMAP_GPIO_IRQ(15), IRQT_RISING);
set_irq_type(OMAP_GPIO_IRQ(12), IRQ_TYPE_EDGE_RISING);
set_irq_type(OMAP_GPIO_IRQ(13), IRQ_TYPE_EDGE_RISING);
set_irq_type(OMAP_GPIO_IRQ(14), IRQ_TYPE_EDGE_RISING);
set_irq_type(OMAP_GPIO_IRQ(15), IRQ_TYPE_EDGE_RISING);
platform_add_devices(voiceblue_devices, ARRAY_SIZE(voiceblue_devices));
omap_board_config = voiceblue_config;


@ -181,7 +181,7 @@ void omap1510_fpga_init_irq(void)
*/
omap_request_gpio(13);
omap_set_gpio_direction(13, 1);
set_irq_type(OMAP_GPIO_IRQ(13), IRQT_RISING);
set_irq_type(OMAP_GPIO_IRQ(13), IRQ_TYPE_EDGE_RISING);
set_irq_chained_handler(OMAP1510_INT_FPGA, innovator_fpga_IRQ_demux);
}


@ -337,17 +337,17 @@ static void __init apollon_sw_init(void)
omap_request_gpio(SW_DOWN_GPIO58);
omap_set_gpio_direction(SW_DOWN_GPIO58, 1);
set_irq_type(OMAP_GPIO_IRQ(SW_ENTER_GPIO16), IRQT_RISING);
set_irq_type(OMAP_GPIO_IRQ(SW_ENTER_GPIO16), IRQ_TYPE_EDGE_RISING);
if (request_irq(OMAP_GPIO_IRQ(SW_ENTER_GPIO16), &apollon_sw_interrupt,
IRQF_SHARED, "enter sw",
&apollon_sw_interrupt))
return;
set_irq_type(OMAP_GPIO_IRQ(SW_UP_GPIO17), IRQT_RISING);
set_irq_type(OMAP_GPIO_IRQ(SW_UP_GPIO17), IRQ_TYPE_EDGE_RISING);
if (request_irq(OMAP_GPIO_IRQ(SW_UP_GPIO17), &apollon_sw_interrupt,
IRQF_SHARED, "up sw",
&apollon_sw_interrupt))
return;
set_irq_type(OMAP_GPIO_IRQ(SW_DOWN_GPIO58), IRQT_RISING);
set_irq_type(OMAP_GPIO_IRQ(SW_DOWN_GPIO58), IRQ_TYPE_EDGE_RISING);
if (request_irq(OMAP_GPIO_IRQ(SW_DOWN_GPIO58), &apollon_sw_interrupt,
IRQF_SHARED, "down sw",
&apollon_sw_interrupt))


@ -213,7 +213,7 @@ void __init db88f5281_pci_preinit(void)
pin = DB88F5281_PCI_SLOT0_IRQ_PIN;
if (gpio_request(pin, "PCI Int1") == 0) {
if (gpio_direction_input(pin) == 0) {
set_irq_type(gpio_to_irq(pin), IRQT_LOW);
set_irq_type(gpio_to_irq(pin), IRQ_TYPE_LEVEL_LOW);
} else {
printk(KERN_ERR "db88f5281_pci_preinit faield to "
"set_irq_type pin %d\n", pin);
@ -226,7 +226,7 @@ void __init db88f5281_pci_preinit(void)
pin = DB88F5281_PCI_SLOT1_SLOT2_IRQ_PIN;
if (gpio_request(pin, "PCI Int2") == 0) {
if (gpio_direction_input(pin) == 0) {
set_irq_type(gpio_to_irq(pin), IRQT_LOW);
set_irq_type(gpio_to_irq(pin), IRQ_TYPE_LEVEL_LOW);
} else {
printk(KERN_ERR "db88f5281_pci_preinit faield "
"to set_irq_type pin %d\n", pin);


@ -91,27 +91,27 @@ static int orion5x_gpio_set_irq_type(u32 irq, u32 type)
desc = irq_desc + irq;
switch (type) {
case IRQT_HIGH:
case IRQ_TYPE_LEVEL_HIGH:
desc->handle_irq = handle_level_irq;
desc->status |= IRQ_LEVEL;
orion5x_clrbits(GPIO_IN_POL, (1 << pin));
break;
case IRQT_LOW:
case IRQ_TYPE_LEVEL_LOW:
desc->handle_irq = handle_level_irq;
desc->status |= IRQ_LEVEL;
orion5x_setbits(GPIO_IN_POL, (1 << pin));
break;
case IRQT_RISING:
case IRQ_TYPE_EDGE_RISING:
desc->handle_irq = handle_edge_irq;
desc->status &= ~IRQ_LEVEL;
orion5x_clrbits(GPIO_IN_POL, (1 << pin));
break;
case IRQT_FALLING:
case IRQ_TYPE_EDGE_FALLING:
desc->handle_irq = handle_edge_irq;
desc->status &= ~IRQ_LEVEL;
orion5x_setbits(GPIO_IN_POL, (1 << pin));
break;
case IRQT_BOTHEDGE:
case IRQ_TYPE_EDGE_BOTH:
desc->handle_irq = handle_edge_irq;
desc->status &= ~IRQ_LEVEL;
/*
@ -156,7 +156,7 @@ static void orion5x_gpio_irq_handler(unsigned int irq, struct irq_desc *desc)
if (cause & (1 << pin)) {
irq = gpio_to_irq(pin);
desc = irq_desc + irq;
if ((desc->status & IRQ_TYPE_SENSE_MASK) == IRQT_BOTHEDGE) {
if ((desc->status & IRQ_TYPE_SENSE_MASK) == IRQ_TYPE_EDGE_BOTH) {
/* Swap polarity (race with GPIO line) */
u32 polarity = readl(GPIO_IN_POL);
polarity ^= 1 << pin;


@ -148,7 +148,7 @@ void __init rd88f5182_pci_preinit(void)
pin = RD88F5182_PCI_SLOT0_IRQ_A_PIN;
if (gpio_request(pin, "PCI IntA") == 0) {
if (gpio_direction_input(pin) == 0) {
set_irq_type(gpio_to_irq(pin), IRQT_LOW);
set_irq_type(gpio_to_irq(pin), IRQ_TYPE_LEVEL_LOW);
} else {
printk(KERN_ERR "rd88f5182_pci_preinit faield to "
"set_irq_type pin %d\n", pin);
@ -161,7 +161,7 @@ void __init rd88f5182_pci_preinit(void)
pin = RD88F5182_PCI_SLOT0_IRQ_B_PIN;
if (gpio_request(pin, "PCI IntB") == 0) {
if (gpio_direction_input(pin) == 0) {
set_irq_type(gpio_to_irq(pin), IRQT_LOW);
set_irq_type(gpio_to_irq(pin), IRQ_TYPE_LEVEL_LOW);
} else {
printk(KERN_ERR "rd88f5182_pci_preinit faield to "
"set_irq_type pin %d\n", pin);


@ -117,7 +117,7 @@ void __init qnap_ts209_pci_preinit(void)
pin = QNAP_TS209_PCI_SLOT0_IRQ_PIN;
if (gpio_request(pin, "PCI Int1") == 0) {
if (gpio_direction_input(pin) == 0) {
set_irq_type(gpio_to_irq(pin), IRQT_LOW);
set_irq_type(gpio_to_irq(pin), IRQ_TYPE_LEVEL_LOW);
} else {
printk(KERN_ERR "qnap_ts209_pci_preinit failed to "
"set_irq_type pin %d\n", pin);
@ -131,7 +131,7 @@ void __init qnap_ts209_pci_preinit(void)
pin = QNAP_TS209_PCI_SLOT1_IRQ_PIN;
if (gpio_request(pin, "PCI Int2") == 0) {
if (gpio_direction_input(pin) == 0) {
set_irq_type(gpio_to_irq(pin), IRQT_LOW);
set_irq_type(gpio_to_irq(pin), IRQ_TYPE_LEVEL_LOW);
} else {
printk(KERN_ERR "qnap_ts209_pci_preinit failed "
"to set_irq_type pin %d\n", pin);


@ -56,28 +56,28 @@ static void pnx4008_mask_ack_irq(unsigned int irq)
static int pnx4008_set_irq_type(unsigned int irq, unsigned int type)
{
switch (type) {
case IRQT_RISING:
case IRQ_TYPE_EDGE_RISING:
__raw_writel(__raw_readl(INTC_ATR(irq)) | INTC_BIT(irq), INTC_ATR(irq)); /*edge sensitive */
__raw_writel(__raw_readl(INTC_APR(irq)) | INTC_BIT(irq), INTC_APR(irq)); /*rising edge */
set_irq_handler(irq, handle_edge_irq);
break;
case IRQT_FALLING:
case IRQ_TYPE_EDGE_FALLING:
__raw_writel(__raw_readl(INTC_ATR(irq)) | INTC_BIT(irq), INTC_ATR(irq)); /*edge sensitive */
__raw_writel(__raw_readl(INTC_APR(irq)) & ~INTC_BIT(irq), INTC_APR(irq)); /*falling edge */
set_irq_handler(irq, handle_edge_irq);
break;
case IRQT_LOW:
case IRQ_TYPE_LEVEL_LOW:
__raw_writel(__raw_readl(INTC_ATR(irq)) & ~INTC_BIT(irq), INTC_ATR(irq)); /*level sensitive */
__raw_writel(__raw_readl(INTC_APR(irq)) & ~INTC_BIT(irq), INTC_APR(irq)); /*low level */
set_irq_handler(irq, handle_level_irq);
break;
case IRQT_HIGH:
case IRQ_TYPE_LEVEL_HIGH:
__raw_writel(__raw_readl(INTC_ATR(irq)) & ~INTC_BIT(irq), INTC_ATR(irq)); /*level sensitive */
__raw_writel(__raw_readl(INTC_APR(irq)) | INTC_BIT(irq), INTC_APR(irq)); /* high level */
set_irq_handler(irq, handle_level_irq);
break;
/* IRQT_BOTHEDGE is not supported */
/* IRQ_TYPE_EDGE_BOTH is not supported */
default:
printk(KERN_ERR "PNX4008 IRQ: Unsupported irq type %d\n", type);
return -1;


@ -71,7 +71,7 @@ void __cmx270_pci_init_irq(int irq_gpio)
cmx270_it8152_irq_gpio = irq_gpio;
set_irq_type(gpio_to_irq(irq_gpio), IRQT_RISING);
set_irq_type(gpio_to_irq(irq_gpio), IRQ_TYPE_EDGE_RISING);
set_irq_chained_handler(gpio_to_irq(irq_gpio), cmx270_it8152_irq_demux);
}


@ -113,7 +113,7 @@ static void __init lpd270_init_irq(void)
set_irq_flags(irq, IRQF_VALID | IRQF_PROBE);
}
set_irq_chained_handler(IRQ_GPIO(0), lpd270_irq_handler);
set_irq_type(IRQ_GPIO(0), IRQT_FALLING);
set_irq_type(IRQ_GPIO(0), IRQ_TYPE_EDGE_FALLING);
}


@ -152,7 +152,7 @@ static void __init lubbock_init_irq(void)
}
set_irq_chained_handler(IRQ_GPIO(0), lubbock_irq_handler);
set_irq_type(IRQ_GPIO(0), IRQT_FALLING);
set_irq_type(IRQ_GPIO(0), IRQ_TYPE_EDGE_FALLING);
}
#ifdef CONFIG_PM


@ -191,7 +191,7 @@ static void __init mainstone_init_irq(void)
MST_INTSETCLR = 0;
set_irq_chained_handler(IRQ_GPIO(0), mainstone_irq_handler);
set_irq_type(IRQ_GPIO(0), IRQT_FALLING);
set_irq_type(IRQ_GPIO(0), IRQ_TYPE_EDGE_FALLING);
}
#ifdef CONFIG_PM


@ -146,18 +146,18 @@ void sharpsl_pm_pxa_init(void)
if (request_irq(IRQ_GPIO(sharpsl_pm.machinfo->gpio_acin), sharpsl_ac_isr, IRQF_DISABLED, "AC Input Detect", sharpsl_ac_isr)) {
dev_err(sharpsl_pm.dev, "Could not get irq %d.\n", IRQ_GPIO(sharpsl_pm.machinfo->gpio_acin));
}
else set_irq_type(IRQ_GPIO(sharpsl_pm.machinfo->gpio_acin),IRQT_BOTHEDGE);
else set_irq_type(IRQ_GPIO(sharpsl_pm.machinfo->gpio_acin),IRQ_TYPE_EDGE_BOTH);
if (request_irq(IRQ_GPIO(sharpsl_pm.machinfo->gpio_batlock), sharpsl_fatal_isr, IRQF_DISABLED, "Battery Cover", sharpsl_fatal_isr)) {
dev_err(sharpsl_pm.dev, "Could not get irq %d.\n", IRQ_GPIO(sharpsl_pm.machinfo->gpio_batlock));
}
else set_irq_type(IRQ_GPIO(sharpsl_pm.machinfo->gpio_batlock),IRQT_FALLING);
else set_irq_type(IRQ_GPIO(sharpsl_pm.machinfo->gpio_batlock),IRQ_TYPE_EDGE_FALLING);
if (sharpsl_pm.machinfo->gpio_fatal) {
if (request_irq(IRQ_GPIO(sharpsl_pm.machinfo->gpio_fatal), sharpsl_fatal_isr, IRQF_DISABLED, "Fatal Battery", sharpsl_fatal_isr)) {
dev_err(sharpsl_pm.dev, "Could not get irq %d.\n", IRQ_GPIO(sharpsl_pm.machinfo->gpio_fatal));
}
else set_irq_type(IRQ_GPIO(sharpsl_pm.machinfo->gpio_fatal),IRQT_FALLING);
else set_irq_type(IRQ_GPIO(sharpsl_pm.machinfo->gpio_fatal),IRQ_TYPE_EDGE_FALLING);
}
if (sharpsl_pm.machinfo->batfull_irq)
@ -166,7 +166,7 @@ void sharpsl_pm_pxa_init(void)
if (request_irq(IRQ_GPIO(sharpsl_pm.machinfo->gpio_batfull), sharpsl_chrg_full_isr, IRQF_DISABLED, "CO", sharpsl_chrg_full_isr)) {
dev_err(sharpsl_pm.dev, "Could not get irq %d.\n", IRQ_GPIO(sharpsl_pm.machinfo->gpio_batfull));
}
else set_irq_type(IRQ_GPIO(sharpsl_pm.machinfo->gpio_batfull),IRQT_RISING);
else set_irq_type(IRQ_GPIO(sharpsl_pm.machinfo->gpio_batfull),IRQ_TYPE_EDGE_RISING);
}
}


@ -122,7 +122,7 @@ static struct resource dm9000_resources[] = {
[2] = {
.start = TRIZEPS4_ETH_IRQ,
.end = TRIZEPS4_ETH_IRQ,
.flags = (IORESOURCE_IRQ | IRQT_RISING),
.flags = (IORESOURCE_IRQ | IRQ_TYPE_EDGE_RISING),
},
};


@ -96,7 +96,7 @@ static struct resource cerf_flash_resource = {
static void __init cerf_init_irq(void)
{
sa1100_init_irq();
set_irq_type(CERF_ETH_IRQ, IRQT_RISING);
set_irq_type(CERF_ETH_IRQ, IRQ_TYPE_EDGE_RISING);
}
static struct map_desc cerf_io_desc[] __initdata = {


@ -834,7 +834,7 @@ static void __init h3800_init_irq(void)
set_irq_chip(irq, &h3800_gpio_irqchip);
}
#endif
set_irq_type(IRQ_GPIO_H3800_ASIC, IRQT_RISING);
set_irq_type(IRQ_GPIO_H3800_ASIC, IRQ_TYPE_EDGE_RISING);
set_irq_chained_handler(IRQ_GPIO_H3800_ASIC, h3800_IRQ_demux);
}


@ -46,17 +46,17 @@ static int sa1100_gpio_type(unsigned int irq, unsigned int type)
else
mask = GPIO11_27_MASK(irq);
if (type == IRQT_PROBE) {
if (type == IRQ_TYPE_PROBE) {
if ((GPIO_IRQ_rising_edge | GPIO_IRQ_falling_edge) & mask)
return 0;
type = __IRQT_RISEDGE | __IRQT_FALEDGE;
type = IRQ_TYPE_EDGE_RISING | IRQ_TYPE_EDGE_FALLING;
}
if (type & __IRQT_RISEDGE) {
if (type & IRQ_TYPE_EDGE_RISING) {
GPIO_IRQ_rising_edge |= mask;
} else
GPIO_IRQ_rising_edge &= ~mask;
if (type & __IRQT_FALEDGE) {
if (type & IRQ_TYPE_EDGE_FALLING) {
GPIO_IRQ_falling_edge |= mask;
} else
GPIO_IRQ_falling_edge &= ~mask;


@ -151,7 +151,7 @@ static int __devinit neponset_probe(struct platform_device *dev)
/*
* Install handler for GPIO25.
*/
set_irq_type(IRQ_GPIO25, IRQT_RISING);
set_irq_type(IRQ_GPIO25, IRQ_TYPE_EDGE_RISING);
set_irq_chained_handler(IRQ_GPIO25, neponset_irq_handler);
/*


@ -143,7 +143,7 @@ static void __init pleb_map_io(void)
GPDR &= ~GPIO_ETH0_IRQ;
set_irq_type(GPIO_ETH0_IRQ, IRQT_FALLING);
set_irq_type(GPIO_ETH0_IRQ, IRQ_TYPE_EDGE_FALLING);
}
MACHINE_START(PLEB, "PLEB")


@ -37,7 +37,7 @@ static int adjust_pte(struct vm_area_struct *vma, unsigned long address)
pgd_t *pgd;
pmd_t *pmd;
pte_t *pte, entry;
int ret = 0;
int ret;
pgd = pgd_offset(vma->vm_mm, address);
if (pgd_none(*pgd))
@ -54,16 +54,20 @@ static int adjust_pte(struct vm_area_struct *vma, unsigned long address)
pte = pte_offset_map(pmd, address);
entry = *pte;
/*
* If this page is present, it's actually being shared.
*/
ret = pte_present(entry);
/*
* If this page isn't present, or is already setup to
* fault (ie, is old), we can safely ignore any issues.
*/
if (pte_present(entry) && pte_val(entry) & shared_pte_mask) {
if (ret && pte_val(entry) & shared_pte_mask) {
flush_cache_page(vma, address, pte_pfn(entry));
pte_val(entry) &= ~shared_pte_mask;
set_pte_at(vma->vm_mm, address, pte, entry);
flush_tlb_page(vma, address);
ret = 1;
}
pte_unmap(pte);
return ret;


@ -73,19 +73,19 @@ static int gpio_set_irq_type(u32 irq, u32 type)
void __iomem *reg = port->base;
switch (type) {
case IRQT_RISING:
case IRQ_TYPE_EDGE_RISING:
edge = GPIO_INT_RISE_EDGE;
break;
case IRQT_FALLING:
case IRQ_TYPE_EDGE_FALLING:
edge = GPIO_INT_FALL_EDGE;
break;
case IRQT_LOW:
case IRQ_TYPE_LEVEL_LOW:
edge = GPIO_INT_LOW_LEV;
break;
case IRQT_HIGH:
case IRQ_TYPE_LEVEL_HIGH:
edge = GPIO_INT_HIGH_LEV;
break;
default: /* this includes IRQT_BOTHEDGE */
default: /* this includes IRQ_TYPE_EDGE_BOTH */
return -EINVAL;
}


@ -517,13 +517,13 @@ static inline void set_24xx_gpio_triggering(struct gpio_bank *bank, int gpio,
u32 gpio_bit = 1 << gpio;
MOD_REG_BIT(OMAP24XX_GPIO_LEVELDETECT0, gpio_bit,
trigger & __IRQT_LOWLVL);
trigger & IRQ_TYPE_LEVEL_LOW);
MOD_REG_BIT(OMAP24XX_GPIO_LEVELDETECT1, gpio_bit,
trigger & __IRQT_HIGHLVL);
trigger & IRQ_TYPE_LEVEL_HIGH);
MOD_REG_BIT(OMAP24XX_GPIO_RISINGDETECT, gpio_bit,
trigger & __IRQT_RISEDGE);
trigger & IRQ_TYPE_EDGE_RISING);
MOD_REG_BIT(OMAP24XX_GPIO_FALLINGDETECT, gpio_bit,
trigger & __IRQT_FALEDGE);
trigger & IRQ_TYPE_EDGE_FALLING);
if (likely(!(bank->non_wakeup_gpios & gpio_bit))) {
if (trigger != 0)
@ -555,9 +555,9 @@ static int _set_gpio_triggering(struct gpio_bank *bank, int gpio, int trigger)
case METHOD_MPUIO:
reg += OMAP_MPUIO_GPIO_INT_EDGE;
l = __raw_readl(reg);
if (trigger & __IRQT_RISEDGE)
if (trigger & IRQ_TYPE_EDGE_RISING)
l |= 1 << gpio;
else if (trigger & __IRQT_FALEDGE)
else if (trigger & IRQ_TYPE_EDGE_FALLING)
l &= ~(1 << gpio);
else
goto bad;
@ -567,9 +567,9 @@ static int _set_gpio_triggering(struct gpio_bank *bank, int gpio, int trigger)
case METHOD_GPIO_1510:
reg += OMAP1510_GPIO_INT_CONTROL;
l = __raw_readl(reg);
if (trigger & __IRQT_RISEDGE)
if (trigger & IRQ_TYPE_EDGE_RISING)
l |= 1 << gpio;
else if (trigger & __IRQT_FALEDGE)
else if (trigger & IRQ_TYPE_EDGE_FALLING)
l &= ~(1 << gpio);
else
goto bad;
@ -584,9 +584,9 @@ static int _set_gpio_triggering(struct gpio_bank *bank, int gpio, int trigger)
gpio &= 0x07;
l = __raw_readl(reg);
l &= ~(3 << (gpio << 1));
if (trigger & __IRQT_RISEDGE)
if (trigger & IRQ_TYPE_EDGE_RISING)
l |= 2 << (gpio << 1);
if (trigger & __IRQT_FALEDGE)
if (trigger & IRQ_TYPE_EDGE_FALLING)
l |= 1 << (gpio << 1);
if (trigger)
/* Enable wake-up during idle for dynamic tick */
@ -599,9 +599,9 @@ static int _set_gpio_triggering(struct gpio_bank *bank, int gpio, int trigger)
case METHOD_GPIO_730:
reg += OMAP730_GPIO_INT_CONTROL;
l = __raw_readl(reg);
if (trigger & __IRQT_RISEDGE)
if (trigger & IRQ_TYPE_EDGE_RISING)
l |= 1 << gpio;
else if (trigger & __IRQT_FALEDGE)
else if (trigger & IRQ_TYPE_EDGE_FALLING)
l &= ~(1 << gpio);
else
goto bad;
@ -887,7 +887,7 @@ static void _reset_gpio(struct gpio_bank *bank, int gpio)
_set_gpio_direction(bank, get_gpio_index(gpio), 1);
_set_gpio_irqenable(bank, gpio, 0);
_clear_gpio_irqstatus(bank, gpio);
_set_gpio_triggering(bank, get_gpio_index(gpio), IRQT_NOEDGE);
_set_gpio_triggering(bank, get_gpio_index(gpio), IRQ_TYPE_NONE);
}
/* Use disable_irq_wake() and enable_irq_wake() functions from drivers */
@ -924,7 +924,7 @@ int omap_request_gpio(int gpio)
/* Set trigger to none. You need to enable the desired trigger with
* request_irq() or set_irq_type().
*/
_set_gpio_triggering(bank, get_gpio_index(gpio), IRQT_NOEDGE);
_set_gpio_triggering(bank, get_gpio_index(gpio), IRQ_TYPE_NONE);
#ifdef CONFIG_ARCH_OMAP15XX
if (bank->method == METHOD_GPIO_1510) {


@ -9,7 +9,7 @@ config PLAT_S3C24XX
depends on ARCH_S3C2410
default y if ARCH_S3C2410
select NO_IOPORT
select HAVE_GPIO_LIB
select ARCH_REQUIRE_GPIOLIB
help
Base platform code for any Samsung S3C24XX device


@ -292,27 +292,27 @@ s3c_irqext_type(unsigned int irq, unsigned int type)
/* Set the external interrupt to pointed trigger type */
switch (type)
{
case IRQT_NOEDGE:
case IRQ_TYPE_NONE:
printk(KERN_WARNING "No edge setting!\n");
break;
case IRQT_RISING:
case IRQ_TYPE_EDGE_RISING:
newvalue = S3C2410_EXTINT_RISEEDGE;
break;
case IRQT_FALLING:
case IRQ_TYPE_EDGE_FALLING:
newvalue = S3C2410_EXTINT_FALLEDGE;
break;
case IRQT_BOTHEDGE:
case IRQ_TYPE_EDGE_BOTH:
newvalue = S3C2410_EXTINT_BOTHEDGE;
break;
case IRQT_LOW:
case IRQ_TYPE_LEVEL_LOW:
newvalue = S3C2410_EXTINT_LOWLEV;
break;
case IRQT_HIGH:
case IRQ_TYPE_LEVEL_HIGH:
newvalue = S3C2410_EXTINT_HILEV;
break;


@ -21,6 +21,8 @@
#include <asm/io.h>
#include <asm/setup.h>
#include <asm/atmel-mci.h>
#include <asm/arch/at32ap700x.h>
#include <asm/arch/board.h>
#include <asm/arch/init.h>
@ -260,6 +262,21 @@ void __init setup_board(void)
at32_setup_serial_console(0);
}
#ifndef CONFIG_BOARD_ATSTK100X_SW2_CUSTOM
/* MMC card detect requires MACB0 *NOT* be used */
#ifdef CONFIG_BOARD_ATSTK1002_SW6_CUSTOM
static struct mci_platform_data __initdata mci0_data = {
.detect_pin = GPIO_PIN_PC(14), /* gpio30/sdcd */
.wp_pin = GPIO_PIN_PC(15), /* gpio31/sdwp */
};
#define MCI_PDATA &mci0_data
#else
#define MCI_PDATA NULL
#endif /* SW6 for sd{cd,wp} routing */
#endif /* SW2 for MMC signal routing */
static int __init atstk1002_init(void)
{
/*
@ -309,7 +326,7 @@ static int __init atstk1002_init(void)
at32_add_device_spi(1, spi1_board_info, ARRAY_SIZE(spi1_board_info));
#endif
#ifndef CONFIG_BOARD_ATSTK1002_SW2_CUSTOM
at32_add_device_mci(0, NULL);
at32_add_device_mci(0, MCI_PDATA);
#endif
#ifdef CONFIG_BOARD_ATSTK1002_SW5_CUSTOM
set_hw_addr(at32_add_device_eth(1, &eth_data[1]));


@ -154,7 +154,7 @@ static int __init atstk1003_init(void)
at32_add_device_spi(1, spi1_board_info, ARRAY_SIZE(spi1_board_info));
#endif
#ifndef CONFIG_BOARD_ATSTK100X_SW2_CUSTOM
at32_add_device_mci(0);
at32_add_device_mci(0, NULL);
#endif
at32_add_device_usba(0, NULL);
#ifndef CONFIG_BOARD_ATSTK100X_SW3_CUSTOM


@ -137,7 +137,7 @@ static int __init atstk1004_init(void)
at32_add_device_spi(1, spi1_board_info, ARRAY_SIZE(spi1_board_info));
#endif
#ifndef CONFIG_BOARD_ATSTK100X_SW2_CUSTOM
at32_add_device_mci(0);
at32_add_device_mci(0, NULL);
#endif
at32_add_device_lcdc(0, &atstk1000_lcdc_data,
fbmem_start, fbmem_size, 0);


@ -43,6 +43,9 @@ static irqreturn_t timer_interrupt(int irq, void *dev_id)
{
struct clock_event_device *evdev = dev_id;
if (unlikely(!(intc_get_pending(0) & 1)))
return IRQ_NONE;
/*
* Disable the interrupt until the clockevent subsystem
* reprograms it.
@ -55,7 +58,8 @@ static irqreturn_t timer_interrupt(int irq, void *dev_id)
static struct irqaction timer_irqaction = {
.handler = timer_interrupt,
.flags = IRQF_TIMER | IRQF_DISABLED,
/* Oprofile uses the same irq as the timer, so allow it to be shared */
.flags = IRQF_TIMER | IRQF_DISABLED | IRQF_SHARED,
.name = "avr32_comparator",
};


@ -12,6 +12,7 @@
#include <linux/init.h>
#include <linux/platform_device.h>
#include <linux/dma-mapping.h>
#include <linux/gpio.h>
#include <linux/spi/spi.h>
#include <linux/usb/atmel_usba_udc.h>
@ -1285,7 +1286,6 @@ at32_add_device_mci(unsigned int id, struct mci_platform_data *data)
{
struct mci_platform_data _data;
struct platform_device *pdev;
struct dw_dma_slave *dws;
if (id != 0)
return NULL;
@ -1300,7 +1300,9 @@ at32_add_device_mci(unsigned int id, struct mci_platform_data *data)
if (!data) {
data = &_data;
memset(data, 0, sizeof(struct mci_platform_data));
memset(data, -1, sizeof(struct mci_platform_data));
data->detect_pin = GPIO_PIN_NONE;
data->wp_pin = GPIO_PIN_NONE;
}
if (platform_device_add_data(pdev, data,
@ -1314,12 +1316,10 @@ at32_add_device_mci(unsigned int id, struct mci_platform_data *data)
select_peripheral(PA(14), PERIPH_A, 0); /* DATA2 */
select_peripheral(PA(15), PERIPH_A, 0); /* DATA3 */
if (data) {
if (data->detect_pin != GPIO_PIN_NONE)
at32_select_gpio(data->detect_pin, 0);
if (data->wp_pin != GPIO_PIN_NONE)
at32_select_gpio(data->wp_pin, 0);
}
if (gpio_is_valid(data->detect_pin))
at32_select_gpio(data->detect_pin, 0);
if (gpio_is_valid(data->wp_pin))
at32_select_gpio(data->wp_pin, 0);
atmel_mci0_pclk.dev = &pdev->dev;
@ -1853,11 +1853,11 @@ at32_add_device_cf(unsigned int id, unsigned int extint,
if (at32_init_ide_or_cf(pdev, data->cs, extint))
goto fail;
if (data->detect_pin != GPIO_PIN_NONE)
if (gpio_is_valid(data->detect_pin))
at32_select_gpio(data->detect_pin, AT32_GPIOF_DEGLITCH);
if (data->reset_pin != GPIO_PIN_NONE)
if (gpio_is_valid(data->reset_pin))
at32_select_gpio(data->reset_pin, 0);
if (data->vcc_pin != GPIO_PIN_NONE)
if (gpio_is_valid(data->vcc_pin))
at32_select_gpio(data->vcc_pin, 0);
/* READY is used as extint, so we can't select it as gpio */
@ -1937,9 +1937,11 @@ static struct clk atmel_ac97c0_pclk = {
.index = 10,
};
struct platform_device *__init at32_add_device_ac97c(unsigned int id)
struct platform_device *__init
at32_add_device_ac97c(unsigned int id, struct ac97c_platform_data *data)
{
struct platform_device *pdev;
struct ac97c_platform_data _data;
if (id != 0)
return NULL;
@ -1950,19 +1952,37 @@ struct platform_device *__init at32_add_device_ac97c(unsigned int id)
if (platform_device_add_resources(pdev, atmel_ac97c0_resource,
ARRAY_SIZE(atmel_ac97c0_resource)))
goto err_add_resources;
goto fail;
select_peripheral(PB(20), PERIPH_B, 0); /* SYNC */
select_peripheral(PB(21), PERIPH_B, 0); /* SDO */
select_peripheral(PB(22), PERIPH_B, 0); /* SDI */
select_peripheral(PB(23), PERIPH_B, 0); /* SCLK */
if (!data) {
data = &_data;
memset(data, 0, sizeof(struct ac97c_platform_data));
data->reset_pin = GPIO_PIN_NONE;
}
data->dma_rx_periph_id = 3;
data->dma_tx_periph_id = 4;
data->dma_controller_id = 0;
if (platform_device_add_data(pdev, data,
sizeof(struct ac97c_platform_data)))
goto fail;
select_peripheral(PB(20), PERIPH_B, 0); /* SDO */
select_peripheral(PB(21), PERIPH_B, 0); /* SYNC */
select_peripheral(PB(22), PERIPH_B, 0); /* SCLK */
select_peripheral(PB(23), PERIPH_B, 0); /* SDI */
/* TODO: gpio_is_valid(data->reset_pin) with kernel 2.6.26. */
if (data->reset_pin != GPIO_PIN_NONE)
at32_select_gpio(data->reset_pin, 0);
atmel_ac97c0_pclk.dev = &pdev->dev;
platform_device_add(pdev);
return pdev;
err_add_resources:
fail:
platform_device_put(pdev);
return NULL;
}


@ -125,9 +125,9 @@ void kvm_arch_hardware_enable(void *garbage)
PAGE_KERNEL));
local_irq_save(saved_psr);
slot = ia64_itr_entry(0x3, KVM_VMM_BASE, pte, KVM_VMM_SHIFT);
local_irq_restore(saved_psr);
if (slot < 0)
return;
local_irq_restore(saved_psr);
spin_lock(&vp_lock);
status = ia64_pal_vp_init_env(kvm_vsa_base ?
@ -160,9 +160,9 @@ void kvm_arch_hardware_disable(void *garbage)
local_irq_save(saved_psr);
slot = ia64_itr_entry(0x3, KVM_VMM_BASE, pte, KVM_VMM_SHIFT);
local_irq_restore(saved_psr);
if (slot < 0)
return;
local_irq_restore(saved_psr);
status = ia64_pal_vp_exit_env(host_iva);
if (status)
@ -1253,6 +1253,7 @@ static int vti_vcpu_setup(struct kvm_vcpu *vcpu, int id)
uninit:
kvm_vcpu_uninit(vcpu);
fail:
local_irq_restore(psr);
return r;
}


@ -177,7 +177,8 @@ void kvmppc_mmu_map(struct kvm_vcpu *vcpu, u64 gvaddr, gfn_t gfn, u64 asid,
vcpu->arch.msr & MSR_PR);
}
void kvmppc_mmu_invalidate(struct kvm_vcpu *vcpu, u64 eaddr, u64 asid)
void kvmppc_mmu_invalidate(struct kvm_vcpu *vcpu, gva_t eaddr,
gva_t eend, u32 asid)
{
unsigned int pid = asid & 0xff;
int i;
@ -191,7 +192,7 @@ void kvmppc_mmu_invalidate(struct kvm_vcpu *vcpu, u64 eaddr, u64 asid)
if (!get_tlb_v(stlbe))
continue;
if (eaddr < get_tlb_eaddr(stlbe))
if (eend < get_tlb_eaddr(stlbe))
continue;
if (eaddr > get_tlb_end(stlbe))


@ -137,7 +137,7 @@ static int kvmppc_emul_tlbwe(struct kvm_vcpu *vcpu, u32 inst)
if (tlbe->word0 & PPC44x_TLB_VALID) {
eaddr = get_tlb_eaddr(tlbe);
asid = (tlbe->word0 & PPC44x_TLB_TS) | tlbe->tid;
kvmppc_mmu_invalidate(vcpu, eaddr, asid);
kvmppc_mmu_invalidate(vcpu, eaddr, get_tlb_end(tlbe), asid);
}
switch (ws) {


@ -18,11 +18,11 @@
#include <asm/uaccess.h>
static inline void __user *__guestaddr_to_user(struct kvm_vcpu *vcpu,
u64 guestaddr)
unsigned long guestaddr)
{
u64 prefix = vcpu->arch.sie_block->prefix;
u64 origin = vcpu->kvm->arch.guest_origin;
u64 memsize = vcpu->kvm->arch.guest_memsize;
unsigned long prefix = vcpu->arch.sie_block->prefix;
unsigned long origin = vcpu->kvm->arch.guest_origin;
unsigned long memsize = vcpu->kvm->arch.guest_memsize;
if (guestaddr < 2 * PAGE_SIZE)
guestaddr += prefix;
@ -37,7 +37,7 @@ static inline void __user *__guestaddr_to_user(struct kvm_vcpu *vcpu,
return (void __user *) guestaddr;
}
static inline int get_guest_u64(struct kvm_vcpu *vcpu, u64 guestaddr,
static inline int get_guest_u64(struct kvm_vcpu *vcpu, unsigned long guestaddr,
u64 *result)
{
void __user *uptr = __guestaddr_to_user(vcpu, guestaddr);
@ -47,10 +47,10 @@ static inline int get_guest_u64(struct kvm_vcpu *vcpu, u64 guestaddr,
if (IS_ERR((void __force *) uptr))
return PTR_ERR((void __force *) uptr);
return get_user(*result, (u64 __user *) uptr);
return get_user(*result, (unsigned long __user *) uptr);
}
static inline int get_guest_u32(struct kvm_vcpu *vcpu, u64 guestaddr,
static inline int get_guest_u32(struct kvm_vcpu *vcpu, unsigned long guestaddr,
u32 *result)
{
void __user *uptr = __guestaddr_to_user(vcpu, guestaddr);
@ -63,7 +63,7 @@ static inline int get_guest_u32(struct kvm_vcpu *vcpu, u64 guestaddr,
return get_user(*result, (u32 __user *) uptr);
}
static inline int get_guest_u16(struct kvm_vcpu *vcpu, u64 guestaddr,
static inline int get_guest_u16(struct kvm_vcpu *vcpu, unsigned long guestaddr,
u16 *result)
{
void __user *uptr = __guestaddr_to_user(vcpu, guestaddr);
@ -76,7 +76,7 @@ static inline int get_guest_u16(struct kvm_vcpu *vcpu, u64 guestaddr,
return get_user(*result, (u16 __user *) uptr);
}
static inline int get_guest_u8(struct kvm_vcpu *vcpu, u64 guestaddr,
static inline int get_guest_u8(struct kvm_vcpu *vcpu, unsigned long guestaddr,
u8 *result)
{
void __user *uptr = __guestaddr_to_user(vcpu, guestaddr);
@ -87,7 +87,7 @@ static inline int get_guest_u8(struct kvm_vcpu *vcpu, u64 guestaddr,
return get_user(*result, (u8 __user *) uptr);
}
static inline int put_guest_u64(struct kvm_vcpu *vcpu, u64 guestaddr,
static inline int put_guest_u64(struct kvm_vcpu *vcpu, unsigned long guestaddr,
u64 value)
{
void __user *uptr = __guestaddr_to_user(vcpu, guestaddr);
@ -100,7 +100,7 @@ static inline int put_guest_u64(struct kvm_vcpu *vcpu, u64 guestaddr,
return put_user(value, (u64 __user *) uptr);
}
static inline int put_guest_u32(struct kvm_vcpu *vcpu, u64 guestaddr,
static inline int put_guest_u32(struct kvm_vcpu *vcpu, unsigned long guestaddr,
u32 value)
{
void __user *uptr = __guestaddr_to_user(vcpu, guestaddr);
@ -113,7 +113,7 @@ static inline int put_guest_u32(struct kvm_vcpu *vcpu, u64 guestaddr,
return put_user(value, (u32 __user *) uptr);
}
static inline int put_guest_u16(struct kvm_vcpu *vcpu, u64 guestaddr,
static inline int put_guest_u16(struct kvm_vcpu *vcpu, unsigned long guestaddr,
u16 value)
{
void __user *uptr = __guestaddr_to_user(vcpu, guestaddr);
@ -126,7 +126,7 @@ static inline int put_guest_u16(struct kvm_vcpu *vcpu, u64 guestaddr,
return put_user(value, (u16 __user *) uptr);
}
static inline int put_guest_u8(struct kvm_vcpu *vcpu, u64 guestaddr,
static inline int put_guest_u8(struct kvm_vcpu *vcpu, unsigned long guestaddr,
u8 value)
{
void __user *uptr = __guestaddr_to_user(vcpu, guestaddr);
@ -138,7 +138,8 @@ static inline int put_guest_u8(struct kvm_vcpu *vcpu, u64 guestaddr,
}
static inline int __copy_to_guest_slow(struct kvm_vcpu *vcpu, u64 guestdest,
static inline int __copy_to_guest_slow(struct kvm_vcpu *vcpu,
unsigned long guestdest,
const void *from, unsigned long n)
{
int rc;
@ -153,12 +154,12 @@ static inline int __copy_to_guest_slow(struct kvm_vcpu *vcpu, u64 guestdest,
return 0;
}
static inline int copy_to_guest(struct kvm_vcpu *vcpu, u64 guestdest,
static inline int copy_to_guest(struct kvm_vcpu *vcpu, unsigned long guestdest,
const void *from, unsigned long n)
{
u64 prefix = vcpu->arch.sie_block->prefix;
u64 origin = vcpu->kvm->arch.guest_origin;
u64 memsize = vcpu->kvm->arch.guest_memsize;
unsigned long prefix = vcpu->arch.sie_block->prefix;
unsigned long origin = vcpu->kvm->arch.guest_origin;
unsigned long memsize = vcpu->kvm->arch.guest_memsize;
if ((guestdest < 2 * PAGE_SIZE) && (guestdest + n > 2 * PAGE_SIZE))
goto slowpath;
@ -189,7 +190,8 @@ static inline int copy_to_guest(struct kvm_vcpu *vcpu, u64 guestdest,
}
static inline int __copy_from_guest_slow(struct kvm_vcpu *vcpu, void *to,
u64 guestsrc, unsigned long n)
unsigned long guestsrc,
unsigned long n)
{
int rc;
unsigned long i;
@ -204,11 +206,11 @@ static inline int __copy_from_guest_slow(struct kvm_vcpu *vcpu, void *to,
}
static inline int copy_from_guest(struct kvm_vcpu *vcpu, void *to,
u64 guestsrc, unsigned long n)
unsigned long guestsrc, unsigned long n)
{
u64 prefix = vcpu->arch.sie_block->prefix;
u64 origin = vcpu->kvm->arch.guest_origin;
u64 memsize = vcpu->kvm->arch.guest_memsize;
unsigned long prefix = vcpu->arch.sie_block->prefix;
unsigned long origin = vcpu->kvm->arch.guest_origin;
unsigned long memsize = vcpu->kvm->arch.guest_memsize;
if ((guestsrc < 2 * PAGE_SIZE) && (guestsrc + n > 2 * PAGE_SIZE))
goto slowpath;
@ -238,11 +240,12 @@ static inline int copy_from_guest(struct kvm_vcpu *vcpu, void *to,
return __copy_from_guest_slow(vcpu, to, guestsrc, n);
}
static inline int copy_to_guest_absolute(struct kvm_vcpu *vcpu, u64 guestdest,
static inline int copy_to_guest_absolute(struct kvm_vcpu *vcpu,
unsigned long guestdest,
const void *from, unsigned long n)
{
u64 origin = vcpu->kvm->arch.guest_origin;
u64 memsize = vcpu->kvm->arch.guest_memsize;
unsigned long origin = vcpu->kvm->arch.guest_origin;
unsigned long memsize = vcpu->kvm->arch.guest_memsize;
if (guestdest + n > memsize)
return -EFAULT;
@ -256,10 +259,11 @@ static inline int copy_to_guest_absolute(struct kvm_vcpu *vcpu, u64 guestdest,
}
static inline int copy_from_guest_absolute(struct kvm_vcpu *vcpu, void *to,
u64 guestsrc, unsigned long n)
unsigned long guestsrc,
unsigned long n)
{
u64 origin = vcpu->kvm->arch.guest_origin;
u64 memsize = vcpu->kvm->arch.guest_memsize;
unsigned long origin = vcpu->kvm->arch.guest_origin;
unsigned long memsize = vcpu->kvm->arch.guest_memsize;
if (guestsrc + n > memsize)
return -EFAULT;

View file

@ -20,7 +20,7 @@
#include "kvm-s390.h"
#include "gaccess.h"
static int handle_lctg(struct kvm_vcpu *vcpu)
static int handle_lctlg(struct kvm_vcpu *vcpu)
{
int reg1 = (vcpu->arch.sie_block->ipa & 0x00f0) >> 4;
int reg3 = vcpu->arch.sie_block->ipa & 0x000f;
@ -30,7 +30,7 @@ static int handle_lctg(struct kvm_vcpu *vcpu)
u64 useraddr;
int reg, rc;
vcpu->stat.instruction_lctg++;
vcpu->stat.instruction_lctlg++;
if ((vcpu->arch.sie_block->ipb & 0xff) != 0x2f)
return -ENOTSUPP;
@ -38,9 +38,12 @@ static int handle_lctg(struct kvm_vcpu *vcpu)
if (base2)
useraddr += vcpu->arch.guest_gprs[base2];
if (useraddr & 7)
return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
reg = reg1;
VCPU_EVENT(vcpu, 5, "lctg r1:%x, r3:%x,b2:%x,d2:%x", reg1, reg3, base2,
VCPU_EVENT(vcpu, 5, "lctlg r1:%x, r3:%x,b2:%x,d2:%x", reg1, reg3, base2,
disp2);
do {
@ -74,6 +77,9 @@ static int handle_lctl(struct kvm_vcpu *vcpu)
if (base2)
useraddr += vcpu->arch.guest_gprs[base2];
if (useraddr & 3)
return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
VCPU_EVENT(vcpu, 5, "lctl r1:%x, r3:%x,b2:%x,d2:%x", reg1, reg3, base2,
disp2);
@ -99,7 +105,7 @@ static intercept_handler_t instruction_handlers[256] = {
[0xae] = kvm_s390_handle_sigp,
[0xb2] = kvm_s390_handle_priv,
[0xb7] = handle_lctl,
[0xeb] = handle_lctg,
[0xeb] = handle_lctlg,
};
static int handle_noop(struct kvm_vcpu *vcpu)

View file

@ -13,6 +13,7 @@
#include <asm/lowcore.h>
#include <asm/uaccess.h>
#include <linux/kvm_host.h>
#include <linux/signal.h>
#include "kvm-s390.h"
#include "gaccess.h"
@ -246,15 +247,10 @@ static void __do_deliver_interrupt(struct kvm_vcpu *vcpu,
default:
BUG();
}
if (exception) {
VCPU_EVENT(vcpu, 1, "%s", "program exception while delivering"
" interrupt");
kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
if (inti->type == KVM_S390_PROGRAM_INT) {
printk(KERN_WARNING "kvm: recursive program check\n");
BUG();
}
printk("kvm: The guest lowcore is not mapped during interrupt "
"delivery, killing userspace\n");
do_exit(SIGKILL);
}
}
@ -277,14 +273,11 @@ static int __try_deliver_ckc_interrupt(struct kvm_vcpu *vcpu)
__LC_EXT_NEW_PSW, sizeof(psw_t));
if (rc == -EFAULT)
exception = 1;
if (exception) {
VCPU_EVENT(vcpu, 1, "%s", "program exception while delivering" \
" ckc interrupt");
kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
return 0;
printk("kvm: The guest lowcore is not mapped during interrupt "
"delivery, killing userspace\n");
do_exit(SIGKILL);
}
return 1;
}

View file

@ -39,7 +39,7 @@ struct kvm_stats_debugfs_item debugfs_entries[] = {
{ "exit_instruction", VCPU_STAT(exit_instruction) },
{ "exit_program_interruption", VCPU_STAT(exit_program_interruption) },
{ "exit_instr_and_program_int", VCPU_STAT(exit_instr_and_program) },
{ "instruction_lctg", VCPU_STAT(instruction_lctg) },
{ "instruction_lctlg", VCPU_STAT(instruction_lctlg) },
{ "instruction_lctl", VCPU_STAT(instruction_lctl) },
{ "deliver_emergency_signal", VCPU_STAT(deliver_emergency_signal) },
{ "deliver_service_signal", VCPU_STAT(deliver_service_signal) },
@ -112,7 +112,12 @@ long kvm_arch_dev_ioctl(struct file *filp,
int kvm_dev_ioctl_check_extension(long ext)
{
return 0;
switch (ext) {
case KVM_CAP_USER_MEMORY:
return 1;
default:
return 0;
}
}
/* Section: vm related */

View file

@ -43,7 +43,8 @@
#define SIGP_STAT_RECEIVER_CHECK 0x00000001UL
static int __sigp_sense(struct kvm_vcpu *vcpu, u16 cpu_addr, u64 *reg)
static int __sigp_sense(struct kvm_vcpu *vcpu, u16 cpu_addr,
unsigned long *reg)
{
struct kvm_s390_float_interrupt *fi = &vcpu->kvm->arch.float_int;
int rc;
@ -167,7 +168,7 @@ static int __sigp_set_arch(struct kvm_vcpu *vcpu, u32 parameter)
}
static int __sigp_set_prefix(struct kvm_vcpu *vcpu, u16 cpu_addr, u32 address,
u64 *reg)
unsigned long *reg)
{
struct kvm_s390_float_interrupt *fi = &vcpu->kvm->arch.float_int;
struct kvm_s390_local_interrupt *li;

View file

@ -456,9 +456,6 @@ is386: movl $2,%ecx # set MP
1:
#endif /* CONFIG_SMP */
jmp *(initial_code)
.align 4
ENTRY(initial_code)
.long i386_start_kernel
/*
* We depend on ET to be correct. This checks for 287/387.
@ -601,6 +598,11 @@ ignore_int:
#endif
iret
.section .cpuinit.data,"wa"
.align 4
ENTRY(initial_code)
.long i386_start_kernel
.section .text
/*
* Real beginning of normal "text" segment

View file

@ -1814,6 +1814,7 @@ int kvm_mmu_unprotect_page_virt(struct kvm_vcpu *vcpu, gva_t gva)
spin_unlock(&vcpu->kvm->mmu_lock);
return r;
}
EXPORT_SYMBOL_GPL(kvm_mmu_unprotect_page_virt);
void __kvm_mmu_free_some_pages(struct kvm_vcpu *vcpu)
{
@ -1870,6 +1871,12 @@ void kvm_enable_tdp(void)
}
EXPORT_SYMBOL_GPL(kvm_enable_tdp);
void kvm_disable_tdp(void)
{
tdp_enabled = false;
}
EXPORT_SYMBOL_GPL(kvm_disable_tdp);
static void free_mmu_pages(struct kvm_vcpu *vcpu)
{
struct kvm_mmu_page *sp;

View file

@ -453,7 +453,8 @@ static __init int svm_hardware_setup(void)
if (npt_enabled) {
printk(KERN_INFO "kvm: Nested Paging enabled\n");
kvm_enable_tdp();
}
} else
kvm_disable_tdp();
return 0;
@ -1007,10 +1008,13 @@ static int pf_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
struct kvm *kvm = svm->vcpu.kvm;
u64 fault_address;
u32 error_code;
bool event_injection = false;
if (!irqchip_in_kernel(kvm) &&
is_external_interrupt(exit_int_info))
is_external_interrupt(exit_int_info)) {
event_injection = true;
push_irq(&svm->vcpu, exit_int_info & SVM_EVTINJ_VEC_MASK);
}
fault_address = svm->vmcb->control.exit_info_2;
error_code = svm->vmcb->control.exit_info_1;
@ -1024,6 +1028,8 @@ static int pf_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
(u32)fault_address, (u32)(fault_address >> 32),
handler);
if (event_injection)
kvm_mmu_unprotect_page_virt(&svm->vcpu, fault_address);
return kvm_mmu_page_fault(&svm->vcpu, fault_address, error_code);
}

View file

@ -2298,6 +2298,8 @@ static int handle_exception(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
cr2 = vmcs_readl(EXIT_QUALIFICATION);
KVMTRACE_3D(PAGE_FAULT, vcpu, error_code, (u32)cr2,
(u32)((u64)cr2 >> 32), handler);
if (vect_info & VECTORING_INFO_VALID_MASK)
kvm_mmu_unprotect_page_virt(vcpu, cr2);
return kvm_mmu_page_fault(vcpu, cr2, error_code);
}
@ -3116,15 +3118,6 @@ static struct kvm_vcpu *vmx_create_vcpu(struct kvm *kvm, unsigned int id)
return ERR_PTR(-ENOMEM);
allocate_vpid(vmx);
if (id == 0 && vm_need_ept()) {
kvm_mmu_set_base_ptes(VMX_EPT_READABLE_MASK |
VMX_EPT_WRITABLE_MASK |
VMX_EPT_DEFAULT_MT << VMX_EPT_MT_EPTE_SHIFT);
kvm_mmu_set_mask_ptes(0ull, VMX_EPT_FAKE_ACCESSED_MASK,
VMX_EPT_FAKE_DIRTY_MASK, 0ull,
VMX_EPT_EXECUTABLE_MASK);
kvm_enable_tdp();
}
err = kvm_vcpu_init(&vmx->vcpu, kvm, id);
if (err)
@ -3303,8 +3296,17 @@ static int __init vmx_init(void)
vmx_disable_intercept_for_msr(vmx_msr_bitmap, MSR_IA32_SYSENTER_ESP);
vmx_disable_intercept_for_msr(vmx_msr_bitmap, MSR_IA32_SYSENTER_EIP);
if (cpu_has_vmx_ept())
if (vm_need_ept()) {
bypass_guest_pf = 0;
kvm_mmu_set_base_ptes(VMX_EPT_READABLE_MASK |
VMX_EPT_WRITABLE_MASK |
VMX_EPT_DEFAULT_MT << VMX_EPT_MT_EPTE_SHIFT);
kvm_mmu_set_mask_ptes(0ull, VMX_EPT_FAKE_ACCESSED_MASK,
VMX_EPT_FAKE_DIRTY_MASK, 0ull,
VMX_EPT_EXECUTABLE_MASK);
kvm_enable_tdp();
} else
kvm_disable_tdp();
if (bypass_guest_pf)
kvm_mmu_set_nonpresent_ptes(~0xffeull, 0ull);

View file

@ -3184,6 +3184,10 @@ static void seg_desct_to_kvm_desct(struct desc_struct *seg_desc, u16 selector,
kvm_desct->base |= seg_desc->base2 << 24;
kvm_desct->limit = seg_desc->limit0;
kvm_desct->limit |= seg_desc->limit << 16;
if (seg_desc->g) {
kvm_desct->limit <<= 12;
kvm_desct->limit |= 0xfff;
}
kvm_desct->selector = selector;
kvm_desct->type = seg_desc->type;
kvm_desct->present = seg_desc->p;
@ -3223,6 +3227,7 @@ static void get_segment_descritptor_dtable(struct kvm_vcpu *vcpu,
static int load_guest_segment_descriptor(struct kvm_vcpu *vcpu, u16 selector,
struct desc_struct *seg_desc)
{
gpa_t gpa;
struct descriptor_table dtable;
u16 index = selector >> 3;
@ -3232,13 +3237,16 @@ static int load_guest_segment_descriptor(struct kvm_vcpu *vcpu, u16 selector,
kvm_queue_exception_e(vcpu, GP_VECTOR, selector & 0xfffc);
return 1;
}
return kvm_read_guest(vcpu->kvm, dtable.base + index * 8, seg_desc, 8);
gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, dtable.base);
gpa += index * 8;
return kvm_read_guest(vcpu->kvm, gpa, seg_desc, 8);
}
/* allowed just for 8 bytes segments */
static int save_guest_segment_descriptor(struct kvm_vcpu *vcpu, u16 selector,
struct desc_struct *seg_desc)
{
gpa_t gpa;
struct descriptor_table dtable;
u16 index = selector >> 3;
@ -3246,7 +3254,9 @@ static int save_guest_segment_descriptor(struct kvm_vcpu *vcpu, u16 selector,
if (dtable.limit < index * 8 + 7)
return 1;
return kvm_write_guest(vcpu->kvm, dtable.base + index * 8, seg_desc, 8);
gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, dtable.base);
gpa += index * 8;
return kvm_write_guest(vcpu->kvm, gpa, seg_desc, 8);
}
static u32 get_tss_base_addr(struct kvm_vcpu *vcpu,
@ -3258,55 +3268,7 @@ static u32 get_tss_base_addr(struct kvm_vcpu *vcpu,
base_addr |= (seg_desc->base1 << 16);
base_addr |= (seg_desc->base2 << 24);
return base_addr;
}
static int load_tss_segment32(struct kvm_vcpu *vcpu,
struct desc_struct *seg_desc,
struct tss_segment_32 *tss)
{
u32 base_addr;
base_addr = get_tss_base_addr(vcpu, seg_desc);
return kvm_read_guest(vcpu->kvm, base_addr, tss,
sizeof(struct tss_segment_32));
}
static int save_tss_segment32(struct kvm_vcpu *vcpu,
struct desc_struct *seg_desc,
struct tss_segment_32 *tss)
{
u32 base_addr;
base_addr = get_tss_base_addr(vcpu, seg_desc);
return kvm_write_guest(vcpu->kvm, base_addr, tss,
sizeof(struct tss_segment_32));
}
static int load_tss_segment16(struct kvm_vcpu *vcpu,
struct desc_struct *seg_desc,
struct tss_segment_16 *tss)
{
u32 base_addr;
base_addr = get_tss_base_addr(vcpu, seg_desc);
return kvm_read_guest(vcpu->kvm, base_addr, tss,
sizeof(struct tss_segment_16));
}
static int save_tss_segment16(struct kvm_vcpu *vcpu,
struct desc_struct *seg_desc,
struct tss_segment_16 *tss)
{
u32 base_addr;
base_addr = get_tss_base_addr(vcpu, seg_desc);
return kvm_write_guest(vcpu->kvm, base_addr, tss,
sizeof(struct tss_segment_16));
return vcpu->arch.mmu.gva_to_gpa(vcpu, base_addr);
}
static u16 get_segment_selector(struct kvm_vcpu *vcpu, int seg)
@ -3466,20 +3428,26 @@ static int load_state_from_tss16(struct kvm_vcpu *vcpu,
}
static int kvm_task_switch_16(struct kvm_vcpu *vcpu, u16 tss_selector,
struct desc_struct *cseg_desc,
u32 old_tss_base,
struct desc_struct *nseg_desc)
{
struct tss_segment_16 tss_segment_16;
int ret = 0;
if (load_tss_segment16(vcpu, cseg_desc, &tss_segment_16))
if (kvm_read_guest(vcpu->kvm, old_tss_base, &tss_segment_16,
sizeof tss_segment_16))
goto out;
save_state_to_tss16(vcpu, &tss_segment_16);
save_tss_segment16(vcpu, cseg_desc, &tss_segment_16);
if (load_tss_segment16(vcpu, nseg_desc, &tss_segment_16))
if (kvm_write_guest(vcpu->kvm, old_tss_base, &tss_segment_16,
sizeof tss_segment_16))
goto out;
if (kvm_read_guest(vcpu->kvm, get_tss_base_addr(vcpu, nseg_desc),
&tss_segment_16, sizeof tss_segment_16))
goto out;
if (load_state_from_tss16(vcpu, &tss_segment_16))
goto out;
@ -3489,20 +3457,26 @@ static int kvm_task_switch_16(struct kvm_vcpu *vcpu, u16 tss_selector,
}
static int kvm_task_switch_32(struct kvm_vcpu *vcpu, u16 tss_selector,
struct desc_struct *cseg_desc,
u32 old_tss_base,
struct desc_struct *nseg_desc)
{
struct tss_segment_32 tss_segment_32;
int ret = 0;
if (load_tss_segment32(vcpu, cseg_desc, &tss_segment_32))
if (kvm_read_guest(vcpu->kvm, old_tss_base, &tss_segment_32,
sizeof tss_segment_32))
goto out;
save_state_to_tss32(vcpu, &tss_segment_32);
save_tss_segment32(vcpu, cseg_desc, &tss_segment_32);
if (load_tss_segment32(vcpu, nseg_desc, &tss_segment_32))
if (kvm_write_guest(vcpu->kvm, old_tss_base, &tss_segment_32,
sizeof tss_segment_32))
goto out;
if (kvm_read_guest(vcpu->kvm, get_tss_base_addr(vcpu, nseg_desc),
&tss_segment_32, sizeof tss_segment_32))
goto out;
if (load_state_from_tss32(vcpu, &tss_segment_32))
goto out;
@ -3517,16 +3491,20 @@ int kvm_task_switch(struct kvm_vcpu *vcpu, u16 tss_selector, int reason)
struct desc_struct cseg_desc;
struct desc_struct nseg_desc;
int ret = 0;
u32 old_tss_base = get_segment_base(vcpu, VCPU_SREG_TR);
u16 old_tss_sel = get_segment_selector(vcpu, VCPU_SREG_TR);
kvm_get_segment(vcpu, &tr_seg, VCPU_SREG_TR);
old_tss_base = vcpu->arch.mmu.gva_to_gpa(vcpu, old_tss_base);
/* FIXME: Handle errors. Failure to read either TSS or their
* descriptors should generate a pagefault.
*/
if (load_guest_segment_descriptor(vcpu, tss_selector, &nseg_desc))
goto out;
if (load_guest_segment_descriptor(vcpu, tr_seg.selector, &cseg_desc))
if (load_guest_segment_descriptor(vcpu, old_tss_sel, &cseg_desc))
goto out;
if (reason != TASK_SWITCH_IRET) {
int cpl;
@ -3544,8 +3522,7 @@ int kvm_task_switch(struct kvm_vcpu *vcpu, u16 tss_selector, int reason)
if (reason == TASK_SWITCH_IRET || reason == TASK_SWITCH_JMP) {
cseg_desc.type &= ~(1 << 1); //clear the B flag
save_guest_segment_descriptor(vcpu, tr_seg.selector,
&cseg_desc);
save_guest_segment_descriptor(vcpu, old_tss_sel, &cseg_desc);
}
if (reason == TASK_SWITCH_IRET) {
@ -3557,10 +3534,10 @@ int kvm_task_switch(struct kvm_vcpu *vcpu, u16 tss_selector, int reason)
kvm_x86_ops->cache_regs(vcpu);
if (nseg_desc.type & 8)
ret = kvm_task_switch_32(vcpu, tss_selector, &cseg_desc,
ret = kvm_task_switch_32(vcpu, tss_selector, old_tss_base,
&nseg_desc);
else
ret = kvm_task_switch_16(vcpu, tss_selector, &cseg_desc,
ret = kvm_task_switch_16(vcpu, tss_selector, old_tss_base,
&nseg_desc);
if (reason == TASK_SWITCH_CALL || reason == TASK_SWITCH_GATE) {

View file
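The seg_desct_to_kvm_desct() hunk above scales the segment limit when the descriptor's granularity (G) bit is set: the 20-bit limit field then counts 4 KiB pages rather than bytes, so the effective byte limit becomes (limit << 12) | 0xfff. A minimal standalone sketch of that arithmetic, with illustrative names only (this is not kernel code):

#include <stdint.h>
#include <stdio.h>

/* Effective byte limit of a segment descriptor, mirroring the hunk above. */
static uint32_t effective_limit(uint32_t raw_limit, int g_bit)
{
	if (g_bit) {
		/* limit counts 4 KiB pages: scale and fill the low 12 bits */
		raw_limit <<= 12;
		raw_limit |= 0xfff;
	}
	return raw_limit;
}

int main(void)
{
	/* 0xfffff pages with G=1 yields the full 4 GiB - 1 limit */
	printf("0x%08x\n", effective_limit(0xfffff, 1));	/* 0xffffffff */
	printf("0x%08x\n", effective_limit(0xfffff, 0));	/* 0x000fffff */
	return 0;
}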

@ -169,7 +169,7 @@ static __devinit int ixp4xx_pata_probe(struct platform_device *pdev)
irq = platform_get_irq(pdev, 0);
if (irq)
set_irq_type(irq, IRQT_RISING);
set_irq_type(irq, IRQ_TYPE_EDGE_RISING);
/* Setup expansion bus chip selects */
*data->cs0_cfg = data->cs0_bits;

View file

@ -125,15 +125,15 @@ static ssize_t flash_read(struct file *file, char __user *buf, size_t size,
ssize_t ret;
if (flashdebug)
printk(KERN_DEBUG "flash_read: flash_read: offset=0x%lX, "
"buffer=%p, count=0x%X.\n", p, buf, count);
printk(KERN_DEBUG "flash_read: flash_read: offset=0x%llx, "
"buffer=%p, count=0x%zx.\n", *ppos, buf, size);
/*
* We now lock against reads and writes. --rmk
*/
if (mutex_lock_interruptible(&nwflash_mutex))
return -ERESTARTSYS;
ret = simple_read_from_buffer(buf, size, ppos, FLASH_BASE, gbFlashSize);
ret = simple_read_from_buffer(buf, size, ppos, (void *)FLASH_BASE, gbFlashSize);
mutex_unlock(&nwflash_mutex);
return ret;

View file

@ -99,6 +99,9 @@ struct talitos_private {
/* next channel to be assigned next incoming descriptor */
atomic_t last_chan;
/* per-channel number of requests pending in channel h/w fifo */
atomic_t *submit_count;
/* per-channel request fifo */
struct talitos_request **fifo;
@ -263,15 +266,15 @@ static int talitos_submit(struct device *dev, struct talitos_desc *desc,
spin_lock_irqsave(&priv->head_lock[ch], flags);
head = priv->head[ch];
request = &priv->fifo[ch][head];
if (request->desc) {
/* request queue is full */
if (!atomic_inc_not_zero(&priv->submit_count[ch])) {
/* h/w fifo is full */
spin_unlock_irqrestore(&priv->head_lock[ch], flags);
return -EAGAIN;
}
head = priv->head[ch];
request = &priv->fifo[ch][head];
/* map descriptor and save caller data */
request->dma_desc = dma_map_single(dev, desc, sizeof(*desc),
DMA_BIDIRECTIONAL);
@ -335,6 +338,9 @@ static void flush_channel(struct device *dev, int ch, int error, int reset_ch)
priv->tail[ch] = (tail + 1) & (priv->fifo_len - 1);
spin_unlock_irqrestore(&priv->tail_lock[ch], flags);
atomic_dec(&priv->submit_count[ch]);
saved_req.callback(dev, saved_req.desc, saved_req.context,
status);
/* channel may resume processing in single desc error case */
@ -842,7 +848,7 @@ static int sg_to_link_tbl(struct scatterlist *sg, int sg_count,
/* adjust (decrease) last one (or two) entry's len to cryptlen */
link_tbl_ptr--;
while (link_tbl_ptr->len <= (-cryptlen)) {
while (be16_to_cpu(link_tbl_ptr->len) <= (-cryptlen)) {
/* Empty this entry, and move to previous one */
cryptlen += be16_to_cpu(link_tbl_ptr->len);
link_tbl_ptr->len = 0;
@ -874,7 +880,7 @@ static int ipsec_esp(struct ipsec_esp_edesc *edesc, struct aead_request *areq,
unsigned int cryptlen = areq->cryptlen;
unsigned int authsize = ctx->authsize;
unsigned int ivsize;
int sg_count;
int sg_count, ret;
/* hmac key */
map_single_talitos_ptr(dev, &desc->ptr[0], ctx->authkeylen, &ctx->key,
@ -978,7 +984,12 @@ static int ipsec_esp(struct ipsec_esp_edesc *edesc, struct aead_request *areq,
map_single_talitos_ptr(dev, &desc->ptr[6], ivsize, ctx->iv, 0,
DMA_FROM_DEVICE);
return talitos_submit(dev, desc, callback, areq);
ret = talitos_submit(dev, desc, callback, areq);
if (ret != -EINPROGRESS) {
ipsec_esp_unmap(dev, edesc, areq);
kfree(edesc);
}
return ret;
}
@ -1009,6 +1020,8 @@ static struct ipsec_esp_edesc *ipsec_esp_edesc_alloc(struct aead_request *areq,
struct talitos_ctx *ctx = crypto_aead_ctx(authenc);
struct ipsec_esp_edesc *edesc;
int src_nents, dst_nents, alloc_len, dma_len;
gfp_t flags = areq->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL :
GFP_ATOMIC;
if (areq->cryptlen + ctx->authsize > TALITOS_MAX_DATA_LEN) {
dev_err(ctx->dev, "cryptlen exceeds h/w max limit\n");
@ -1022,7 +1035,7 @@ static struct ipsec_esp_edesc *ipsec_esp_edesc_alloc(struct aead_request *areq,
dst_nents = src_nents;
} else {
dst_nents = sg_count(areq->dst, areq->cryptlen + ctx->authsize);
dst_nents = (dst_nents == 1) ? 0 : src_nents;
dst_nents = (dst_nents == 1) ? 0 : dst_nents;
}
/*
@ -1040,7 +1053,7 @@ static struct ipsec_esp_edesc *ipsec_esp_edesc_alloc(struct aead_request *areq,
alloc_len += icv_stashing ? ctx->authsize : 0;
}
edesc = kmalloc(alloc_len, GFP_DMA);
edesc = kmalloc(alloc_len, GFP_DMA | flags);
if (!edesc) {
dev_err(ctx->dev, "could not allocate edescriptor\n");
return ERR_PTR(-ENOMEM);
@ -1337,6 +1350,7 @@ static int __devexit talitos_remove(struct of_device *ofdev)
if (hw_supports(dev, DESC_HDR_SEL0_RNG))
talitos_unregister_rng(dev);
kfree(priv->submit_count);
kfree(priv->tail);
kfree(priv->head);
@ -1466,9 +1480,6 @@ static int talitos_probe(struct of_device *ofdev,
goto err_out;
}
of_node_put(np);
np = NULL;
priv->head_lock = kmalloc(sizeof(spinlock_t) * priv->num_channels,
GFP_KERNEL);
priv->tail_lock = kmalloc(sizeof(spinlock_t) * priv->num_channels,
@ -1504,6 +1515,16 @@ static int talitos_probe(struct of_device *ofdev,
}
}
priv->submit_count = kmalloc(sizeof(atomic_t) * priv->num_channels,
GFP_KERNEL);
if (!priv->submit_count) {
dev_err(dev, "failed to allocate fifo submit count space\n");
err = -ENOMEM;
goto err_out;
}
for (i = 0; i < priv->num_channels; i++)
atomic_set(&priv->submit_count[i], -priv->chfifo_len);
priv->head = kzalloc(sizeof(int) * priv->num_channels, GFP_KERNEL);
priv->tail = kzalloc(sizeof(int) * priv->num_channels, GFP_KERNEL);
if (!priv->head || !priv->tail) {
@ -1559,8 +1580,6 @@ static int talitos_probe(struct of_device *ofdev,
err_out:
talitos_remove(ofdev);
if (np)
of_node_put(np);
return err;
}

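The submit_count changes above replace the "request->desc already set" test with a per-channel atomic counter that starts at -chfifo_len: each accepted request increments it, atomic_inc_not_zero() refuses the increment once the counter has climbed back to zero (the hardware fifo is full), and the atomic_dec() in flush_channel() releases a slot. A userspace sketch of the same bookkeeping, using C11 atomics in place of the kernel primitives (try_submit()/complete_one() are illustrative names, not driver API):

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static atomic_int submit_count;

/* increment unless the value is zero; mirrors atomic_inc_not_zero() */
static bool inc_not_zero(atomic_int *v)
{
	int old = atomic_load(v);

	while (old != 0) {
		if (atomic_compare_exchange_weak(v, &old, old + 1))
			return true;		/* slot claimed */
	}
	return false;				/* counter hit zero: fifo full */
}

static bool try_submit(void)
{
	return inc_not_zero(&submit_count);
}

static void complete_one(void)
{
	atomic_fetch_sub(&submit_count, 1);	/* mirrors atomic_dec() */
}

int main(void)
{
	int capacity = 4, accepted = 0;

	atomic_init(&submit_count, -capacity);
	for (int i = 0; i < 6; i++)
		accepted += try_submit();
	printf("accepted %d of 6\n", accepted);	/* accepted 4 of 6 */
	complete_one();				/* a completion frees one slot */
	printf("retry: %s\n", try_submit() ? "ok" : "full");
	return 0;
}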
View file

@ -16,8 +16,13 @@ config FIREWIRE
enable the new stack.
To compile this driver as a module, say M here: the module will be
called firewire-core. It functionally replaces ieee1394, raw1394,
and video1394.
called firewire-core.
This module functionally replaces ieee1394, raw1394, and video1394.
To access it from application programs, you generally need at least
libraw1394 version 2. IIDC/DCAM applications also need libdc1394
version 2. No libraries are required to access storage devices
through the firewire-sbp2 driver.
config FIREWIRE_OHCI
tristate "OHCI-1394 controllers"

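The new FIREWIRE help text above points applications at libraw1394 version 2 for access to the stack. A minimal userspace probe along those lines might look like the sketch below; it assumes the libraw1394 development headers are installed and uses only the library's basic handle/port calls (untested here, intended purely as an illustration):

#include <stdio.h>
#include <libraw1394/raw1394.h>

int main(void)
{
	raw1394handle_t h = raw1394_new_handle();
	struct raw1394_portinfo ports[4];
	int n;

	if (!h) {
		perror("raw1394_new_handle");
		return 1;
	}
	n = raw1394_get_port_info(h, ports, 4);
	printf("%d port(s) available\n", n);
	if (n > 0 && raw1394_set_port(h, 0) == 0)
		puts("attached to port 0");
	raw1394_destroy_handle(h);
	return 0;
}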
View file

@ -539,7 +539,7 @@ fw_core_remove_card(struct fw_card *card)
wait_for_completion(&card->done);
cancel_delayed_work_sync(&card->work);
fw_flush_transactions(card);
WARN_ON(!list_empty(&card->transaction_list));
del_timer_sync(&card->flush_timer);
}
EXPORT_SYMBOL(fw_core_remove_card);

View file

@ -382,9 +382,9 @@ complete_transaction(struct fw_card *card, int rcode,
response->response.type = FW_CDEV_EVENT_RESPONSE;
response->response.rcode = rcode;
queue_event(client, &response->event,
&response->response, sizeof(response->response),
response->response.data, response->response.length);
queue_event(client, &response->event, &response->response,
sizeof(response->response) + response->response.length,
NULL, 0);
}
static int ioctl_send_request(struct client *client, void *buffer)

View file

@ -171,7 +171,6 @@ struct iso_context {
struct fw_ohci {
struct fw_card card;
u32 version;
__iomem char *registers;
dma_addr_t self_id_bus;
__le32 *self_id_cpu;
@ -180,6 +179,8 @@ struct fw_ohci {
int generation;
int request_generation; /* for timestamping incoming requests */
u32 bus_seconds;
bool use_dualbuffer;
bool old_uninorth;
bool bus_reset_packet_quirk;
@ -1885,7 +1886,7 @@ ohci_allocate_iso_context(struct fw_card *card, int type, size_t header_size)
} else {
mask = &ohci->ir_context_mask;
list = ohci->ir_context_list;
if (ohci->version >= OHCI_VERSION_1_1)
if (ohci->use_dualbuffer)
callback = handle_ir_dualbuffer_packet;
else
callback = handle_ir_packet_per_buffer;
@ -1949,7 +1950,7 @@ static int ohci_start_iso(struct fw_iso_context *base,
} else {
index = ctx - ohci->ir_context_list;
control = IR_CONTEXT_ISOCH_HEADER;
if (ohci->version >= OHCI_VERSION_1_1)
if (ohci->use_dualbuffer)
control |= IR_CONTEXT_DUAL_BUFFER_MODE;
match = (tags << 28) | (sync << 8) | ctx->base.channel;
if (cycle >= 0) {
@ -2279,7 +2280,7 @@ ohci_queue_iso(struct fw_iso_context *base,
spin_lock_irqsave(&ctx->context.ohci->lock, flags);
if (base->type == FW_ISO_CONTEXT_TRANSMIT)
retval = ohci_queue_iso_transmit(base, packet, buffer, payload);
else if (ctx->context.ohci->version >= OHCI_VERSION_1_1)
else if (ctx->context.ohci->use_dualbuffer)
retval = ohci_queue_iso_receive_dualbuffer(base, packet,
buffer, payload);
else
@ -2341,7 +2342,7 @@ static int __devinit
pci_probe(struct pci_dev *dev, const struct pci_device_id *ent)
{
struct fw_ohci *ohci;
u32 bus_options, max_receive, link_speed;
u32 bus_options, max_receive, link_speed, version;
u64 guid;
int err;
size_t size;
@ -2366,12 +2367,6 @@ pci_probe(struct pci_dev *dev, const struct pci_device_id *ent)
pci_write_config_dword(dev, OHCI1394_PCI_HCI_Control, 0);
pci_set_drvdata(dev, ohci);
#if defined(CONFIG_PPC_PMAC) && defined(CONFIG_PPC32)
ohci->old_uninorth = dev->vendor == PCI_VENDOR_ID_APPLE &&
dev->device == PCI_DEVICE_ID_APPLE_UNI_N_FW;
#endif
ohci->bus_reset_packet_quirk = dev->vendor == PCI_VENDOR_ID_TI;
spin_lock_init(&ohci->lock);
tasklet_init(&ohci->bus_reset_tasklet,
@ -2390,6 +2385,23 @@ pci_probe(struct pci_dev *dev, const struct pci_device_id *ent)
goto fail_iomem;
}
version = reg_read(ohci, OHCI1394_Version) & 0x00ff00ff;
ohci->use_dualbuffer = version >= OHCI_VERSION_1_1;
/* x86-32 currently doesn't use highmem for dma_alloc_coherent */
#if !defined(CONFIG_X86_32)
/* dual-buffer mode is broken with descriptor addresses above 2G */
if (dev->vendor == PCI_VENDOR_ID_TI &&
dev->device == PCI_DEVICE_ID_TI_TSB43AB22)
ohci->use_dualbuffer = false;
#endif
#if defined(CONFIG_PPC_PMAC) && defined(CONFIG_PPC32)
ohci->old_uninorth = dev->vendor == PCI_VENDOR_ID_APPLE &&
dev->device == PCI_DEVICE_ID_APPLE_UNI_N_FW;
#endif
ohci->bus_reset_packet_quirk = dev->vendor == PCI_VENDOR_ID_TI;
ar_context_init(&ohci->ar_request_ctx, ohci,
OHCI1394_AsReqRcvContextControlSet);
@ -2441,9 +2453,8 @@ pci_probe(struct pci_dev *dev, const struct pci_device_id *ent)
if (err < 0)
goto fail_self_id;
ohci->version = reg_read(ohci, OHCI1394_Version) & 0x00ff00ff;
fw_notify("Added fw-ohci device %s, OHCI version %x.%x\n",
dev->dev.bus_id, ohci->version >> 16, ohci->version & 0xff);
dev->dev.bus_id, version >> 16, version & 0xff);
return 0;
fail_self_id:

View file

@ -510,8 +510,6 @@ fw_core_handle_bus_reset(struct fw_card *card,
struct fw_node *local_node;
unsigned long flags;
fw_flush_transactions(card);
spin_lock_irqsave(&card->lock, flags);
/*

View file

@ -22,6 +22,7 @@
#include <linux/kernel.h>
#include <linux/kref.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
@ -151,7 +152,7 @@ transmit_complete_callback(struct fw_packet *packet,
static void
fw_fill_request(struct fw_packet *packet, int tcode, int tlabel,
int node_id, int source_id, int generation, int speed,
int destination_id, int source_id, int generation, int speed,
unsigned long long offset, void *payload, size_t length)
{
int ext_tcode;
@ -166,7 +167,7 @@ fw_fill_request(struct fw_packet *packet, int tcode, int tlabel,
HEADER_RETRY(RETRY_X) |
HEADER_TLABEL(tlabel) |
HEADER_TCODE(tcode) |
HEADER_DESTINATION(node_id);
HEADER_DESTINATION(destination_id);
packet->header[1] =
HEADER_OFFSET_HIGH(offset >> 32) | HEADER_SOURCE(source_id);
packet->header[2] =
@ -252,7 +253,7 @@ fw_send_request(struct fw_card *card, struct fw_transaction *t,
fw_transaction_callback_t callback, void *callback_data)
{
unsigned long flags;
int tlabel, source;
int tlabel;
/*
* Bump the flush timer up 100ms first of all so we
@ -268,7 +269,6 @@ fw_send_request(struct fw_card *card, struct fw_transaction *t,
spin_lock_irqsave(&card->lock, flags);
source = card->node_id;
tlabel = card->current_tlabel;
if (card->tlabel_mask & (1 << tlabel)) {
spin_unlock_irqrestore(&card->lock, flags);
@ -279,77 +279,58 @@ fw_send_request(struct fw_card *card, struct fw_transaction *t,
card->current_tlabel = (card->current_tlabel + 1) & 0x1f;
card->tlabel_mask |= (1 << tlabel);
list_add_tail(&t->link, &card->transaction_list);
spin_unlock_irqrestore(&card->lock, flags);
/* Initialize rest of transaction, fill out packet and send it. */
t->node_id = node_id;
t->tlabel = tlabel;
t->callback = callback;
t->callback_data = callback_data;
fw_fill_request(&t->packet, tcode, t->tlabel,
node_id, source, generation,
speed, offset, payload, length);
fw_fill_request(&t->packet, tcode, t->tlabel, node_id, card->node_id,
generation, speed, offset, payload, length);
t->packet.callback = transmit_complete_callback;
list_add_tail(&t->link, &card->transaction_list);
spin_unlock_irqrestore(&card->lock, flags);
card->driver->send_request(card, &t->packet);
}
EXPORT_SYMBOL(fw_send_request);
struct fw_phy_packet {
struct fw_packet packet;
struct completion done;
struct kref kref;
};
static void phy_packet_release(struct kref *kref)
{
struct fw_phy_packet *p =
container_of(kref, struct fw_phy_packet, kref);
kfree(p);
}
static DEFINE_MUTEX(phy_config_mutex);
static DECLARE_COMPLETION(phy_config_done);
static void transmit_phy_packet_callback(struct fw_packet *packet,
struct fw_card *card, int status)
{
struct fw_phy_packet *p =
container_of(packet, struct fw_phy_packet, packet);
complete(&p->done);
kref_put(&p->kref, phy_packet_release);
complete(&phy_config_done);
}
static struct fw_packet phy_config_packet = {
.header_length = 8,
.payload_length = 0,
.speed = SCODE_100,
.callback = transmit_phy_packet_callback,
};
void fw_send_phy_config(struct fw_card *card,
int node_id, int generation, int gap_count)
{
struct fw_phy_packet *p;
long timeout = DIV_ROUND_UP(HZ, 10);
u32 data = PHY_IDENTIFIER(PHY_PACKET_CONFIG) |
PHY_CONFIG_ROOT_ID(node_id) |
PHY_CONFIG_GAP_COUNT(gap_count);
p = kmalloc(sizeof(*p), GFP_KERNEL);
if (p == NULL)
return;
mutex_lock(&phy_config_mutex);
p->packet.header[0] = data;
p->packet.header[1] = ~data;
p->packet.header_length = 8;
p->packet.payload_length = 0;
p->packet.speed = SCODE_100;
p->packet.generation = generation;
p->packet.callback = transmit_phy_packet_callback;
init_completion(&p->done);
kref_set(&p->kref, 2);
phy_config_packet.header[0] = data;
phy_config_packet.header[1] = ~data;
phy_config_packet.generation = generation;
INIT_COMPLETION(phy_config_done);
card->driver->send_request(card, &p->packet);
timeout = wait_for_completion_timeout(&p->done, timeout);
kref_put(&p->kref, phy_packet_release);
card->driver->send_request(card, &phy_config_packet);
wait_for_completion_timeout(&phy_config_done, timeout);
/* will leak p if the callback is never executed */
WARN_ON(timeout == 0);
mutex_unlock(&phy_config_mutex);
}
void fw_flush_transactions(struct fw_card *card)

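The fw_send_phy_config() rework above drops the kmalloc'd packet and its kref (which, as the removed comment notes, could leak if the callback never ran) in favour of one statically allocated packet, a mutex that serializes callers, and a static completion signalled from the transmit callback. A userspace sketch of the same shape, with pthreads standing in for the kernel's mutex and completion and all names purely illustrative:

#include <pthread.h>
#include <stdio.h>
#include <unistd.h>

struct phy_request {
	unsigned int data;
	int done;
	pthread_mutex_t lock;
	pthread_cond_t cond;
};

static pthread_mutex_t config_mutex = PTHREAD_MUTEX_INITIALIZER;
static struct phy_request request = {
	.lock = PTHREAD_MUTEX_INITIALIZER,
	.cond = PTHREAD_COND_INITIALIZER,
};

/* stands in for the controller completing the packet asynchronously */
static void *completion_callback(void *arg)
{
	struct phy_request *r = arg;

	usleep(1000);				/* pretend the hardware took a while */
	pthread_mutex_lock(&r->lock);
	r->done = 1;
	pthread_cond_signal(&r->cond);
	pthread_mutex_unlock(&r->lock);
	return NULL;
}

static void send_phy_config(unsigned int data)
{
	pthread_t worker;

	pthread_mutex_lock(&config_mutex);	/* one user of the static object at a time */
	request.data = data;
	request.done = 0;
	pthread_create(&worker, NULL, completion_callback, &request);

	pthread_mutex_lock(&request.lock);
	while (!request.done)			/* a real driver would use a timed wait here */
		pthread_cond_wait(&request.cond, &request.lock);
	pthread_mutex_unlock(&request.lock);

	pthread_join(worker, NULL);
	pthread_mutex_unlock(&config_mutex);
}

int main(void)
{
	send_phy_config(0x12345678);
	puts("phy config delivered");
	return 0;
}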
View file

@ -195,7 +195,7 @@ static void ts_interrupt_main(struct corgi_ts *corgi_ts, int isTimer)
{
if ((GPLR(IRQ_TO_GPIO(corgi_ts->irq_gpio)) & GPIO_bit(IRQ_TO_GPIO(corgi_ts->irq_gpio))) == 0) {
/* Disable Interrupt */
set_irq_type(corgi_ts->irq_gpio, IRQT_NOEDGE);
set_irq_type(corgi_ts->irq_gpio, IRQ_TYPE_NONE);
if (read_xydata(corgi_ts)) {
corgi_ts->pendown = 1;
new_data(corgi_ts);
@ -214,7 +214,7 @@ static void ts_interrupt_main(struct corgi_ts *corgi_ts, int isTimer)
}
/* Enable Falling Edge */
set_irq_type(corgi_ts->irq_gpio, IRQT_FALLING);
set_irq_type(corgi_ts->irq_gpio, IRQ_TYPE_EDGE_FALLING);
corgi_ts->pendown = 0;
}
}
@ -258,7 +258,7 @@ static int corgits_resume(struct platform_device *dev)
corgi_ssp_ads7846_putget((4u << ADSCTRL_ADR_SH) | ADSCTRL_STS);
/* Enable Falling Edge */
set_irq_type(corgi_ts->irq_gpio, IRQT_FALLING);
set_irq_type(corgi_ts->irq_gpio, IRQ_TYPE_EDGE_FALLING);
corgi_ts->power_mode = PWR_MODE_ACTIVE;
return 0;
@ -333,7 +333,7 @@ static int __init corgits_probe(struct platform_device *pdev)
corgi_ts->power_mode = PWR_MODE_ACTIVE;
/* Enable Falling Edge */
set_irq_type(corgi_ts->irq_gpio, IRQT_FALLING);
set_irq_type(corgi_ts->irq_gpio, IRQ_TYPE_EDGE_FALLING);
return 0;

View file

@ -198,7 +198,7 @@ static int wm97xx_acc_startup(struct wm97xx *wm)
switch (wm->id) {
case WM9705_ID2:
wm->pen_irq = IRQ_GPIO(4);
set_irq_type(IRQ_GPIO(4), IRQT_BOTHEDGE);
set_irq_type(IRQ_GPIO(4), IRQ_TYPE_EDGE_BOTH);
break;
case WM9712_ID2:
case WM9713_ID2:

View file

@ -7,6 +7,7 @@ config MISDN_HFCPCI
tristate "Support for HFC PCI cards"
depends on MISDN
depends on PCI
depends on VIRT_TO_BUS
help
Enable support for cards with Cologne Chip AG's
HFC PCI chip.

View file

@ -2030,7 +2030,7 @@ release_l2(struct layer2 *l2)
skb_queue_purge(&l2->down_queue);
ReleaseWin(l2);
if (test_bit(FLG_LAPD, &l2->flag)) {
release_tei(l2);
TEIrelease(l2);
if (l2->ch.st)
l2->ch.st->dev->D.ctrl(&l2->ch.st->dev->D,
CLOSE_CHANNEL, NULL);

View file

@ -96,7 +96,7 @@ extern int tei_l2(struct layer2 *, u_int, u_long arg);
/* from tei.c */
extern int l2_tei(struct layer2 *, u_int, u_long arg);
extern void release_tei(struct layer2 *);
extern void TEIrelease(struct layer2 *);
extern int TEIInit(u_int *);
extern void TEIFree(void);

View file

@ -945,7 +945,7 @@ l2_tei(struct layer2 *l2, u_int cmd, u_long arg)
}
void
release_tei(struct layer2 *l2)
TEIrelease(struct layer2 *l2)
{
struct teimgr *tm = l2->tm;
u_long flags;

View file

@ -147,9 +147,12 @@ static struct priority_group *alloc_priority_group(void)
static void free_pgpaths(struct list_head *pgpaths, struct dm_target *ti)
{
struct pgpath *pgpath, *tmp;
struct multipath *m = ti->private;
list_for_each_entry_safe(pgpath, tmp, pgpaths, list) {
list_del(&pgpath->list);
if (m->hw_handler_name)
scsi_dh_detach(bdev_get_queue(pgpath->path.dev->bdev));
dm_put_device(ti, pgpath->path.dev);
free_pgpath(pgpath);
}
@ -548,6 +551,7 @@ static struct pgpath *parse_path(struct arg_set *as, struct path_selector *ps,
{
int r;
struct pgpath *p;
struct multipath *m = ti->private;
/* we need at least a path arg */
if (as->argc < 1) {
@ -566,6 +570,15 @@ static struct pgpath *parse_path(struct arg_set *as, struct path_selector *ps,
goto bad;
}
if (m->hw_handler_name) {
r = scsi_dh_attach(bdev_get_queue(p->path.dev->bdev),
m->hw_handler_name);
if (r < 0) {
dm_put_device(ti, p->path.dev);
goto bad;
}
}
r = ps->type->add_path(ps, &p->path, as->argc, as->argv, &ti->error);
if (r) {
dm_put_device(ti, p->path.dev);

View file

@ -273,12 +273,12 @@ mpt_fault_reset_work(struct work_struct *work)
ioc_raw_state = mpt_GetIocState(ioc, 0);
if ((ioc_raw_state & MPI_IOC_STATE_MASK) == MPI_IOC_STATE_FAULT) {
printk(MYIOC_s_WARN_FMT "IOC is in FAULT state (%04xh)!!!\n",
ioc->name, ioc_raw_state & MPI_DOORBELL_DATA_MASK);
ioc->name, ioc_raw_state & MPI_DOORBELL_DATA_MASK);
printk(MYIOC_s_WARN_FMT "Issuing HardReset from %s!!\n",
ioc->name, __FUNCTION__);
ioc->name, __func__);
rc = mpt_HardResetHandler(ioc, CAN_SLEEP);
printk(MYIOC_s_WARN_FMT "%s: HardReset: %s\n", ioc->name,
__FUNCTION__, (rc == 0) ? "success" : "failed");
__func__, (rc == 0) ? "success" : "failed");
ioc_raw_state = mpt_GetIocState(ioc, 0);
if ((ioc_raw_state & MPI_IOC_STATE_MASK) == MPI_IOC_STATE_FAULT)
printk(MYIOC_s_WARN_FMT "IOC is in FAULT state after "
@ -356,7 +356,7 @@ mpt_turbo_reply(MPT_ADAPTER *ioc, u32 pa)
if (!cb_idx || cb_idx >= MPT_MAX_PROTOCOL_DRIVERS ||
MptCallbacks[cb_idx] == NULL) {
printk(MYIOC_s_WARN_FMT "%s: Invalid cb_idx (%d)!\n",
__FUNCTION__, ioc->name, cb_idx);
__func__, ioc->name, cb_idx);
goto out;
}
@ -420,7 +420,7 @@ mpt_reply(MPT_ADAPTER *ioc, u32 pa)
if (!cb_idx || cb_idx >= MPT_MAX_PROTOCOL_DRIVERS ||
MptCallbacks[cb_idx] == NULL) {
printk(MYIOC_s_WARN_FMT "%s: Invalid cb_idx (%d)!\n",
__FUNCTION__, ioc->name, cb_idx);
__func__, ioc->name, cb_idx);
freeme = 0;
goto out;
}
@ -2434,7 +2434,7 @@ mpt_adapter_disable(MPT_ADAPTER *ioc)
if (ioc->cached_fw != NULL) {
ddlprintk(ioc, printk(MYIOC_s_DEBUG_FMT "%s: Pushing FW onto "
"adapter\n", __FUNCTION__, ioc->name));
"adapter\n", __func__, ioc->name));
if ((ret = mpt_downloadboot(ioc, (MpiFwHeader_t *)
ioc->cached_fw, CAN_SLEEP)) < 0) {
printk(MYIOC_s_WARN_FMT
@ -3693,7 +3693,7 @@ mpt_diag_reset(MPT_ADAPTER *ioc, int ignore, int sleepFlag)
if (ioc->pcidev->device == MPI_MANUFACTPAGE_DEVID_SAS1078) {
drsprintk(ioc, printk(MYIOC_s_WARN_FMT "%s: Doorbell=%p; 1078 reset "
"address=%p\n", ioc->name, __FUNCTION__,
"address=%p\n", ioc->name, __func__,
&ioc->chip->Doorbell, &ioc->chip->Reset_1078));
CHIPREG_WRITE32(&ioc->chip->Reset_1078, 0x07);
if (sleepFlag == CAN_SLEEP)
@ -4742,12 +4742,12 @@ mptbase_sas_persist_operation(MPT_ADAPTER *ioc, u8 persist_opcode)
break;
}
printk("%s: persist_opcode=%x\n",__FUNCTION__, persist_opcode);
printk("%s: persist_opcode=%x\n",__func__, persist_opcode);
/* Get a MF for this command.
*/
if ((mf = mpt_get_msg_frame(mpt_base_index, ioc)) == NULL) {
printk("%s: no msg frames!\n",__FUNCTION__);
printk("%s: no msg frames!\n",__func__);
return -1;
}
@ -4771,13 +4771,13 @@ mptbase_sas_persist_operation(MPT_ADAPTER *ioc, u8 persist_opcode)
(SasIoUnitControlReply_t *)ioc->persist_reply_frame;
if (le16_to_cpu(sasIoUnitCntrReply->IOCStatus) != MPI_IOCSTATUS_SUCCESS) {
printk("%s: IOCStatus=0x%X IOCLogInfo=0x%X\n",
__FUNCTION__,
__func__,
sasIoUnitCntrReply->IOCStatus,
sasIoUnitCntrReply->IOCLogInfo);
return -1;
}
printk("%s: success\n",__FUNCTION__);
printk("%s: success\n",__func__);
return 0;
}
@ -5784,7 +5784,7 @@ SendEventAck(MPT_ADAPTER *ioc, EventNotificationReply_t *evnp)
if ((pAck = (EventAck_t *) mpt_get_msg_frame(mpt_base_index, ioc)) == NULL) {
dfailprintk(ioc, printk(MYIOC_s_WARN_FMT "%s, no msg frames!!\n",
ioc->name,__FUNCTION__));
ioc->name,__func__));
return -1;
}

View file

@ -505,7 +505,7 @@ mptctl_event_process(MPT_ADAPTER *ioc, EventNotificationReply_t *pEvReply)
event = le32_to_cpu(pEvReply->Event) & 0xFF;
dctlprintk(ioc, printk(MYIOC_s_DEBUG_FMT "%s() called\n",
ioc->name, __FUNCTION__));
ioc->name, __func__));
if(async_queue == NULL)
return 1;
@ -2482,7 +2482,7 @@ mptctl_hp_hostinfo(unsigned long arg, unsigned int data_size)
*/
if ((mf = mpt_get_msg_frame(mptctl_id, ioc)) == NULL) {
dfailprintk(ioc, printk(MYIOC_s_WARN_FMT "%s, no msg frames!!\n",
ioc->name,__FUNCTION__));
ioc->name,__func__));
goto out;
}

View file

@ -231,28 +231,28 @@ static int
mptfc_abort(struct scsi_cmnd *SCpnt)
{
return
mptfc_block_error_handler(SCpnt, mptscsih_abort, __FUNCTION__);
mptfc_block_error_handler(SCpnt, mptscsih_abort, __func__);
}
static int
mptfc_dev_reset(struct scsi_cmnd *SCpnt)
{
return
mptfc_block_error_handler(SCpnt, mptscsih_dev_reset, __FUNCTION__);
mptfc_block_error_handler(SCpnt, mptscsih_dev_reset, __func__);
}
static int
mptfc_bus_reset(struct scsi_cmnd *SCpnt)
{
return
mptfc_block_error_handler(SCpnt, mptscsih_bus_reset, __FUNCTION__);
mptfc_block_error_handler(SCpnt, mptscsih_bus_reset, __func__);
}
static int
mptfc_host_reset(struct scsi_cmnd *SCpnt)
{
return
mptfc_block_error_handler(SCpnt, mptscsih_host_reset, __FUNCTION__);
mptfc_block_error_handler(SCpnt, mptscsih_host_reset, __func__);
}
static void

View file

@ -610,7 +610,7 @@ mpt_lan_send_turbo(struct net_device *dev, u32 tmsg)
dioprintk((KERN_INFO MYNAM ": %s/%s: @%s, skb %p sent.\n",
IOC_AND_NETDEV_NAMES_s_s(dev),
__FUNCTION__, sent));
__func__, sent));
priv->SendCtl[ctx].skb = NULL;
pci_unmap_single(mpt_dev->pcidev, priv->SendCtl[ctx].dma,
@ -676,7 +676,7 @@ mpt_lan_send_reply(struct net_device *dev, LANSendReply_t *pSendRep)
dioprintk((KERN_INFO MYNAM ": %s/%s: @%s, skb %p sent.\n",
IOC_AND_NETDEV_NAMES_s_s(dev),
__FUNCTION__, sent));
__func__, sent));
priv->SendCtl[ctx].skb = NULL;
pci_unmap_single(mpt_dev->pcidev, priv->SendCtl[ctx].dma,
@ -715,7 +715,7 @@ mpt_lan_sdu_send (struct sk_buff *skb, struct net_device *dev)
u16 cur_naa = 0x1000;
dioprintk((KERN_INFO MYNAM ": %s called, skb_addr = %p\n",
__FUNCTION__, skb));
__func__, skb));
spin_lock_irqsave(&priv->txfidx_lock, flags);
if (priv->mpt_txfidx_tail < 0) {
@ -723,7 +723,7 @@ mpt_lan_sdu_send (struct sk_buff *skb, struct net_device *dev)
spin_unlock_irqrestore(&priv->txfidx_lock, flags);
printk (KERN_ERR "%s: no tx context available: %u\n",
__FUNCTION__, priv->mpt_txfidx_tail);
__func__, priv->mpt_txfidx_tail);
return 1;
}
@ -733,7 +733,7 @@ mpt_lan_sdu_send (struct sk_buff *skb, struct net_device *dev)
spin_unlock_irqrestore(&priv->txfidx_lock, flags);
printk (KERN_ERR "%s: Unable to alloc request frame\n",
__FUNCTION__);
__func__);
return 1;
}
@ -1208,7 +1208,7 @@ mpt_lan_post_receive_buckets(struct mpt_lan_priv *priv)
dioprintk((KERN_INFO MYNAM ": %s/%s: @%s, Start_buckets = %u, buckets_out = %u\n",
IOC_AND_NETDEV_NAMES_s_s(dev),
__FUNCTION__, buckets, curr));
__func__, buckets, curr));
max = (mpt_dev->req_sz - MPT_LAN_RECEIVE_POST_REQUEST_SIZE) /
(MPT_LAN_TRANSACTION32_SIZE + sizeof(SGESimple64_t));
@ -1217,9 +1217,9 @@ mpt_lan_post_receive_buckets(struct mpt_lan_priv *priv)
mf = mpt_get_msg_frame(LanCtx, mpt_dev);
if (mf == NULL) {
printk (KERN_ERR "%s: Unable to alloc request frame\n",
__FUNCTION__);
__func__);
dioprintk((KERN_ERR "%s: %u buckets remaining\n",
__FUNCTION__, buckets));
__func__, buckets));
goto out;
}
pRecvReq = (LANReceivePostRequest_t *) mf;
@ -1244,7 +1244,7 @@ mpt_lan_post_receive_buckets(struct mpt_lan_priv *priv)
spin_lock_irqsave(&priv->rxfidx_lock, flags);
if (priv->mpt_rxfidx_tail < 0) {
printk (KERN_ERR "%s: Can't alloc context\n",
__FUNCTION__);
__func__);
spin_unlock_irqrestore(&priv->rxfidx_lock,
flags);
break;
@ -1267,7 +1267,7 @@ mpt_lan_post_receive_buckets(struct mpt_lan_priv *priv)
if (skb == NULL) {
printk (KERN_WARNING
MYNAM "/%s: Can't alloc skb\n",
__FUNCTION__);
__func__);
priv->mpt_rxfidx[++priv->mpt_rxfidx_tail] = ctx;
spin_unlock_irqrestore(&priv->rxfidx_lock, flags);
break;
@ -1305,7 +1305,7 @@ mpt_lan_post_receive_buckets(struct mpt_lan_priv *priv)
if (pSimple == NULL) {
/**/ printk (KERN_WARNING MYNAM "/%s: No buckets posted\n",
/**/ __FUNCTION__);
/**/ __func__);
mpt_free_msg_frame(mpt_dev, mf);
goto out;
}
@ -1329,9 +1329,9 @@ mpt_lan_post_receive_buckets(struct mpt_lan_priv *priv)
out:
dioprintk((KERN_INFO MYNAM "/%s: End_buckets = %u, priv->buckets_out = %u\n",
__FUNCTION__, buckets, atomic_read(&priv->buckets_out)));
__func__, buckets, atomic_read(&priv->buckets_out)));
dioprintk((KERN_INFO MYNAM "/%s: Posted %u buckets and received %u back\n",
__FUNCTION__, priv->total_posted, priv->total_received));
__func__, priv->total_posted, priv->total_received));
clear_bit(0, &priv->post_buckets_active);
}

View file

@ -300,7 +300,7 @@ mptsas_port_delete(MPT_ADAPTER *ioc, struct mptsas_portinfo_details * port_detai
phy_info = port_info->phy_info;
dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "%s: [%p]: num_phys=%02d "
"bitmask=0x%016llX\n", ioc->name, __FUNCTION__, port_details,
"bitmask=0x%016llX\n", ioc->name, __func__, port_details,
port_details->num_phys, (unsigned long long)
port_details->phy_bitmask));
@ -411,7 +411,7 @@ mptsas_setup_wide_ports(MPT_ADAPTER *ioc, struct mptsas_portinfo *port_info)
*/
dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT
"%s: [%p]: deleting phy = %d\n",
ioc->name, __FUNCTION__, port_details, i));
ioc->name, __func__, port_details, i));
port_details->num_phys--;
port_details->phy_bitmask &= ~ (1 << phy_info->phy_id);
memset(&phy_info->attached, 0, sizeof(struct mptsas_devinfo));
@ -497,7 +497,7 @@ mptsas_setup_wide_ports(MPT_ADAPTER *ioc, struct mptsas_portinfo *port_info)
continue;
dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT
"%s: [%p]: phy_id=%02d num_phys=%02d "
"bitmask=0x%016llX\n", ioc->name, __FUNCTION__,
"bitmask=0x%016llX\n", ioc->name, __func__,
port_details, i, port_details->num_phys,
(unsigned long long)port_details->phy_bitmask));
dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "\t\tport = %p rphy=%p\n",
@ -553,7 +553,7 @@ mptsas_target_reset(MPT_ADAPTER *ioc, u8 channel, u8 id)
if ((mf = mpt_get_msg_frame(ioc->TaskCtx, ioc)) == NULL) {
dfailprintk(ioc, printk(MYIOC_s_WARN_FMT "%s, no msg frames @%d!!\n",
ioc->name,__FUNCTION__, __LINE__));
ioc->name,__func__, __LINE__));
return 0;
}
@ -606,7 +606,7 @@ mptsas_target_reset_queue(MPT_ADAPTER *ioc,
GFP_ATOMIC);
if (!target_reset_list) {
dfailprintk(ioc, printk(MYIOC_s_WARN_FMT "%s, failed to allocate mem @%d..!!\n",
ioc->name,__FUNCTION__, __LINE__));
ioc->name,__func__, __LINE__));
return;
}
@ -673,7 +673,7 @@ mptsas_dev_reset_complete(MPT_ADAPTER *ioc)
ev = kzalloc(sizeof(*ev), GFP_ATOMIC);
if (!ev) {
dfailprintk(ioc, printk(MYIOC_s_WARN_FMT "%s, failed to allocate mem @%d..!!\n",
ioc->name,__FUNCTION__, __LINE__));
ioc->name,__func__, __LINE__));
return;
}
@ -1183,7 +1183,7 @@ static int mptsas_phy_reset(struct sas_phy *phy, int hard_reset)
reply = (SasIoUnitControlReply_t *)ioc->sas_mgmt.reply;
if (reply->IOCStatus != MPI_IOCSTATUS_SUCCESS) {
printk(MYIOC_s_INFO_FMT "%s: IOCStatus=0x%X IOCLogInfo=0x%X\n",
ioc->name, __FUNCTION__, reply->IOCStatus, reply->IOCLogInfo);
ioc->name, __func__, reply->IOCStatus, reply->IOCLogInfo);
error = -ENXIO;
goto out_unlock;
}
@ -1270,14 +1270,14 @@ static int mptsas_smp_handler(struct Scsi_Host *shost, struct sas_rphy *rphy,
if (!rsp) {
printk(MYIOC_s_ERR_FMT "%s: the smp response space is missing\n",
ioc->name, __FUNCTION__);
ioc->name, __func__);
return -EINVAL;
}
/* do we need to support multiple segments? */
if (req->bio->bi_vcnt > 1 || rsp->bio->bi_vcnt > 1) {
printk(MYIOC_s_ERR_FMT "%s: multiple segments req %u %u, rsp %u %u\n",
ioc->name, __FUNCTION__, req->bio->bi_vcnt, req->data_len,
ioc->name, __func__, req->bio->bi_vcnt, req->data_len,
rsp->bio->bi_vcnt, rsp->data_len);
return -EINVAL;
}
@ -1343,7 +1343,7 @@ static int mptsas_smp_handler(struct Scsi_Host *shost, struct sas_rphy *rphy,
timeleft = wait_for_completion_timeout(&ioc->sas_mgmt.done, 10 * HZ);
if (!timeleft) {
printk(MYIOC_s_ERR_FMT "%s: smp timeout!\n", ioc->name, __FUNCTION__);
printk(MYIOC_s_ERR_FMT "%s: smp timeout!\n", ioc->name, __func__);
/* On timeout reset the board */
mpt_HardResetHandler(ioc, CAN_SLEEP);
ret = -ETIMEDOUT;
@ -1361,7 +1361,7 @@ static int mptsas_smp_handler(struct Scsi_Host *shost, struct sas_rphy *rphy,
rsp->data_len -= smprep->ResponseDataLength;
} else {
printk(MYIOC_s_ERR_FMT "%s: smp passthru reply failed to be returned\n",
ioc->name, __FUNCTION__);
ioc->name, __func__);
ret = -ENXIO;
}
unmap:
@ -2006,7 +2006,7 @@ static int mptsas_probe_one_phy(struct device *dev,
if (error) {
dfailprintk(ioc, printk(MYIOC_s_ERR_FMT
"%s: exit at line=%d\n", ioc->name,
__FUNCTION__, __LINE__));
__func__, __LINE__));
goto out;
}
mptsas_set_port(ioc, phy_info, port);
@ -2076,7 +2076,7 @@ static int mptsas_probe_one_phy(struct device *dev,
if (!rphy) {
dfailprintk(ioc, printk(MYIOC_s_ERR_FMT
"%s: exit at line=%d\n", ioc->name,
__FUNCTION__, __LINE__));
__func__, __LINE__));
goto out;
}
@ -2085,7 +2085,7 @@ static int mptsas_probe_one_phy(struct device *dev,
if (error) {
dfailprintk(ioc, printk(MYIOC_s_ERR_FMT
"%s: exit at line=%d\n", ioc->name,
__FUNCTION__, __LINE__));
__func__, __LINE__));
sas_rphy_free(rphy);
goto out;
}
@ -2613,7 +2613,7 @@ mptsas_hotplug_work(struct work_struct *work)
(ev->channel << 8) + ev->id)) {
dfailprintk(ioc, printk(MYIOC_s_ERR_FMT
"%s: exit at line=%d\n", ioc->name,
__FUNCTION__, __LINE__));
__func__, __LINE__));
break;
}
phy_info = mptsas_find_phyinfo_by_sas_address(
@ -2633,20 +2633,20 @@ mptsas_hotplug_work(struct work_struct *work)
if (!phy_info){
dfailprintk(ioc, printk(MYIOC_s_ERR_FMT
"%s: exit at line=%d\n", ioc->name,
__FUNCTION__, __LINE__));
__func__, __LINE__));
break;
}
if (!phy_info->port_details) {
dfailprintk(ioc, printk(MYIOC_s_ERR_FMT
"%s: exit at line=%d\n", ioc->name,
__FUNCTION__, __LINE__));
__func__, __LINE__));
break;
}
rphy = mptsas_get_rphy(phy_info);
if (!rphy) {
dfailprintk(ioc, printk(MYIOC_s_ERR_FMT
"%s: exit at line=%d\n", ioc->name,
__FUNCTION__, __LINE__));
__func__, __LINE__));
break;
}
@ -2654,7 +2654,7 @@ mptsas_hotplug_work(struct work_struct *work)
if (!port) {
dfailprintk(ioc, printk(MYIOC_s_ERR_FMT
"%s: exit at line=%d\n", ioc->name,
__FUNCTION__, __LINE__));
__func__, __LINE__));
break;
}
@ -2665,7 +2665,7 @@ mptsas_hotplug_work(struct work_struct *work)
if (!vtarget) {
dfailprintk(ioc, printk(MYIOC_s_ERR_FMT
"%s: exit at line=%d\n", ioc->name,
__FUNCTION__, __LINE__));
__func__, __LINE__));
break;
}
@ -2720,7 +2720,7 @@ mptsas_hotplug_work(struct work_struct *work)
(ev->channel << 8) + ev->id)) {
dfailprintk(ioc, printk(MYIOC_s_ERR_FMT
"%s: exit at line=%d\n", ioc->name,
__FUNCTION__, __LINE__));
__func__, __LINE__));
break;
}
@ -2732,7 +2732,7 @@ mptsas_hotplug_work(struct work_struct *work)
if (!phy_info || !phy_info->port_details) {
dfailprintk(ioc, printk(MYIOC_s_ERR_FMT
"%s: exit at line=%d\n", ioc->name,
__FUNCTION__, __LINE__));
__func__, __LINE__));
break;
}
@ -2744,7 +2744,7 @@ mptsas_hotplug_work(struct work_struct *work)
if (!vtarget) {
dfailprintk(ioc, printk(MYIOC_s_ERR_FMT
"%s: exit at line=%d\n", ioc->name,
__FUNCTION__, __LINE__));
__func__, __LINE__));
break;
}
/*
@ -2767,7 +2767,7 @@ mptsas_hotplug_work(struct work_struct *work)
if (mptsas_get_rphy(phy_info)) {
dfailprintk(ioc, printk(MYIOC_s_ERR_FMT
"%s: exit at line=%d\n", ioc->name,
__FUNCTION__, __LINE__));
__func__, __LINE__));
if (ev->channel) printk("%d\n", __LINE__);
break;
}
@ -2776,7 +2776,7 @@ mptsas_hotplug_work(struct work_struct *work)
if (!port) {
dfailprintk(ioc, printk(MYIOC_s_ERR_FMT
"%s: exit at line=%d\n", ioc->name,
__FUNCTION__, __LINE__));
__func__, __LINE__));
break;
}
memcpy(&phy_info->attached, &sas_device,
@ -2801,7 +2801,7 @@ mptsas_hotplug_work(struct work_struct *work)
if (!rphy) {
dfailprintk(ioc, printk(MYIOC_s_ERR_FMT
"%s: exit at line=%d\n", ioc->name,
__FUNCTION__, __LINE__));
__func__, __LINE__));
break; /* non-fatal: an rphy can be added later */
}
@ -2809,7 +2809,7 @@ mptsas_hotplug_work(struct work_struct *work)
if (sas_rphy_add(rphy)) {
dfailprintk(ioc, printk(MYIOC_s_ERR_FMT
"%s: exit at line=%d\n", ioc->name,
__FUNCTION__, __LINE__));
__func__, __LINE__));
sas_rphy_free(rphy);
break;
}

View file

@ -461,7 +461,7 @@ mptscsih_issue_sep_command(MPT_ADAPTER *ioc, VirtTarget *vtarget,
if ((mf = mpt_get_msg_frame(ioc->InternalCtx, ioc)) == NULL) {
dfailprintk(ioc, printk(MYIOC_s_WARN_FMT "%s: no msg frames!!\n",
ioc->name,__FUNCTION__));
ioc->name,__func__));
return;
}
@ -2187,7 +2187,7 @@ mptscsih_taskmgmt_complete(MPT_ADAPTER *ioc, MPT_FRAME_HDR *mf, MPT_FRAME_HDR *m
(ioc->debug_level & MPT_DEBUG_TM ))
printk("%s: ha=%d [%d:%d:0] task_type=0x%02X "
"iocstatus=0x%04X\n\tloginfo=0x%08X response_code=0x%02X "
"term_cmnds=%d\n", __FUNCTION__, ioc->id, pScsiTmReply->Bus,
"term_cmnds=%d\n", __func__, ioc->id, pScsiTmReply->Bus,
pScsiTmReply->TargetID, pScsiTmReq->TaskType,
le16_to_cpu(pScsiTmReply->IOCStatus),
le32_to_cpu(pScsiTmReply->IOCLogInfo),pScsiTmReply->ResponseCode,

View file

@ -256,28 +256,28 @@ static int asic3_gpio_irq_type(unsigned int irq, unsigned int type)
bank + ASIC3_GPIO_TRIGGER_TYPE);
asic->irq_bothedge[(irq - asic->irq_base) >> 4] &= ~bit;
if (type == IRQT_RISING) {
if (type == IRQ_TYPE_EDGE_RISING) {
trigger |= bit;
edge |= bit;
} else if (type == IRQT_FALLING) {
} else if (type == IRQ_TYPE_EDGE_FALLING) {
trigger |= bit;
edge &= ~bit;
} else if (type == IRQT_BOTHEDGE) {
} else if (type == IRQ_TYPE_EDGE_BOTH) {
trigger |= bit;
if (asic3_gpio_get(&asic->gpio, irq - asic->irq_base))
edge &= ~bit;
else
edge |= bit;
asic->irq_bothedge[(irq - asic->irq_base) >> 4] |= bit;
} else if (type == IRQT_LOW) {
} else if (type == IRQ_TYPE_LEVEL_LOW) {
trigger &= ~bit;
level &= ~bit;
} else if (type == IRQT_HIGH) {
} else if (type == IRQ_TYPE_LEVEL_HIGH) {
trigger &= ~bit;
level |= bit;
} else {
/*
* if type == IRQT_NOEDGE, we should mask interrupts, but
* if type == IRQ_TYPE_NONE, we should mask interrupts, but
* be careful to not unmask them if mask was also called.
* Probably need internal state for mask.
*/
@ -343,7 +343,7 @@ static int __init asic3_irq_probe(struct platform_device *pdev)
ASIC3_INTMASK_GINTMASK);
set_irq_chained_handler(asic->irq_nr, asic3_irq_demux);
set_irq_type(asic->irq_nr, IRQT_RISING);
set_irq_type(asic->irq_nr, IRQ_TYPE_EDGE_RISING);
set_irq_data(asic->irq_nr, asic);
return 0;

View file

@ -324,7 +324,7 @@ static void tc6393xb_attach_irq(struct platform_device *dev)
set_irq_flags(irq, IRQF_VALID | IRQF_PROBE);
}
set_irq_type(tc6393xb->irq, IRQT_FALLING);
set_irq_type(tc6393xb->irq, IRQ_TYPE_EDGE_FALLING);
set_irq_data(tc6393xb->irq, tc6393xb);
set_irq_chained_handler(tc6393xb->irq, tc6393xb_irq);
}

Some files were not shown because too many files have changed in this diff.