From 69dc8044c34200e3f802d1941d53b5247bb13de6 Mon Sep 17 00:00:00 2001
From: "Boppana Prasad, Rajendra" <rajendra.b@intel.com>
Date: Mon, 13 Nov 2017 10:51:33 +0100
Subject: [PATCH] Merge pull request #160 in SW_PON/linux from
 UGW_SW-18320-Ingegrate-LEDE-generic-patches to xrx500

* commit 'c7f3673b32897465fee22a551cc071731dd7a5d7': (98 commits)
  bridge: port isolate
  bridge: only accept EAP locally
  lzma: de-bloat the lzma library used by jffs2
  hack: kernel: add generic image_cmdline hack to MIPS targets
  rfkill: add fake rfkill support
  kernel: prevent cryptomgr from pulling in useless extra dependencies for tests that are not run
  kernel: move regmap bloat out of the kernel image if it is only being used in modules
  kconfig: owrt specific dependencies
  hack: net: remove bogus netfilter dependencies
  use the openwrt lzma options for now
  kernel: fix linux/spi/spidev.h portability issues with musl
  linux-3.6: fix portability of some includes files in tools/ used on the host
  kernel: fix linux 4.9 host tools portability issues
  kernel: do not build modules.order
  build: add a hack for removing non-essential module info
  kernel: strip unnecessary symbol table information from kernel modules
  serial: do not accept sysrq characters via serial port
  libata: add ledtrig support
  usb: Remove annoying warning about bogus URB
  usb: Remove annoying warning about bogus URB
  ...
---
 .../devicetree/bindings/mtd/nand.txt          |   16 +
 Makefile                                      |   14 +-
 arch/mips/Kconfig                             |   18 +
 arch/mips/Makefile                            |    6 +-
 arch/mips/boot/compressed/Makefile            |    3 +-
 arch/mips/include/asm/dsemul.h                |   23 +
 arch/mips/include/asm/fpu.h                   |    4 +-
 arch/mips/include/asm/fpu_emulator.h          |   11 +
 arch/mips/include/asm/mach-generic/spaces.h   |    2 +-
 arch/mips/include/asm/string.h                |   38 +
 arch/mips/kernel/head.S                       |    6 +
 arch/mips/kernel/machine_kexec.c              |  140 +-
 arch/mips/kernel/machine_kexec.h              |   20 +
 arch/mips/kernel/relocate_kernel.S            |   21 +-
 arch/mips/lib/Makefile                        |    2 +-
 arch/mips/lib/memcmp.c                        |   22 +
 arch/mips/mm/cache.c                          |    7 +
 arch/mips/pci/pci-legacy.c                    |    2 +-
 arch/mips/vdso/Makefile                       |    4 +-
 crypto/Kconfig                                |   23 +-
 crypto/algboss.c                              |    4 +
 drivers/ata/Kconfig                           |   16 +
 drivers/ata/libata-core.c                     |   41 +
 drivers/base/regmap/Kconfig                   |   18 +-
 drivers/base/regmap/Makefile                  |   11 +-
 drivers/base/regmap/regcache.c                |    2 +
 drivers/base/regmap/regmap.c                  |    3 +
 drivers/bcma/Kconfig                          |    1 +
 drivers/leds/Makefile                         |    1 +
 drivers/leds/led-class.c                      |   26 +-
 drivers/leds/leds-gpio.c                      |   12 +-
 drivers/leds/ledtrig-netdev.c                 |  444 ++++
 drivers/leds/trigger/Kconfig                  |    7 +
 drivers/mtd/chips/cfi_cmdset_0002.c           |    3 +-
 drivers/mtd/devices/block2mtd.c               |   33 +-
 drivers/mtd/maps/physmap_of.c                 |   46 +-
 drivers/mtd/mtdcore.c                         |   10 +
 drivers/mtd/mtdpart.c                         |  194 +-
 drivers/mtd/ofpart.c                          |    1 +
 drivers/mtd/ubi/attach.c                      |   25 +-
 drivers/mtd/ubi/block.c                       |   53 +
 drivers/mtd/ubi/build.c                       |   49 +
 drivers/mtd/ubi/ubi.h                         |    1 +
 drivers/net/phy/phy.c                         |   44 +
 drivers/net/phy/phy_device.c                  |    3 +
 drivers/net/ppp/pppoe.c                       |    4 +-
 drivers/of/of_net.c                           |   77 +-
 drivers/pci/Kconfig                           |    6 +
 drivers/pci/quirks.c                          |    6 +
 drivers/spi/spi.c                             |    2 +-
 drivers/spi/spidev.c                          |    1 +
 drivers/ssb/Kconfig                           |    3 +-
 drivers/usb/core/ledtrig-usbport.c            |   56 +
 drivers/usb/core/urb.c                        |    8 -
 drivers/usb/dwc2/hcd.c                        |    1 -
 drivers/usb/host/ehci-hcd.c                   |    2 +-
 drivers/usb/host/ehci-hub.c                   |    4 +-
 drivers/usb/host/ehci-platform.c              |    2 +
 drivers/usb/host/ehci.h                       |    1 +
 drivers/usb/host/pci-quirks.c                 |   17 +
 drivers/usb/host/pci-quirks.h                 |   16 +
 fs/jffs2/Kconfig                              |    9 +
 fs/jffs2/Makefile                             |    3 +
 fs/jffs2/build.c                              |   10 +
 fs/jffs2/compr.c                              |    6 +
 fs/jffs2/compr.h                              |   10 +-
 fs/jffs2/compr_lzma.c                         |  128 +
 fs/jffs2/dir.c                                |   55 +-
 fs/jffs2/scan.c                               |   21 +-
 fs/jffs2/super.c                              |   33 +-
 fs/ubifs/io.c                                 |   18 +-
 fs/ubifs/sb.c                                 |   13 +-
 fs/ubifs/ubifs.h                              |    9 -
 include/linux/etherdevice.h                   |    5 +
 include/linux/if_bridge.h                     |    2 +
 include/linux/leds.h                          |   14 +-
 include/linux/libata.h                        |    9 +
 include/linux/lzma.h                          |   62 +
 include/linux/lzma/LzFind.h                   |   98 +
 include/linux/lzma/LzHash.h                   |   54 +
 include/linux/lzma/LzmaDec.h                  |  130 +
 include/linux/lzma/LzmaEnc.h                  |   60 +
 include/linux/lzma/Types.h                    |  226 ++
 include/linux/module.h                        |   13 +-
 include/linux/moduleparam.h                   |   15 +-
 include/linux/mtd/mtd.h                       |   13 +
 include/linux/mtd/partitions.h                |    1 +
 include/linux/netdevice.h                     |    2 +
 include/linux/phy.h                           |    7 +
 include/linux/regmap.h                        |    2 +-
 include/linux/rfkill.h                        |    2 +-
 include/linux/serial_core.h                   |    2 +-
 include/linux/skbuff.h                        |    5 +-
 include/linux/tcp.h                           |   12 +-
 include/linux/usb/ehci_pdriver.h              |    1 +
 include/linux/usb/hcd.h                       |    7 +
 include/net/ip6_tunnel.h                      |   13 +
 include/net/netfilter/nf_conntrack_extend.h   |    4 +
 include/net/netfilter/nf_conntrack_rtcache.h  |   34 +
 include/net/netns/ipv6.h                      |    1 +
 include/net/sock.h                            |   51 +-
 include/uapi/linux/fib_rules.h                |    6 +
 include/uapi/linux/icmpv6.h                   |    2 +
 include/uapi/linux/if_link.h                  |    1 +
 include/uapi/linux/if_packet.h                |    3 +
 include/uapi/linux/if_tunnel.h                |   13 +
 include/uapi/linux/jffs2.h                    |    1 +
 include/uapi/linux/netfilter_ipv4/ip_tables.h |    1 +
 include/uapi/linux/rtnetlink.h                |    3 +
 include/uapi/linux/spi/spidev.h               |    2 +-
 init/Kconfig                                  |   18 +
 init/do_mounts.c                              |   27 +-
 kernel/kallsyms.c                             |    8 +
 kernel/module.c                               |    5 +-
 lib/Kconfig                                   |    6 +
 lib/Kconfig.debug                             |    5 +
 lib/Makefile                                  |   12 +
 lib/decompress.c                              |    1 +
 lib/lzma/LzFind.c                             |  522 ++++
 lib/lzma/LzmaDec.c                            |  925 +++++++
 lib/lzma/LzmaEnc.c                            | 2123 +++++++++++++++++
 lib/lzma/Makefile                             |    7 +
 lib/vsprintf.c                                |   15 +-
 net/Makefile                                  |    2 +-
 net/bridge/br_forward.c                       |   42 +-
 net/bridge/br_input.c                         |   22 +-
 net/bridge/br_mdb.c                           |    2 +-
 net/bridge/br_multicast.c                     |   96 +-
 net/bridge/br_netlink.c                       |    5 +
 net/bridge/br_private.h                       |    8 +-
 net/bridge/br_sysfs_if.c                      |    4 +
 net/core/dev.c                                |   48 +
 net/ethernet/eth.c                            |   46 +-
 net/ipv4/fib_semantics.c                      |    4 +
 net/ipv4/fib_trie.c                           |    1 +
 net/ipv4/ipmr.c                               |    1 +
 net/ipv4/netfilter/ip_tables.c                |  120 +-
 net/ipv4/tcp.c                                |    4 +-
 net/ipv4/tcp_ipv4.c                           |    2 +-
 net/ipv4/tcp_output.c                         |   92 +-
 net/ipv4/tcp_timer.c                          |    4 +-
 net/ipv6/fib6_rules.c                         |    4 +
 net/ipv6/ip6_tunnel.c                         |  277 ++-
 net/ipv6/ip6mr.c                              |    2 +
 net/ipv6/route.c                              |   70 +-
 net/ipv6/tcp_ipv6.c                           |    2 +-
 net/netfilter/Kconfig                         |   16 +-
 net/netfilter/Makefile                        |    3 +
 net/netfilter/nf_conntrack_proto_tcp.c        |   13 +
 net/netfilter/nf_conntrack_rtcache.c          |  413 ++++
 net/netfilter/nf_conntrack_standalone.c       |   59 +-
 net/netfilter/nf_nat_core.c                   |    3 +
 net/packet/af_packet.c                        |   35 +-
 net/packet/internal.h                         |    1 +
 net/rfkill/Kconfig                            |   14 +-
 net/rfkill/Makefile                           |    2 +-
 net/wireless/Kconfig                          |   19 +-
 scripts/Makefile.build                        |    2 +-
 scripts/Makefile.lib                          |    2 +-
 scripts/gen_initramfs_list.sh                 |   10 +-
 scripts/kallsyms.c                            |   12 +
 scripts/ld-version.sh                         |    4 +-
 scripts/link-vmlinux.sh                       |    4 +
 scripts/mod/modpost.c                         |   12 +
 sound/core/Kconfig                            |    4 +-
 tools/build/Build.include                     |    2 +-
 tools/include/tools/be_byteshift.h            |    4 +
 tools/include/tools/le_byteshift.h            |    4 +
 tools/include/tools/linux_types.h             |   22 +
 tools/perf/pmu-events/jevents.c               |    1 +
 tools/perf/pmu-events/json.c                  |    1 -
 usr/Makefile                                  |    8 +-
 172 files changed, 7595 insertions(+), 435 deletions(-)
 create mode 100644 arch/mips/kernel/machine_kexec.h
 create mode 100644 arch/mips/lib/memcmp.c
 create mode 100644 drivers/leds/ledtrig-netdev.c
 create mode 100644 fs/jffs2/compr_lzma.c
 create mode 100644 include/linux/lzma.h
 create mode 100644 include/linux/lzma/LzFind.h
 create mode 100644 include/linux/lzma/LzHash.h
 create mode 100644 include/linux/lzma/LzmaDec.h
 create mode 100644 include/linux/lzma/LzmaEnc.h
 create mode 100644 include/linux/lzma/Types.h
 create mode 100644 include/net/netfilter/nf_conntrack_rtcache.h
 create mode 100644 lib/lzma/LzFind.c
 create mode 100644 lib/lzma/LzmaDec.c
 create mode 100644 lib/lzma/LzmaEnc.c
 create mode 100644 lib/lzma/Makefile
 create mode 100644 net/netfilter/nf_conntrack_rtcache.c
 create mode 100644 tools/include/tools/linux_types.h

diff --git a/Documentation/devicetree/bindings/mtd/nand.txt b/Documentation/devicetree/bindings/mtd/nand.txt
index b05601600..c096ca872 100644
--- a/Documentation/devicetree/bindings/mtd/nand.txt
+++ b/Documentation/devicetree/bindings/mtd/nand.txt
@@ -44,6 +44,22 @@ Optional NAND chip properties:
 		     used by the upper layers, and you want to make your NAND
 		     as reliable as possible.
 
+- linux,part-probe: list of names, as strings, of the partition parsers
+		    which should be used to parse the partition table.
+		    They are tried in the specified order, and the
+		    next one is used only if the previous one
+		    failed.
+
+		    Example: linux,part-probe = "cmdlinepart", "ofpart";
+
+		    This is also the default value, used when this
+		    attribute is not specified. Note that the flash
+		    driver in use may override this default with a
+		    default of its own.
+
+		    Possible values are: bcm47xxpart, afs, ar7part,
+		    ofoldpart, ofpart, bcm63xxpart, RedBoot, cmdlinepart
+
 The ECC strength and ECC step size properties define the correction capability
 of a controller. Together, they say a controller can correct "{strength} bit
 errors per {size} bytes".
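
Annotation (not part of the patch): one plausible way for code to walk the
"linux,part-probe" list is with the generic OF string helpers. The helper
below and its name are invented for illustration; the in-tree handling
appears to be part of the drivers/mtd/mtdcore.c changes listed in the
diffstat, which this sketch does not reproduce.

  /* Illustrative sketch only: collect the parser names from the DT node. */
  #include <linux/of.h>
  #include <linux/slab.h>

  static const char **get_part_probes(struct device_node *np)
  {
  	const char **probes;
  	int count, i;

  	count = of_property_count_strings(np, "linux,part-probe");
  	if (count < 0)
  		return NULL;	/* property absent: fall back to the defaults */

  	probes = kcalloc(count + 1, sizeof(*probes), GFP_KERNEL);
  	if (!probes)
  		return NULL;

  	for (i = 0; i < count; i++)
  		of_property_read_string_index(np, "linux,part-probe",
  					      i, &probes[i]);

  	return probes;	/* NULL-terminated, e.g. "cmdlinepart", "ofpart" */
  }
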
diff --git a/Makefile b/Makefile
index 900cd7c3a..bee0380f2 100644
--- a/Makefile
+++ b/Makefile
@@ -407,15 +407,15 @@ KBUILD_CFLAGS_KERNEL :=
 KBUILD_AFLAGS   := -D__ASSEMBLY__ $(call cc-option,-fno-PIE)
 KBUILD_AFLAGS_MODULE  := -DMODULE
 KBUILD_CFLAGS_MODULE  := -DMODULE
-KBUILD_LDFLAGS_MODULE := -T $(srctree)/scripts/module-common.lds
+KBUILD_LDFLAGS_MODULE = -T $(srctree)/scripts/module-common.lds $(if $(CONFIG_PROFILING),,-s)
 
 # Read KERNELRELEASE from include/config/kernel.release (if it exists)
 KERNELRELEASE = $(shell cat include/config/kernel.release 2> /dev/null)
 KERNELVERSION = $(VERSION)$(if $(PATCHLEVEL),.$(PATCHLEVEL)$(if $(SUBLEVEL),.$(SUBLEVEL)))$(EXTRAVERSION)
 
 export VERSION PATCHLEVEL SUBLEVEL KERNELRELEASE KERNELVERSION
-export ARCH SRCARCH CONFIG_SHELL HOSTCC HOSTCFLAGS CROSS_COMPILE AS LD CC
-export CPP AR NM STRIP OBJCOPY OBJDUMP
+export ARCH SRCARCH SUBARCH CONFIG_SHELL HOSTCC HOSTCFLAGS CROSS_COMPILE AS LD
+export CC CPP AR NM STRIP OBJCOPY OBJDUMP
 export MAKE AWK GENKSYMS INSTALLKERNEL PERL PYTHON UTS_MACHINE
 export HOSTCXX HOSTCXXFLAGS LDFLAGS_MODULE CHECK CHECKFLAGS
 
@@ -639,12 +639,12 @@ KBUILD_CFLAGS	+= $(call cc-option,-fdata-sections,)
 endif
 
 ifdef CONFIG_CC_OPTIMIZE_FOR_SIZE
-KBUILD_CFLAGS	+= -Os $(call cc-disable-warning,maybe-uninitialized,)
+KBUILD_CFLAGS	+= -Os $(call cc-disable-warning,maybe-uninitialized,) $(EXTRA_OPTIMIZATION)
 else
 ifdef CONFIG_PROFILE_ALL_BRANCHES
-KBUILD_CFLAGS	+= -O2 $(call cc-disable-warning,maybe-uninitialized,)
+KBUILD_CFLAGS	+= -O2 $(call cc-disable-warning,maybe-uninitialized,) $(EXTRA_OPTIMIZATION)
 else
-KBUILD_CFLAGS   += -O2
+KBUILD_CFLAGS   += -O2 -fno-reorder-blocks -fno-tree-ch $(EXTRA_OPTIMIZATION)
 endif
 endif
 
@@ -1196,7 +1196,6 @@ all: modules
 
 PHONY += modules
 modules: $(vmlinux-dirs) $(if $(KBUILD_BUILTIN),vmlinux) modules.builtin
-	$(Q)$(AWK) '!x[$$0]++' $(vmlinux-dirs:%=$(objtree)/%/modules.order) > $(objtree)/modules.order
 	@$(kecho) '  Building modules, stage 2.';
 	$(Q)$(MAKE) -f $(srctree)/scripts/Makefile.modpost
 	$(Q)$(MAKE) -f $(srctree)/scripts/Makefile.fwinst obj=firmware __fw_modbuild
@@ -1226,7 +1225,6 @@ _modinst_:
 		rm -f $(MODLIB)/build ; \
 		ln -s $(CURDIR) $(MODLIB)/build ; \
 	fi
-	@cp -f $(objtree)/modules.order $(MODLIB)/
 	@cp -f $(objtree)/modules.builtin $(MODLIB)/
 	$(Q)$(MAKE) -f $(srctree)/scripts/Makefile.modinst
 
diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig
index 0e57620ac..22e529be1 100755
--- a/arch/mips/Kconfig
+++ b/arch/mips/Kconfig
@@ -1151,6 +1151,10 @@ config SYNC_R4K
 config MIPS_MACHINE
 	def_bool n
 
+config IMAGE_CMDLINE_HACK
+	bool "OpenWrt specific image command line hack"
+	default n
+
 config NO_IOPORT_MAP
 	def_bool n
 
@@ -2948,6 +2952,20 @@ config MIPS_O32_FP64_SUPPORT
 
 	  If unsure, say N.
 
+config MIPS_FPU_EMULATOR
+	bool "MIPS FPU Emulator"
+	default y
+	help
+	  This option lets you disable the built-in MIPS FPU (Coprocessor 1)
+	  emulator, which handles floating-point instructions on processors
+	  without a hardware FPU.  It is generally a good idea to keep the
+	  emulator built-in, unless you are perfectly sure you have a
+	  complete soft-float environment.  With the emulator disabled, all
+	  users of floating-point operations will be killed with an
+	  illegal instruction exception.
+
+	  Say Y, please.
+
 config USE_OF
 	bool
 	select OF
diff --git a/arch/mips/Makefile b/arch/mips/Makefile
index 1a6bac7b0..fcd7eafa6 100644
--- a/arch/mips/Makefile
+++ b/arch/mips/Makefile
@@ -90,7 +90,7 @@ all-$(CONFIG_SYS_SUPPORTS_ZBOOT)+= vmlinuz
 # machines may also.  Since BFD is incredibly buggy with respect to
 # crossformat linking we rely on the elf2ecoff tool for format conversion.
 #
-cflags-y			+= -G 0 -mno-abicalls -fno-pic -pipe
+cflags-y			+= -G 0 -mno-abicalls -fno-pic -pipe -mno-branch-likely
 cflags-y			+= -msoft-float
 LDFLAGS_vmlinux			+= -G 0 -static -n -nostdlib
 KBUILD_AFLAGS_MODULE		+= -mlong-calls
@@ -143,7 +143,7 @@ cflags-$(CONFIG_CPU_R4X00)	+= -march=r4600 -Wa,--trap
 cflags-$(CONFIG_CPU_TX49XX)	+= -march=r4600 -Wa,--trap
 cflags-$(CONFIG_CPU_MIPS32_R1)	+= $(call cc-option,-march=mips32,-mips32 -U_MIPS_ISA -D_MIPS_ISA=_MIPS_ISA_MIPS32) \
 			-Wa,-mips32 -Wa,--trap
-cflags-$(CONFIG_CPU_MIPS32_R2)	+= $(call cc-option,-march=mips32r2,-mips32r2 -U_MIPS_ISA -D_MIPS_ISA=_MIPS_ISA_MIPS32) \
+cflags-$(CONFIG_CPU_MIPS32_R2)	+= $(call cc-option,-march=mips32r2 -mtune=34kc,-mips32r2 -U_MIPS_ISA -D_MIPS_ISA=_MIPS_ISA_MIPS32) \
 			-Wa,-mips32r2 -Wa,--trap
 cflags-$(CONFIG_CPU_MIPS32_R6)	+= -march=mips32r6 -Wa,--trap
 cflags-$(CONFIG_CPU_MIPS64_R1)	+= $(call cc-option,-march=mips64,-mips64 -U_MIPS_ISA -D_MIPS_ISA=_MIPS_ISA_MIPS64) \
@@ -287,7 +287,7 @@ OBJCOPYFLAGS		+= --remove-section=.reginfo
 head-y := arch/mips/kernel/head.o
 
 libs-y			+= arch/mips/lib/
-libs-y			+= arch/mips/math-emu/
+libs-$(CONFIG_MIPS_FPU_EMULATOR)	+= arch/mips/math-emu/
 
 # See arch/mips/Kbuild for content of core part of the kernel
 core-y += arch/mips/
diff --git a/arch/mips/boot/compressed/Makefile b/arch/mips/boot/compressed/Makefile
index 90aca95fe..3cd3b391e 100644
--- a/arch/mips/boot/compressed/Makefile
+++ b/arch/mips/boot/compressed/Makefile
@@ -23,7 +23,8 @@ KBUILD_CFLAGS := $(shell echo $(KBUILD_CFLAGS) | sed -e "s/-pg//")
 KBUILD_CFLAGS := $(filter-out -fstack-protector, $(KBUILD_CFLAGS))
 
 KBUILD_CFLAGS := $(LINUXINCLUDE) $(KBUILD_CFLAGS) -D__KERNEL__ \
-	-DBOOT_HEAP_SIZE=$(BOOT_HEAP_SIZE) -D"VMLINUX_LOAD_ADDRESS_ULL=$(VMLINUX_LOAD_ADDRESS)ull"
+	-DBOOT_HEAP_SIZE=$(BOOT_HEAP_SIZE) -D"VMLINUX_LOAD_ADDRESS_ULL=$(VMLINUX_LOAD_ADDRESS)ull" \
+	-D__ZBOOT__
 
 KBUILD_AFLAGS := $(LINUXINCLUDE) $(KBUILD_AFLAGS) -D__ASSEMBLY__ \
 	-DBOOT_HEAP_SIZE=$(BOOT_HEAP_SIZE) \
diff --git a/arch/mips/include/asm/dsemul.h b/arch/mips/include/asm/dsemul.h
index a6e067801..d388fbe3b 100644
--- a/arch/mips/include/asm/dsemul.h
+++ b/arch/mips/include/asm/dsemul.h
@@ -41,6 +41,7 @@ struct task_struct;
 extern int mips_dsemul(struct pt_regs *regs, mips_instruction ir,
 		       unsigned long branch_pc, unsigned long cont_pc);
 
+#ifdef CONFIG_MIPS_FPU_EMULATOR
 /**
  * do_dsemulret() - Return from a delay slot 'emulation' frame
  * @xcp:	User thread register context.
@@ -88,5 +89,27 @@ extern bool dsemul_thread_rollback(struct pt_regs *regs);
  * before @mm is freed in order to avoid memory leaks.
  */
 extern void dsemul_mm_cleanup(struct mm_struct *mm);
+#else
+static inline bool do_dsemulret(struct pt_regs *xcp)
+{
+	return false;
+}
+
+static inline bool dsemul_thread_cleanup(struct task_struct *tsk)
+{
+	return false;
+}
+
+static inline bool dsemul_thread_rollback(struct pt_regs *regs)
+{
+	return false;
+}
+
+static inline void dsemul_mm_cleanup(struct mm_struct *mm)
+{
+
+}
+
+#endif
 
 #endif /* __MIPS_ASM_DSEMUL_H__ */
diff --git a/arch/mips/include/asm/fpu.h b/arch/mips/include/asm/fpu.h
index f06f97bd6..f41f2387b 100644
--- a/arch/mips/include/asm/fpu.h
+++ b/arch/mips/include/asm/fpu.h
@@ -227,8 +227,10 @@ static inline int init_fpu(void)
 		/* Restore FRE */
 		write_c0_config5(config5);
 		enable_fpu_hazard();
-	} else
+	} else if (IS_ENABLED(CONFIG_MIPS_FPU_EMULATOR))
 		fpu_emulator_init_fpu();
+	else
+		ret = SIGILL;
 
 	return ret;
 }
diff --git a/arch/mips/include/asm/fpu_emulator.h b/arch/mips/include/asm/fpu_emulator.h
index c05369e0b..966891f71 100644
--- a/arch/mips/include/asm/fpu_emulator.h
+++ b/arch/mips/include/asm/fpu_emulator.h
@@ -30,6 +30,7 @@
 #include <asm/local.h>
 #include <asm/processor.h>
 
+#ifdef CONFIG_MIPS_FPU_EMULATOR
 #ifdef CONFIG_DEBUG_FS
 
 struct mips_fpu_emulator_stats {
@@ -63,6 +64,16 @@ do {									\
 extern int fpu_emulator_cop1Handler(struct pt_regs *xcp,
 				    struct mips_fpu_struct *ctx, int has_fpu,
 				    void *__user *fault_addr);
+#else	/* no CONFIG_MIPS_FPU_EMULATOR */
+static inline int fpu_emulator_cop1Handler(struct pt_regs *xcp,
+				struct mips_fpu_struct *ctx, int has_fpu,
+				void *__user *fault_addr)
+{
+	*fault_addr = NULL;
+	return SIGILL;	/* we don't speak MIPS FPU */
+}
+#endif	/* CONFIG_MIPS_FPU_EMULATOR */
+
 void force_fcr31_sig(unsigned long fcr31, void __user *fault_addr,
 		     struct task_struct *tsk);
 int process_fpemu_return(int sig, void __user *fault_addr,
diff --git a/arch/mips/include/asm/mach-generic/spaces.h b/arch/mips/include/asm/mach-generic/spaces.h
index 3d6d3b0f4..dd9cc0ae6 100755
--- a/arch/mips/include/asm/mach-generic/spaces.h
+++ b/arch/mips/include/asm/mach-generic/spaces.h
@@ -48,7 +48,7 @@
  * Memory above this physical address will be considered highmem.
  */
 #ifndef HIGHMEM_START
-#define HIGHMEM_START		_AC(0x20000000, UL)
+#define HIGHMEM_START		_AC(0x10000000, UL)
 #endif
 
 #endif /* CONFIG_32BIT */
diff --git a/arch/mips/include/asm/string.h b/arch/mips/include/asm/string.h
index 29030cb39..7b737f9b6 100644
--- a/arch/mips/include/asm/string.h
+++ b/arch/mips/include/asm/string.h
@@ -140,4 +140,42 @@ extern void *memcpy(void *__to, __const__ void *__from, size_t __n);
 #define __HAVE_ARCH_MEMMOVE
 extern void *memmove(void *__dest, __const__ void *__src, size_t __n);
 
+#ifndef __ZBOOT__
+#define memset(__s, __c, len)					\
+({								\
+	size_t __len = (len);					\
+	void *__ret;						\
+	if (__builtin_constant_p(len) && __len >= 64)		\
+		__ret = memset((__s), (__c), __len);		\
+	else							\
+		__ret = __builtin_memset((__s), (__c), __len);	\
+	__ret;							\
+})
+
+#define memcpy(dst, src, len)					\
+({								\
+	size_t __len = (len);					\
+	void *__ret;						\
+	if (__builtin_constant_p(len) && __len >= 64)		\
+		__ret = memcpy((dst), (src), __len);		\
+	else							\
+		__ret = __builtin_memcpy((dst), (src), __len);	\
+	__ret;							\
+})
+
+#define memmove(dst, src, len)					\
+({								\
+	size_t __len = (len);					\
+	void *__ret;						\
+	if (__builtin_constant_p(len) && __len >= 64)		\
+		__ret = memmove((dst), (src), __len);		\
+	else							\
+		__ret = __builtin_memmove((dst), (src), __len);	\
+	__ret;							\
+})
+
+#define __HAVE_ARCH_MEMCMP
+#define memcmp(src1, src2, len) __builtin_memcmp((src1), (src2), (len))
+#endif
+
 #endif /* _ASM_STRING_H */
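
Annotation (not part of the patch): the macros above only redirect copies
with a constant size of at least 64 bytes to the hand-written arch routines;
everything else goes to the compiler builtins. A minimal illustration of
what that means at a call site, with struct names invented for the example:

  #include <linux/string.h>

  struct small_hdr  { char bytes[16]; };
  struct large_blob { char bytes[256]; };

  static void copy_examples(struct small_hdr *dh, const struct small_hdr *sh,
  			  struct large_blob *db, const struct large_blob *sb)
  {
  	memcpy(dh, sh, sizeof(*dh));	/* 16 < 64: __builtin_memcpy(), usually inlined */
  	memcpy(db, sb, sizeof(*db));	/* 256 >= 64: calls the arch memcpy() */
  }
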
diff --git a/arch/mips/kernel/head.S b/arch/mips/kernel/head.S
index b075fb246..ff028a9cf 100755
--- a/arch/mips/kernel/head.S
+++ b/arch/mips/kernel/head.S
@@ -79,6 +79,12 @@ FEXPORT(__kernel_entry)
 	j	kernel_entry
 #endif
 
+#ifdef CONFIG_IMAGE_CMDLINE_HACK
+	.ascii	"CMDLINE:"
+EXPORT(__image_cmdline)
+	.fill	0x400
+#endif /* CONFIG_IMAGE_CMDLINE_HACK */
+
 	__REF
 
 NESTED(kernel_entry, 16, sp)			# kernel entry point
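
Annotation (not part of the patch): the block above only reserves a
0x400-byte, "CMDLINE:"-tagged window in the image that an image tool can
patch; platform code still has to copy it into the boot command line. A
hedged sketch of that consumer side follows. The helper name and the
leading '-' "replace" convention are assumptions, not something this hunk
defines.

  #include <linux/init.h>
  #include <linux/string.h>
  #include <asm/bootinfo.h>

  extern char __image_cmdline[];

  static void __init append_image_cmdline(void)
  {
  	char *p = __image_cmdline;

  	if (*p == '-') {
  		/* assumed convention: leading '-' replaces the firmware command line */
  		strlcpy(arcs_cmdline, p + 1, COMMAND_LINE_SIZE);
  	} else if (*p) {
  		strlcat(arcs_cmdline, " ", COMMAND_LINE_SIZE);
  		strlcat(arcs_cmdline, p, COMMAND_LINE_SIZE);
  	}
  }
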
diff --git a/arch/mips/kernel/machine_kexec.c b/arch/mips/kernel/machine_kexec.c
index 597252041..0bf669fe3 100644
--- a/arch/mips/kernel/machine_kexec.c
+++ b/arch/mips/kernel/machine_kexec.c
@@ -10,14 +10,11 @@
 #include <linux/mm.h>
 #include <linux/delay.h>
 
+#include <asm/bootinfo.h>
 #include <asm/cacheflush.h>
 #include <asm/page.h>
-
-extern const unsigned char relocate_new_kernel[];
-extern const size_t relocate_new_kernel_size;
-
-extern unsigned long kexec_start_address;
-extern unsigned long kexec_indirection_page;
+#include <asm/uaccess.h>
+#include "machine_kexec.h"
 
 int (*_machine_kexec_prepare)(struct kimage *) = NULL;
 void (*_machine_kexec_shutdown)(void) = NULL;
@@ -28,9 +25,115 @@ atomic_t kexec_ready_to_reboot = ATOMIC_INIT(0);
 void (*_crash_smp_send_stop)(void) = NULL;
 #endif
 
+static void machine_kexec_print_args(void)
+{
+	unsigned long argc = (int)kexec_args[0];
+	int i;
+
+	pr_info("kexec_args[0] (argc): %lu\n", argc);
+	pr_info("kexec_args[1] (argv): %p\n", (void *)kexec_args[1]);
+	pr_info("kexec_args[2] (env ): %p\n", (void *)kexec_args[2]);
+	pr_info("kexec_args[3] (desc): %p\n", (void *)kexec_args[3]);
+
+	for (i = 0; i < argc; i++) {
+		pr_info("kexec_argv[%d] = %p, %s\n",
+				i, kexec_argv[i], kexec_argv[i]);
+	}
+}
+
+static void machine_kexec_init_argv(struct kimage *image)
+{
+	void __user *buf = NULL;
+	size_t bufsz;
+	size_t size;
+	int i;
+
+	bufsz = 0;
+	for (i = 0; i < image->nr_segments; i++) {
+		struct kexec_segment *seg;
+
+		seg = &image->segment[i];
+		if (seg->bufsz < 6)
+			continue;
+
+		if (strncmp((char *) seg->buf, "kexec ", 6))
+			continue;
+
+		buf = seg->buf;
+		bufsz = seg->bufsz;
+		break;
+	}
+
+	if (!buf)
+		return;
+
+	size = KEXEC_COMMAND_LINE_SIZE;
+	size = min(size, bufsz);
+	if (size < bufsz)
+		pr_warn("kexec command line truncated to %zd bytes\n", size);
+
+	/* Copy to kernel space */
+	copy_from_user(kexec_argv_buf, buf, size);
+	kexec_argv_buf[size - 1] = 0;
+}
+
+static void machine_kexec_parse_argv(struct kimage *image)
+{
+	char *reboot_code_buffer;
+	int reloc_delta;
+	char *ptr;
+	int argc;
+	int i;
+
+	ptr = kexec_argv_buf;
+	argc = 0;
+
+	/*
+	 * convert command line string to array of parameters
+	 * (as bootloader does).
+	 */
+	while (ptr && *ptr && (argc < KEXEC_MAX_ARGC)) {
+		if (*ptr == ' ') {
+			*ptr++ = '\0';
+			continue;
+		}
+
+		kexec_argv[argc++] = ptr;
+		ptr = strchr(ptr, ' ');
+	}
+
+	if (!argc)
+		return;
+
+	kexec_args[0] = argc;
+	kexec_args[1] = (unsigned long)kexec_argv;
+	kexec_args[2] = 0;
+	kexec_args[3] = 0;
+
+	reboot_code_buffer = page_address(image->control_code_page);
+	reloc_delta = reboot_code_buffer - (char *)kexec_relocate_new_kernel;
+
+	kexec_args[1] += reloc_delta;
+	for (i = 0; i < argc; i++)
+		kexec_argv[i] += reloc_delta;
+}
+
 int
 machine_kexec_prepare(struct kimage *kimage)
 {
+	/*
+	 * Initialize the arguments with the original firmware values first,
+	 * so that booting can still succeed if kexec-tools passed none.
+	 */
+
+	kexec_args[0] = fw_arg0;
+	kexec_args[1] = fw_arg1;
+	kexec_args[2] = fw_arg2;
+	kexec_args[3] = fw_arg3;
+
+	machine_kexec_init_argv(kimage);
+	machine_kexec_parse_argv(kimage);
+
 	if (_machine_kexec_prepare)
 		return _machine_kexec_prepare(kimage);
 	return 0;
@@ -67,10 +170,12 @@ machine_kexec(struct kimage *image)
 	unsigned long *ptr;
 
 	reboot_code_buffer =
-	  (unsigned long)page_address(image->control_code_page);
+		(unsigned long)page_address(image->control_code_page);
+	pr_info("reboot_code_buffer = %p\n", (void *)reboot_code_buffer);
 
 	kexec_start_address =
 		(unsigned long) phys_to_virt(image->start);
+	pr_info("kexec_start_address = %p\n", (void *)kexec_start_address);
 
 	if (image->type == KEXEC_TYPE_DEFAULT) {
 		kexec_indirection_page =
@@ -78,9 +183,19 @@ machine_kexec(struct kimage *image)
 	} else {
 		kexec_indirection_page = (unsigned long)&image->head;
 	}
+	pr_info("kexec_indirection_page = %p\n", (void *)kexec_indirection_page);
+
+	pr_info("Where is memcpy: %p\n", memcpy);
+	pr_info("kexec_relocate_new_kernel = %p, kexec_relocate_new_kernel_end = %p\n",
+		(void *)kexec_relocate_new_kernel, &kexec_relocate_new_kernel_end);
+	pr_info("Copy %lu bytes from %p to %p\n", KEXEC_RELOCATE_NEW_KERNEL_SIZE,
+		(void *)kexec_relocate_new_kernel, (void *)reboot_code_buffer);
+	memcpy((void *)reboot_code_buffer, kexec_relocate_new_kernel,
+	       KEXEC_RELOCATE_NEW_KERNEL_SIZE);
 
-	memcpy((void*)reboot_code_buffer, relocate_new_kernel,
-	       relocate_new_kernel_size);
+	pr_info("Before _print_args().\n");
+	machine_kexec_print_args();
+	pr_info("Before eval loop.\n");
 
 	/*
 	 * The generic kexec code builds a page list with physical
@@ -99,15 +214,16 @@ machine_kexec(struct kimage *image)
 	/*
 	 * we do not want to be bothered.
 	 */
+	pr_info("Before irq_disable.\n");
 	local_irq_disable();
 
-	printk("Will call new kernel at %08lx\n", image->start);
-	printk("Bye ...\n");
+	pr_info("Will call new kernel at %08lx\n", image->start);
+	pr_info("Bye ...\n");
 	__flush_cache_all();
 #ifdef CONFIG_SMP
 	/* All secondary cpus now may jump to kexec_wait cycle */
 	relocated_kexec_smp_wait = reboot_code_buffer +
-		(void *)(kexec_smp_wait - relocate_new_kernel);
+		(void *)(kexec_smp_wait - kexec_relocate_new_kernel);
 	smp_wmb();
 	atomic_set(&kexec_ready_to_reboot, 1);
 #endif
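
Annotation (not part of the patch): to make the argv handling above
concrete, this is the shape of the segment machine_kexec_init_argv() looks
for and what machine_kexec_parse_argv() turns it into. The segment payload
below is a made-up example; producing such a segment (for instance from a
patched kexec-tools) is outside this patch.

  static const char example_cmdline_segment[] =
  	"kexec console=ttyS0,115200 root=/dev/mtdblock6 rootfstype=squashfs";

  /*
   * After machine_kexec_parse_argv():
   *   kexec_args[0] = 4        (argc, counting the leading "kexec")
   *   kexec_argv[0] = "kexec"
   *   kexec_argv[1] = "console=ttyS0,115200"
   *   kexec_argv[2] = "root=/dev/mtdblock6"
   *   kexec_argv[3] = "rootfstype=squashfs"
   * with kexec_args[1] pointing at the relocated kexec_argv array.
   */
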
diff --git a/arch/mips/kernel/machine_kexec.h b/arch/mips/kernel/machine_kexec.h
new file mode 100644
index 000000000..ae0961e29
--- /dev/null
+++ b/arch/mips/kernel/machine_kexec.h
@@ -0,0 +1,20 @@
+#ifndef _MACHINE_KEXEC_H
+#define _MACHINE_KEXEC_H
+
+#ifndef __ASSEMBLY__
+extern const unsigned char kexec_relocate_new_kernel[];
+extern unsigned long kexec_relocate_new_kernel_end;
+extern unsigned long kexec_start_address;
+extern unsigned long kexec_indirection_page;
+
+extern char kexec_argv_buf[];
+extern char *kexec_argv[];
+
+#define KEXEC_RELOCATE_NEW_KERNEL_SIZE	((unsigned long)&kexec_relocate_new_kernel_end - (unsigned long)kexec_relocate_new_kernel)
+#endif /* !__ASSEMBLY__ */
+
+#define KEXEC_COMMAND_LINE_SIZE		256
+#define KEXEC_ARGV_SIZE			(KEXEC_COMMAND_LINE_SIZE / 16)
+#define KEXEC_MAX_ARGC			(KEXEC_ARGV_SIZE / sizeof(long))
+
+#endif
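
Annotation (not part of the patch): worked out, the limits defined above are
tighter than they may look. KEXEC_COMMAND_LINE_SIZE is 256 bytes,
KEXEC_ARGV_SIZE is 256 / 16 = 16 bytes of pointer storage, and
KEXEC_MAX_ARGC is 16 / sizeof(long), i.e. 4 entries on 32-bit MIPS and 2 on
64-bit MIPS, including the leading "kexec" token.
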
diff --git a/arch/mips/kernel/relocate_kernel.S b/arch/mips/kernel/relocate_kernel.S
index c6bbf2165..42eff24eb 100644
--- a/arch/mips/kernel/relocate_kernel.S
+++ b/arch/mips/kernel/relocate_kernel.S
@@ -12,8 +12,9 @@
 #include <asm/mipsregs.h>
 #include <asm/stackframe.h>
 #include <asm/addrspace.h>
+#include "machine_kexec.h"
 
-LEAF(relocate_new_kernel)
+LEAF(kexec_relocate_new_kernel)
 	PTR_L a0,	arg0
 	PTR_L a1,	arg1
 	PTR_L a2,	arg2
@@ -98,7 +99,7 @@ done:
 #endif
 	/* jump to kexec_start_address */
 	j		s1
-	END(relocate_new_kernel)
+	END(kexec_relocate_new_kernel)
 
 #ifdef CONFIG_SMP
 /*
@@ -184,9 +185,15 @@ kexec_indirection_page:
 	PTR		0
 	.size		kexec_indirection_page, PTRSIZE
 
-relocate_new_kernel_end:
+kexec_argv_buf:
+	EXPORT(kexec_argv_buf)
+	.skip		KEXEC_COMMAND_LINE_SIZE
+	.size		kexec_argv_buf, KEXEC_COMMAND_LINE_SIZE
 
-relocate_new_kernel_size:
-	EXPORT(relocate_new_kernel_size)
-	PTR		relocate_new_kernel_end - relocate_new_kernel
-	.size		relocate_new_kernel_size, PTRSIZE
+kexec_argv:
+	EXPORT(kexec_argv)
+	.skip		KEXEC_ARGV_SIZE
+	.size		kexec_argv, KEXEC_ARGV_SIZE
+
+kexec_relocate_new_kernel_end:
+	EXPORT(kexec_relocate_new_kernel_end)
diff --git a/arch/mips/lib/Makefile b/arch/mips/lib/Makefile
index 0344e575f..33a0211e9 100644
--- a/arch/mips/lib/Makefile
+++ b/arch/mips/lib/Makefile
@@ -4,7 +4,7 @@
 
 lib-y	+= bitops.o csum_partial.o delay.o memcpy.o memset.o \
 	   mips-atomic.o strlen_user.o strncpy_user.o \
-	   strnlen_user.o uncached.o
+	   strnlen_user.o uncached.o memcmp.o
 
 obj-y			+= iomap.o
 obj-$(CONFIG_PCI)	+= iomap-pci.o
diff --git a/arch/mips/lib/memcmp.c b/arch/mips/lib/memcmp.c
new file mode 100644
index 000000000..35ef16462
--- /dev/null
+++ b/arch/mips/lib/memcmp.c
@@ -0,0 +1,22 @@
+/*
+ *  copied from linux/lib/string.c
+ *
+ *  Copyright (C) 1991, 1992  Linus Torvalds
+ */
+
+#include <linux/module.h>
+#include <linux/string.h>
+
+#undef memcmp
+int memcmp(const void *cs, const void *ct, size_t count)
+{
+	const unsigned char *su1, *su2;
+	int res = 0;
+
+	for (su1 = cs, su2 = ct; 0 < count; ++su1, ++su2, count--)
+		if ((res = *su1 - *su2) != 0)
+			break;
+	return res;
+}
+EXPORT_SYMBOL(memcmp);
+
diff --git a/arch/mips/mm/cache.c b/arch/mips/mm/cache.c
index 7478e59e7..d485c3daa 100644
--- a/arch/mips/mm/cache.c
+++ b/arch/mips/mm/cache.c
@@ -117,6 +117,13 @@ void __flush_anon_page(struct page *page, unsigned long vmaddr)
 {
 	unsigned long addr = (unsigned long) page_address(page);
 
+	if (PageHighMem(page)) {
+		addr = (unsigned long)kmap_atomic(page);
+		flush_data_cache_page(addr);
+		__kunmap_atomic((void *)addr);
+		return;
+	}
+
 	if (pages_do_alias(addr, vmaddr)) {
 		if (page_mapcount(page) && !Page_dcache_dirty(page)) {
 			void *kaddr;
diff --git a/arch/mips/pci/pci-legacy.c b/arch/mips/pci/pci-legacy.c
index 53f696051..b6ece7518 100755
--- a/arch/mips/pci/pci-legacy.c
+++ b/arch/mips/pci/pci-legacy.c
@@ -198,7 +198,7 @@ void register_pci_controller(struct pci_controller *hose)
 	}
 
 	INIT_LIST_HEAD(&hose->list);
-	list_add(&hose->list, &controllers);
+	list_add_tail(&hose->list, &controllers);
 
 	/*
 	 * Do not panic here but later - this might happen before console init.
diff --git a/arch/mips/vdso/Makefile b/arch/mips/vdso/Makefile
index c3dc12a8b..28f66e3bb 100644
--- a/arch/mips/vdso/Makefile
+++ b/arch/mips/vdso/Makefile
@@ -28,9 +28,9 @@ aflags-vdso := $(ccflags-vdso) \
 ifndef CONFIG_CPU_MIPSR6
   ifeq ($(call ld-ifversion, -lt, 225000000, y),y)
     $(warning MIPS VDSO requires binutils >= 2.25)
-    obj-vdso-y := $(filter-out gettimeofday.o, $(obj-vdso-y))
-    ccflags-vdso += -DDISABLE_MIPS_VDSO
   endif
+  obj-vdso-y := $(filter-out gettimeofday.o, $(obj-vdso-y))
+  ccflags-vdso += -DDISABLE_MIPS_VDSO
 endif
 
 # VDSO linker flags.
diff --git a/crypto/Kconfig b/crypto/Kconfig
index fa98ad7ed..6ed381d63 100644
--- a/crypto/Kconfig
+++ b/crypto/Kconfig
@@ -32,7 +32,7 @@ config CRYPTO_FIPS
 	  this is.
 
 config CRYPTO_ALGAPI
-	tristate
+	tristate "ALGAPI"
 	select CRYPTO_ALGAPI2
 	help
 	  This option provides the API for cryptographic algorithms.
@@ -41,7 +41,7 @@ config CRYPTO_ALGAPI2
 	tristate
 
 config CRYPTO_AEAD
-	tristate
+	tristate "AEAD"
 	select CRYPTO_AEAD2
 	select CRYPTO_ALGAPI
 
@@ -52,7 +52,7 @@ config CRYPTO_AEAD2
 	select CRYPTO_RNG2
 
 config CRYPTO_BLKCIPHER
-	tristate
+	tristate "BLKCIPHER"
 	select CRYPTO_BLKCIPHER2
 	select CRYPTO_ALGAPI
 
@@ -63,7 +63,7 @@ config CRYPTO_BLKCIPHER2
 	select CRYPTO_WORKQUEUE
 
 config CRYPTO_HASH
-	tristate
+	tristate "HASH"
 	select CRYPTO_HASH2
 	select CRYPTO_ALGAPI
 
@@ -72,7 +72,7 @@ config CRYPTO_HASH2
 	select CRYPTO_ALGAPI2
 
 config CRYPTO_RNG
-	tristate
+	tristate "RNG"
 	select CRYPTO_RNG2
 	select CRYPTO_ALGAPI
 
@@ -132,12 +132,12 @@ config CRYPTO_MANAGER
 	  cbc(aes).
 
 config CRYPTO_MANAGER2
-	def_tristate CRYPTO_MANAGER || (CRYPTO_MANAGER!=n && CRYPTO_ALGAPI=y)
-	select CRYPTO_AEAD2
-	select CRYPTO_HASH2
-	select CRYPTO_BLKCIPHER2
-	select CRYPTO_AKCIPHER2
-	select CRYPTO_KPP2
+	def_tristate CRYPTO_MANAGER || (CRYPTO_MANAGER!=n && CRYPTO_ALGAPI=y && !CRYPTO_MANAGER_DISABLE_TESTS)
+	select CRYPTO_AEAD2 if !CRYPTO_MANAGER_DISABLE_TESTS
+	select CRYPTO_HASH2 if !CRYPTO_MANAGER_DISABLE_TESTS
+	select CRYPTO_BLKCIPHER2 if !CRYPTO_MANAGER_DISABLE_TESTS
+	select CRYPTO_AKCIPHER2 if !CRYPTO_MANAGER_DISABLE_TESTS
+	select CRYPTO_KPP2 if !CRYPTO_MANAGER_DISABLE_TESTS
 
 config CRYPTO_USER
 	tristate "Userspace cryptographic algorithm configuration"
@@ -150,7 +150,6 @@ config CRYPTO_USER
 config CRYPTO_MANAGER_DISABLE_TESTS
 	bool "Disable run-time self tests"
 	default y
-	depends on CRYPTO_MANAGER2
 	help
 	  Disable run-time self tests that normally take place at
 	  algorithm registration.
diff --git a/crypto/algboss.c b/crypto/algboss.c
index 6e39d9c05..4bde25d62 100644
--- a/crypto/algboss.c
+++ b/crypto/algboss.c
@@ -248,12 +248,16 @@ static int cryptomgr_schedule_test(struct crypto_alg *alg)
 	type = alg->cra_flags;
 
 	/* This piece of crap needs to disappear into per-type test hooks. */
+#ifdef CONFIG_CRYPTO_MANAGER_DISABLE_TESTS
+	type |= CRYPTO_ALG_TESTED;
+#else
 	if (!((type ^ CRYPTO_ALG_TYPE_BLKCIPHER) &
 	      CRYPTO_ALG_TYPE_BLKCIPHER_MASK) && !(type & CRYPTO_ALG_GENIV) &&
 	    ((alg->cra_flags & CRYPTO_ALG_TYPE_MASK) ==
 	     CRYPTO_ALG_TYPE_BLKCIPHER ? alg->cra_blkcipher.ivsize :
 					 alg->cra_ablkcipher.ivsize))
 		type |= CRYPTO_ALG_TESTED;
+#endif
 
 	param->type = type;
 
diff --git a/drivers/ata/Kconfig b/drivers/ata/Kconfig
index 2c8be74f4..8ffccd565 100644
--- a/drivers/ata/Kconfig
+++ b/drivers/ata/Kconfig
@@ -46,6 +46,22 @@ config ATA_VERBOSE_ERROR
 
 	  If unsure, say Y.
 
+config ARCH_WANT_LIBATA_LEDS
+	bool
+
+config ATA_LEDS
+	bool "support ATA port LED triggers"
+	depends on ARCH_WANT_LIBATA_LEDS
+	select NEW_LEDS
+	select LEDS_CLASS
+	select LEDS_TRIGGERS
+	default y
+	help
+	  This option adds a LED trigger for each registered ATA port.
+	  It is used to drive disk activity leds connected via GPIO.
+
+	  If unsure, say N.
+
 config ATA_ACPI
 	bool "ATA ACPI Support"
 	depends on ACPI
diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
index 33e363dcc..54442fced 100644
--- a/drivers/ata/libata-core.c
+++ b/drivers/ata/libata-core.c
@@ -731,6 +731,19 @@ u64 ata_tf_read_block(const struct ata_taskfile *tf, struct ata_device *dev)
 	return block;
 }
 
+#ifdef CONFIG_ATA_LEDS
+#define LIBATA_BLINK_DELAY 20 /* ms */
+static inline void ata_led_act(struct ata_port *ap)
+{
+	unsigned long led_delay = LIBATA_BLINK_DELAY;
+
+	if (unlikely(!ap->ledtrig))
+		return;
+
+	led_trigger_blink_oneshot(ap->ledtrig, &led_delay, &led_delay, 0);
+}
+#endif
+
 /**
  *	ata_build_rw_tf - Build ATA taskfile for given read/write request
  *	@tf: Target ATA taskfile
@@ -4963,6 +4976,9 @@ struct ata_queued_cmd *ata_qc_new_init(struct ata_device *dev, int tag)
 		if (tag < 0)
 			return NULL;
 	}
+#ifdef CONFIG_ATA_LEDS
+	ata_led_act(ap);
+#endif
 
 	qc = __ata_qc_from_tag(ap, tag);
 	qc->tag = tag;
@@ -5864,6 +5880,9 @@ struct ata_port *ata_port_alloc(struct ata_host *host)
 #ifdef ATA_IRQ_TRAP
 	ap->stats.unhandled_irq = 1;
 	ap->stats.idle_irq = 1;
+#endif
+#ifdef CONFIG_ATA_LEDS
+	ap->ledtrig = kzalloc(sizeof(struct led_trigger), GFP_KERNEL);
 #endif
 	ata_sff_port_init(ap);
 
@@ -5886,6 +5905,12 @@ static void ata_host_release(struct device *gendev, void *res)
 
 		kfree(ap->pmp_link);
 		kfree(ap->slave_link);
+#ifdef CONFIG_ATA_LEDS
+		if (ap->ledtrig) {
+			led_trigger_unregister(ap->ledtrig);
+			kfree(ap->ledtrig);
+		}
+#endif
 		kfree(ap);
 		host->ports[i] = NULL;
 	}
@@ -6332,7 +6357,23 @@ int ata_host_register(struct ata_host *host, struct scsi_host_template *sht)
 		host->ports[i]->print_id = atomic_inc_return(&ata_print_id);
 		host->ports[i]->local_port_no = i + 1;
 	}
+#ifdef CONFIG_ATA_LEDS
+	for (i = 0; i < host->n_ports; i++) {
+		if (unlikely(!host->ports[i]->ledtrig))
+			continue;
+
+		snprintf(host->ports[i]->ledtrig_name,
+			sizeof(host->ports[i]->ledtrig_name), "ata%u",
+			host->ports[i]->print_id);
 
+		host->ports[i]->ledtrig->name = host->ports[i]->ledtrig_name;
+
+		if (led_trigger_register(host->ports[i]->ledtrig)) {
+			kfree(host->ports[i]->ledtrig);
+			host->ports[i]->ledtrig = NULL;
+		}
+	}
+#endif
 	/* Create associated sysfs transport objects  */
 	for (i = 0; i < host->n_ports; i++) {
 		rc = ata_tport_add(host->dev,host->ports[i]);
diff --git a/drivers/base/regmap/Kconfig b/drivers/base/regmap/Kconfig
index db9d00c36..c4cb75923 100644
--- a/drivers/base/regmap/Kconfig
+++ b/drivers/base/regmap/Kconfig
@@ -3,29 +3,37 @@
 # subsystems should select the appropriate symbols.
 
 config REGMAP
-	default y if (REGMAP_I2C || REGMAP_SPI || REGMAP_SPMI || REGMAP_AC97 || REGMAP_MMIO || REGMAP_IRQ)
+	select IRQ_DOMAIN if REGMAP_IRQ
+	tristate "Regmap"
+
+config REGCACHE_COMPRESSED
 	select LZO_COMPRESS
 	select LZO_DECOMPRESS
-	select IRQ_DOMAIN if REGMAP_IRQ
 	bool
 
 config REGMAP_AC97
 	tristate
 
 config REGMAP_I2C
-	tristate
+	tristate "Regmap I2C"
+	select REGMAP
 	depends on I2C
 
 config REGMAP_SPI
-	tristate
+	tristate "Regmap SPI"
+	select REGMAP
+	depends on SPI_MASTER
 	depends on SPI
 
 config REGMAP_SPMI
+	select REGMAP
 	tristate
 	depends on SPMI
 
 config REGMAP_MMIO
-	tristate
+	tristate "Regmap MMIO"
+	select REGMAP
 
 config REGMAP_IRQ
+	select REGMAP
 	bool
diff --git a/drivers/base/regmap/Makefile b/drivers/base/regmap/Makefile
index 609e4c84f..f23c58a05 100644
--- a/drivers/base/regmap/Makefile
+++ b/drivers/base/regmap/Makefile
@@ -1,9 +1,14 @@
 # For include/trace/define_trace.h to include trace.h
 CFLAGS_regmap.o := -I$(src)
 
-obj-$(CONFIG_REGMAP) += regmap.o regcache.o
-obj-$(CONFIG_REGMAP) += regcache-rbtree.o regcache-lzo.o regcache-flat.o
-obj-$(CONFIG_DEBUG_FS) += regmap-debugfs.o
+regmap-core-objs = regmap.o regcache.o regcache-rbtree.o regcache-flat.o
+ifdef CONFIG_DEBUG_FS
+regmap-core-objs += regmap-debugfs.o
+endif
+ifdef CONFIG_REGCACHE_COMPRESSED
+regmap-core-objs += regcache-lzo.o
+endif
+obj-$(CONFIG_REGMAP) += regmap-core.o
 obj-$(CONFIG_REGMAP_AC97) += regmap-ac97.o
 obj-$(CONFIG_REGMAP_I2C) += regmap-i2c.o
 obj-$(CONFIG_REGMAP_SPI) += regmap-spi.o
diff --git a/drivers/base/regmap/regcache.c b/drivers/base/regmap/regcache.c
index 4e582561e..3644fe8b2 100644
--- a/drivers/base/regmap/regcache.c
+++ b/drivers/base/regmap/regcache.c
@@ -21,7 +21,9 @@
 
 static const struct regcache_ops *cache_types[] = {
 	&regcache_rbtree_ops,
+#if IS_ENABLED(CONFIG_REGCACHE_COMPRESSED)
 	&regcache_lzo_ops,
+#endif
 	&regcache_flat_ops,
 };
 
diff --git a/drivers/base/regmap/regmap.c b/drivers/base/regmap/regmap.c
index ae63bb087..2ac56b4f5 100644
--- a/drivers/base/regmap/regmap.c
+++ b/drivers/base/regmap/regmap.c
@@ -13,6 +13,7 @@
 #include <linux/device.h>
 #include <linux/slab.h>
 #include <linux/export.h>
+#include <linux/module.h>
 #include <linux/mutex.h>
 #include <linux/err.h>
 #include <linux/of.h>
@@ -2913,3 +2914,5 @@ static int __init regmap_initcall(void)
 	return 0;
 }
 postcore_initcall(regmap_initcall);
+
+MODULE_LICENSE("GPL");
diff --git a/drivers/bcma/Kconfig b/drivers/bcma/Kconfig
index b5c48a8d4..6cb52081b 100644
--- a/drivers/bcma/Kconfig
+++ b/drivers/bcma/Kconfig
@@ -17,6 +17,7 @@ config BCMA
 config BCMA_BLOCKIO
 	bool
 	depends on BCMA
+	default y
 
 config BCMA_HOST_PCI_POSSIBLE
 	bool
diff --git a/drivers/leds/Makefile b/drivers/leds/Makefile
index 4a8885764..75e992079 100644
--- a/drivers/leds/Makefile
+++ b/drivers/leds/Makefile
@@ -78,3 +78,4 @@ obj-$(CONFIG_LEDS_DAC124S085)		+= leds-dac124s085.o
 
 # LED Triggers
 obj-$(CONFIG_LEDS_TRIGGERS)		+= trigger/
+obj-$(CONFIG_LEDS_TRIGGER_NETDEV)	+= ledtrig-netdev.o
diff --git a/drivers/leds/led-class.c b/drivers/leds/led-class.c
index aa84e5b37..6f8ba4ca7 100644
--- a/drivers/leds/led-class.c
+++ b/drivers/leds/led-class.c
@@ -181,11 +181,14 @@ static int led_classdev_next_name(const char *init_name, char *name,
 }
 
 /**
- * led_classdev_register - register a new object of led_classdev class.
- * @parent: The device to register.
+ * of_led_classdev_register - register a new object of led_classdev class.
+ *
+ * @parent: parent of LED device
  * @led_cdev: the led_classdev structure for this device.
+ * @np: DT node describing this LED
  */
-int led_classdev_register(struct device *parent, struct led_classdev *led_cdev)
+int of_led_classdev_register(struct device *parent, struct device_node *np,
+			    struct led_classdev *led_cdev)
 {
 	char name[64];
 	int ret;
@@ -198,6 +201,7 @@ int led_classdev_register(struct device *parent, struct led_classdev *led_cdev)
 				led_cdev, led_cdev->groups, "%s", name);
 	if (IS_ERR(led_cdev->dev))
 		return PTR_ERR(led_cdev->dev);
+	led_cdev->dev->of_node = np;
 
 	if (ret)
 		dev_warn(parent, "Led %s renamed to %s due to name collision",
@@ -228,7 +232,7 @@ int led_classdev_register(struct device *parent, struct led_classdev *led_cdev)
 
 	return 0;
 }
-EXPORT_SYMBOL_GPL(led_classdev_register);
+EXPORT_SYMBOL_GPL(of_led_classdev_register);
 
 /**
  * led_classdev_unregister - unregisters a object of led_properties class.
@@ -270,12 +274,14 @@ static void devm_led_classdev_release(struct device *dev, void *res)
 }
 
 /**
- * devm_led_classdev_register - resource managed led_classdev_register()
- * @parent: The device to register.
+ * devm_of_led_classdev_register - resource managed led_classdev_register()
+ *
+ * @parent: parent of LED device
  * @led_cdev: the led_classdev structure for this device.
  */
-int devm_led_classdev_register(struct device *parent,
-			       struct led_classdev *led_cdev)
+int devm_of_led_classdev_register(struct device *parent,
+				  struct device_node *np,
+				  struct led_classdev *led_cdev)
 {
 	struct led_classdev **dr;
 	int rc;
@@ -284,7 +290,7 @@ int devm_led_classdev_register(struct device *parent,
 	if (!dr)
 		return -ENOMEM;
 
-	rc = led_classdev_register(parent, led_cdev);
+	rc = of_led_classdev_register(parent, np, led_cdev);
 	if (rc) {
 		devres_free(dr);
 		return rc;
@@ -295,7 +301,7 @@ int devm_led_classdev_register(struct device *parent,
 
 	return 0;
 }
-EXPORT_SYMBOL_GPL(devm_led_classdev_register);
+EXPORT_SYMBOL_GPL(devm_of_led_classdev_register);
 
 static int devm_led_classdev_match(struct device *dev, void *res, void *data)
 {
diff --git a/drivers/leds/leds-gpio.c b/drivers/leds/leds-gpio.c
index d400dcaf4..c5230249f 100644
--- a/drivers/leds/leds-gpio.c
+++ b/drivers/leds/leds-gpio.c
@@ -77,7 +77,7 @@ static int gpio_blink_set(struct led_classdev *led_cdev,
 
 static int create_gpio_led(const struct gpio_led *template,
 	struct gpio_led_data *led_dat, struct device *parent,
-	gpio_blink_set_t blink_set)
+	struct device_node *np, gpio_blink_set_t blink_set)
 {
 	int ret, state;
 
@@ -139,7 +139,7 @@ static int create_gpio_led(const struct gpio_led *template,
 	if (ret < 0)
 		return ret;
 
-	return devm_led_classdev_register(parent, &led_dat->cdev);
+	return devm_of_led_classdev_register(parent, np, &led_dat->cdev);
 }
 
 struct gpio_leds_priv {
@@ -206,7 +206,7 @@ static struct gpio_leds_priv *gpio_leds_create(struct platform_device *pdev)
 		if (fwnode_property_present(child, "panic-indicator"))
 			led.panic_indicator = 1;
 
-		ret = create_gpio_led(&led, led_dat, dev, NULL);
+		ret = create_gpio_led(&led, led_dat, dev, np, NULL);
 		if (ret < 0) {
 			fwnode_handle_put(child);
 			return ERR_PTR(ret);
@@ -240,9 +240,9 @@ static int gpio_led_probe(struct platform_device *pdev)
 
 		priv->num_leds = pdata->num_leds;
 		for (i = 0; i < priv->num_leds; i++) {
-			ret = create_gpio_led(&pdata->leds[i],
-					      &priv->leds[i],
-					      &pdev->dev, pdata->gpio_blink_set);
+			ret = create_gpio_led(&pdata->leds[i], &priv->leds[i],
+					      &pdev->dev, NULL,
+					      pdata->gpio_blink_set);
 			if (ret < 0)
 				return ret;
 		}
diff --git a/drivers/leds/ledtrig-netdev.c b/drivers/leds/ledtrig-netdev.c
new file mode 100644
index 000000000..8d3249010
--- /dev/null
+++ b/drivers/leds/ledtrig-netdev.c
@@ -0,0 +1,444 @@
+/*
+ * LED Kernel Netdev Trigger
+ *
+ * Toggles the LED to reflect the link and traffic state of a named net device
+ *
+ * Copyright 2007 Oliver Jowett <oliver@opencloud.com>
+ *
+ * Derived from ledtrig-timer.c which is:
+ *  Copyright 2005-2006 Openedhand Ltd.
+ *  Author: Richard Purdie <rpurdie@openedhand.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/jiffies.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/list.h>
+#include <linux/spinlock.h>
+#include <linux/device.h>
+#include <linux/netdevice.h>
+#include <linux/timer.h>
+#include <linux/ctype.h>
+#include <linux/leds.h>
+
+#include "leds.h"
+
+/*
+ * Configurable sysfs attributes:
+ *
+ * device_name - network device name to monitor
+ *
+ * interval - duration of LED blink, in milliseconds
+ *
+ * mode - either "none" (LED is off) or a space separated list of one or more of:
+ *   link: LED's normal state reflects whether the link is up (has carrier) or not
+ *   tx:   LED blinks on transmitted data
+ *   rx:   LED blinks on receive data
+ *
+ * Some suggestions:
+ *
+ *  Simple link status LED:
+ *  $ echo netdev >someled/trigger
+ *  $ echo eth0 >someled/device_name
+ *  $ echo link >someled/mode
+ *
+ *  Ethernet-style link/activity LED:
+ *  $ echo netdev >someled/trigger
+ *  $ echo eth0 >someled/device_name
+ *  $ echo "link tx rx" >someled/mode
+ *
+ *  Modem-style tx/rx LEDs:
+ *  $ echo netdev >led1/trigger
+ *  $ echo ppp0 >led1/device_name
+ *  $ echo tx >led1/mode
+ *  $ echo netdev >led2/trigger
+ *  $ echo ppp0 >led2/device_name
+ *  $ echo rx >led2/mode
+ *
+ */
+
+#define MODE_LINK 1
+#define MODE_TX   2
+#define MODE_RX   4
+
+struct led_netdev_data {
+	spinlock_t lock;
+
+	struct delayed_work work;
+	struct notifier_block notifier;
+
+	struct led_classdev *led_cdev;
+	struct net_device *net_dev;
+
+	char device_name[IFNAMSIZ];
+	unsigned interval;
+	unsigned mode;
+	unsigned link_up;
+	unsigned last_activity;
+};
+
+static void set_baseline_state(struct led_netdev_data *trigger_data)
+{
+	if ((trigger_data->mode & MODE_LINK) != 0 && trigger_data->link_up)
+		led_set_brightness(trigger_data->led_cdev, LED_FULL);
+	else
+		led_set_brightness(trigger_data->led_cdev, LED_OFF);
+
+	if ((trigger_data->mode & (MODE_TX | MODE_RX)) != 0 && trigger_data->link_up)
+		schedule_delayed_work(&trigger_data->work, trigger_data->interval);
+}
+
+static ssize_t led_device_name_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	struct led_classdev *led_cdev = dev_get_drvdata(dev);
+	struct led_netdev_data *trigger_data = led_cdev->trigger_data;
+
+	spin_lock_bh(&trigger_data->lock);
+	sprintf(buf, "%s\n", trigger_data->device_name);
+	spin_unlock_bh(&trigger_data->lock);
+
+	return strlen(buf) + 1;
+}
+
+static ssize_t led_device_name_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t size)
+{
+	struct led_classdev *led_cdev = dev_get_drvdata(dev);
+	struct led_netdev_data *trigger_data = led_cdev->trigger_data;
+
+	if (size >= IFNAMSIZ)
+		return -EINVAL;
+
+	cancel_delayed_work_sync(&trigger_data->work);
+
+	spin_lock_bh(&trigger_data->lock);
+
+	strcpy(trigger_data->device_name, buf);
+	if (size > 0 && trigger_data->device_name[size-1] == '\n')
+		trigger_data->device_name[size-1] = 0;
+	trigger_data->link_up = 0;
+	trigger_data->last_activity = 0;
+
+	if (trigger_data->device_name[0] != 0) {
+		/* check for existing device to update from */
+		trigger_data->net_dev = dev_get_by_name(&init_net, trigger_data->device_name);
+		if (trigger_data->net_dev != NULL)
+			trigger_data->link_up = (dev_get_flags(trigger_data->net_dev) & IFF_LOWER_UP) != 0;
+	}
+
+	set_baseline_state(trigger_data);
+	spin_unlock_bh(&trigger_data->lock);
+
+	return size;
+}
+
+static DEVICE_ATTR(device_name, 0644, led_device_name_show, led_device_name_store);
+
+static ssize_t led_mode_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	struct led_classdev *led_cdev = dev_get_drvdata(dev);
+	struct led_netdev_data *trigger_data = led_cdev->trigger_data;
+
+	spin_lock_bh(&trigger_data->lock);
+
+	if (trigger_data->mode == 0) {
+		strcpy(buf, "none\n");
+	} else {
+		if (trigger_data->mode & MODE_LINK)
+			strcat(buf, "link ");
+		if (trigger_data->mode & MODE_TX)
+			strcat(buf, "tx ");
+		if (trigger_data->mode & MODE_RX)
+			strcat(buf, "rx ");
+		strcat(buf, "\n");
+	}
+
+	spin_unlock_bh(&trigger_data->lock);
+
+	return strlen(buf)+1;
+}
+
+static ssize_t led_mode_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t size)
+{
+	struct led_classdev *led_cdev = dev_get_drvdata(dev);
+	struct led_netdev_data *trigger_data = led_cdev->trigger_data;
+	char copybuf[128];
+	int new_mode = -1;
+	char *p, *token;
+
+	/* take a copy since we don't want to trash the inbound buffer when using strsep */
+	strncpy(copybuf, buf, sizeof(copybuf));
+	copybuf[sizeof(copybuf) - 1] = 0;
+	p = copybuf;
+
+	while ((token = strsep(&p, " \t\n")) != NULL) {
+		if (!*token)
+			continue;
+
+		if (new_mode == -1)
+			new_mode = 0;
+
+		if (!strcmp(token, "none"))
+			new_mode = 0;
+		else if (!strcmp(token, "tx"))
+			new_mode |= MODE_TX;
+		else if (!strcmp(token, "rx"))
+			new_mode |= MODE_RX;
+		else if (!strcmp(token, "link"))
+			new_mode |= MODE_LINK;
+		else
+			return -EINVAL;
+	}
+
+	if (new_mode == -1)
+		return -EINVAL;
+
+	cancel_delayed_work_sync(&trigger_data->work);
+
+	spin_lock_bh(&trigger_data->lock);
+	trigger_data->mode = new_mode;
+	set_baseline_state(trigger_data);
+	spin_unlock_bh(&trigger_data->lock);
+
+	return size;
+}
+
+static DEVICE_ATTR(mode, 0644, led_mode_show, led_mode_store);
+
+static ssize_t led_interval_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	struct led_classdev *led_cdev = dev_get_drvdata(dev);
+	struct led_netdev_data *trigger_data = led_cdev->trigger_data;
+
+	spin_lock_bh(&trigger_data->lock);
+	sprintf(buf, "%u\n", jiffies_to_msecs(trigger_data->interval));
+	spin_unlock_bh(&trigger_data->lock);
+
+	return strlen(buf) + 1;
+}
+
+static ssize_t led_interval_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t size)
+{
+	struct led_classdev *led_cdev = dev_get_drvdata(dev);
+	struct led_netdev_data *trigger_data = led_cdev->trigger_data;
+	int ret = -EINVAL;
+	char *after;
+	unsigned long value = simple_strtoul(buf, &after, 10);
+	size_t count = after - buf;
+
+	if (isspace(*after))
+		count++;
+
+	/* impose some basic bounds on the timer interval */
+	if (count == size && value >= 5 && value <= 10000) {
+		cancel_delayed_work_sync(&trigger_data->work);
+
+		spin_lock_bh(&trigger_data->lock);
+		trigger_data->interval = msecs_to_jiffies(value);
+		set_baseline_state(trigger_data); /* resets timer */
+		spin_unlock_bh(&trigger_data->lock);
+
+		ret = count;
+	}
+
+	return ret;
+}
+
+static DEVICE_ATTR(interval, 0644, led_interval_show, led_interval_store);
+
+static int netdev_trig_notify(struct notifier_block *nb,
+			      unsigned long evt,
+			      void *dv)
+{
+	struct net_device *dev = netdev_notifier_info_to_dev((struct netdev_notifier_info *) dv);
+	struct led_netdev_data *trigger_data = container_of(nb, struct led_netdev_data, notifier);
+
+	if (evt != NETDEV_UP && evt != NETDEV_DOWN && evt != NETDEV_CHANGE && evt != NETDEV_REGISTER && evt != NETDEV_UNREGISTER && evt != NETDEV_CHANGENAME)
+		return NOTIFY_DONE;
+
+	if (strcmp(dev->name, trigger_data->device_name))
+		return NOTIFY_DONE;
+
+	cancel_delayed_work_sync(&trigger_data->work);
+
+	spin_lock_bh(&trigger_data->lock);
+
+	if (evt == NETDEV_REGISTER || evt == NETDEV_CHANGENAME) {
+		if (trigger_data->net_dev != NULL)
+			dev_put(trigger_data->net_dev);
+
+		dev_hold(dev);
+		trigger_data->net_dev = dev;
+		trigger_data->link_up = 0;
+		goto done;
+	}
+
+	if (evt == NETDEV_UNREGISTER && trigger_data->net_dev != NULL) {
+		dev_put(trigger_data->net_dev);
+		trigger_data->net_dev = NULL;
+		goto done;
+	}
+
+	/* UP / DOWN / CHANGE */
+
+	trigger_data->link_up = (evt != NETDEV_DOWN && netif_carrier_ok(dev));
+	set_baseline_state(trigger_data);
+
+done:
+	spin_unlock_bh(&trigger_data->lock);
+	return NOTIFY_DONE;
+}
+
+/* here's the real work! */
+static void netdev_trig_work(struct work_struct *work)
+{
+	struct led_netdev_data *trigger_data = container_of(work, struct led_netdev_data, work.work);
+	struct rtnl_link_stats64 *dev_stats;
+	unsigned new_activity;
+	struct rtnl_link_stats64 temp;
+
+	if (!trigger_data->link_up || !trigger_data->net_dev || (trigger_data->mode & (MODE_TX | MODE_RX)) == 0) {
+		/* we don't need to do timer work, just reflect link state. */
+		led_set_brightness(trigger_data->led_cdev, ((trigger_data->mode & MODE_LINK) != 0 && trigger_data->link_up) ? LED_FULL : LED_OFF);
+		return;
+	}
+
+	dev_stats = dev_get_stats(trigger_data->net_dev, &temp);
+	new_activity =
+		((trigger_data->mode & MODE_TX) ? dev_stats->tx_packets : 0) +
+		((trigger_data->mode & MODE_RX) ? dev_stats->rx_packets : 0);
+
+	if (trigger_data->mode & MODE_LINK) {
+		/* base state is ON (link present) */
+		/* if there's no link, we don't get this far and the LED is off */
+
+		/* OFF -> ON always */
+		/* ON -> OFF on activity */
+		if (trigger_data->led_cdev->brightness == LED_OFF) {
+			led_set_brightness(trigger_data->led_cdev, LED_FULL);
+		} else if (trigger_data->last_activity != new_activity) {
+			led_set_brightness(trigger_data->led_cdev, LED_OFF);
+		}
+	} else {
+		/* base state is OFF */
+		/* ON -> OFF always */
+		/* OFF -> ON on activity */
+		if (trigger_data->led_cdev->brightness == LED_FULL) {
+			led_set_brightness(trigger_data->led_cdev, LED_OFF);
+		} else if (trigger_data->last_activity != new_activity) {
+			led_set_brightness(trigger_data->led_cdev, LED_FULL);
+		}
+	}
+
+	trigger_data->last_activity = new_activity;
+	schedule_delayed_work(&trigger_data->work, trigger_data->interval);
+}
+
+static void netdev_trig_activate(struct led_classdev *led_cdev)
+{
+	struct led_netdev_data *trigger_data;
+	int rc;
+
+	trigger_data = kzalloc(sizeof(struct led_netdev_data), GFP_KERNEL);
+	if (!trigger_data)
+		return;
+
+	spin_lock_init(&trigger_data->lock);
+
+	trigger_data->notifier.notifier_call = netdev_trig_notify;
+	trigger_data->notifier.priority = 10;
+
+	INIT_DELAYED_WORK(&trigger_data->work, netdev_trig_work);
+
+	trigger_data->led_cdev = led_cdev;
+	trigger_data->net_dev = NULL;
+	trigger_data->device_name[0] = 0;
+
+	trigger_data->mode = 0;
+	trigger_data->interval = msecs_to_jiffies(50);
+	trigger_data->link_up = 0;
+	trigger_data->last_activity = 0;
+
+	led_cdev->trigger_data = trigger_data;
+
+	rc = device_create_file(led_cdev->dev, &dev_attr_device_name);
+	if (rc)
+		goto err_out;
+	rc = device_create_file(led_cdev->dev, &dev_attr_mode);
+	if (rc)
+		goto err_out_device_name;
+	rc = device_create_file(led_cdev->dev, &dev_attr_interval);
+	if (rc)
+		goto err_out_mode;
+
+	register_netdevice_notifier(&trigger_data->notifier);
+	return;
+
+err_out_mode:
+	device_remove_file(led_cdev->dev, &dev_attr_mode);
+err_out_device_name:
+	device_remove_file(led_cdev->dev, &dev_attr_device_name);
+err_out:
+	led_cdev->trigger_data = NULL;
+	kfree(trigger_data);
+}
+
+static void netdev_trig_deactivate(struct led_classdev *led_cdev)
+{
+	struct led_netdev_data *trigger_data = led_cdev->trigger_data;
+
+	if (trigger_data) {
+		unregister_netdevice_notifier(&trigger_data->notifier);
+
+		device_remove_file(led_cdev->dev, &dev_attr_device_name);
+		device_remove_file(led_cdev->dev, &dev_attr_mode);
+		device_remove_file(led_cdev->dev, &dev_attr_interval);
+
+		cancel_delayed_work_sync(&trigger_data->work);
+
+		spin_lock_bh(&trigger_data->lock);
+
+		if (trigger_data->net_dev) {
+			dev_put(trigger_data->net_dev);
+			trigger_data->net_dev = NULL;
+		}
+
+		spin_unlock_bh(&trigger_data->lock);
+
+		kfree(trigger_data);
+	}
+}
+
+static struct led_trigger netdev_led_trigger = {
+	.name     = "netdev",
+	.activate = netdev_trig_activate,
+	.deactivate = netdev_trig_deactivate,
+};
+
+static int __init netdev_trig_init(void)
+{
+	return led_trigger_register(&netdev_led_trigger);
+}
+
+static void __exit netdev_trig_exit(void)
+{
+	led_trigger_unregister(&netdev_led_trigger);
+}
+
+module_init(netdev_trig_init);
+module_exit(netdev_trig_exit);
+
+MODULE_AUTHOR("Oliver Jowett <oliver@opencloud.com>");
+MODULE_DESCRIPTION("Netdev LED trigger");
+MODULE_LICENSE("GPL");
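
The trigger above is configured entirely through sysfs: the standard LED "trigger" attribute selects it, and the device_name, mode and interval attributes created in netdev_trig_activate() tune it. Below is a minimal userspace sketch of that configuration. The LED name "green:lan", the interface name "eth0" and the "link tx rx" token format for mode are assumptions (the MODE_LINK/MODE_TX/MODE_RX flags suggest those tokens, but the mode parser is not shown here); only the attribute names and the 5..10000 ms interval bounds come from the code above.

/*
 * Minimal userspace sketch: bind an LED to the "netdev" trigger and
 * configure it through sysfs.  The LED name "green:lan", the interface
 * "eth0" and the "link tx rx" mode tokens are assumptions; the attribute
 * names (trigger, device_name, mode, interval) come from the driver above.
 */
#include <stdio.h>

static int write_attr(const char *path, const char *value)
{
	FILE *f = fopen(path, "w");

	if (!f) {
		perror(path);
		return -1;
	}
	fprintf(f, "%s\n", value);
	fclose(f);
	return 0;
}

int main(void)
{
	const char *led = "/sys/class/leds/green:lan";
	char path[256];

	snprintf(path, sizeof(path), "%s/trigger", led);
	write_attr(path, "netdev");		/* select the trigger */
	snprintf(path, sizeof(path), "%s/device_name", led);
	write_attr(path, "eth0");		/* interface to watch */
	snprintf(path, sizeof(path), "%s/mode", led);
	write_attr(path, "link tx rx");		/* assumed token format */
	snprintf(path, sizeof(path), "%s/interval", led);
	write_attr(path, "100");		/* blink interval in ms, 5..10000 accepted */
	return 0;
}
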
diff --git a/drivers/leds/trigger/Kconfig b/drivers/leds/trigger/Kconfig
index 3f9ddb9fa..4ec185389 100644
--- a/drivers/leds/trigger/Kconfig
+++ b/drivers/leds/trigger/Kconfig
@@ -126,4 +126,11 @@ config LEDS_TRIGGER_PANIC
 	  a different trigger.
 	  If unsure, say Y.
 
+config LEDS_TRIGGER_NETDEV
+	tristate "LED Netdev Trigger"
+	depends on NET && LEDS_TRIGGERS
+	help
+	  This allows LEDs to be controlled by network device activity.
+	  If unsure, say Y.
+
 endif # LEDS_TRIGGERS
diff --git a/drivers/mtd/chips/cfi_cmdset_0002.c b/drivers/mtd/chips/cfi_cmdset_0002.c
index 9dca881bb..b4cc09752 100644
--- a/drivers/mtd/chips/cfi_cmdset_0002.c
+++ b/drivers/mtd/chips/cfi_cmdset_0002.c
@@ -807,7 +807,7 @@ static int get_chip(struct map_info *map, struct flchip *chip, unsigned long adr
 		return 0;
 
 	case FL_ERASING:
-		if (!cfip || !(cfip->EraseSuspend & (0x1|0x2)) ||
+		if (1 /* no suspend */ || !cfip || !(cfip->EraseSuspend & (0x1|0x2)) ||
 		    !(mode == FL_READY || mode == FL_POINT ||
 		    (mode == FL_WRITING && (cfip->EraseSuspend & 0x2))))
 			goto sleep;
@@ -1828,6 +1828,7 @@ static int __xipram do_write_buffer(struct map_info *map, struct flchip *chip,
 
 	/* Write Buffer Load */
 	map_write(map, CMD(0x25), cmd_adr);
+	(void) map_read(map, cmd_adr);
 
 	chip->state = FL_WRITING_TO_BUFFER;
 
diff --git a/drivers/mtd/devices/block2mtd.c b/drivers/mtd/devices/block2mtd.c
index 7c887f111..9865041c3 100644
--- a/drivers/mtd/devices/block2mtd.c
+++ b/drivers/mtd/devices/block2mtd.c
@@ -26,6 +26,7 @@
 #include <linux/list.h>
 #include <linux/init.h>
 #include <linux/mtd/mtd.h>
+#include <linux/mtd/partitions.h>
 #include <linux/mutex.h>
 #include <linux/mount.h>
 #include <linux/slab.h>
@@ -219,7 +220,7 @@ static void block2mtd_free_device(struct block2mtd_dev *dev)
 
 
 static struct block2mtd_dev *add_device(char *devname, int erase_size,
-		int timeout)
+		const char *mtdname, int timeout)
 {
 #ifndef MODULE
 	int i;
@@ -227,6 +228,7 @@ static struct block2mtd_dev *add_device(char *devname, int erase_size,
 	const fmode_t mode = FMODE_READ | FMODE_WRITE | FMODE_EXCL;
 	struct block_device *bdev = ERR_PTR(-ENODEV);
 	struct block2mtd_dev *dev;
+	struct mtd_partition *part;
 	char *name;
 
 	if (!devname)
@@ -283,13 +285,16 @@ static struct block2mtd_dev *add_device(char *devname, int erase_size,
 
 	/* Setup the MTD structure */
 	/* make the name contain the block device in */
-	name = kasprintf(GFP_KERNEL, "block2mtd: %s", devname);
+	if (!mtdname)
+		mtdname = devname;
+	name = kmalloc(strlen(mtdname) + 1, GFP_KERNEL);
 	if (!name)
 		goto err_destroy_mutex;
 
+	strcpy(name, mtdname);
 	dev->mtd.name = name;
 
-	dev->mtd.size = dev->blkdev->bd_inode->i_size & PAGE_MASK;
+	dev->mtd.size = dev->blkdev->bd_inode->i_size & PAGE_MASK & ~(erase_size - 1);
 	dev->mtd.erasesize = erase_size;
 	dev->mtd.writesize = 1;
 	dev->mtd.writebufsize = PAGE_SIZE;
@@ -302,7 +307,11 @@ static struct block2mtd_dev *add_device(char *devname, int erase_size,
 	dev->mtd.priv = dev;
 	dev->mtd.owner = THIS_MODULE;
 
-	if (mtd_device_register(&dev->mtd, NULL, 0)) {
+	part = kzalloc(sizeof(struct mtd_partition), GFP_KERNEL);
+	part->name = name;
+	part->offset = 0;
+	part->size = dev->mtd.size;
+	if (mtd_device_register(&dev->mtd, part, 1)) {
 		/* Device didn't get added, so free the entry */
 		goto err_destroy_mutex;
 	}
@@ -310,8 +319,7 @@ static struct block2mtd_dev *add_device(char *devname, int erase_size,
 	list_add(&dev->list, &blkmtd_device_list);
 	pr_info("mtd%d: [%s] erase_size = %dKiB [%d]\n",
 		dev->mtd.index,
-		dev->mtd.name + strlen("block2mtd: "),
-		dev->mtd.erasesize >> 10, dev->mtd.erasesize);
+		mtdname, dev->mtd.erasesize >> 10, dev->mtd.erasesize);
 	return dev;
 
 err_destroy_mutex:
@@ -384,7 +392,7 @@ static int block2mtd_setup2(const char *val)
 	/* 80 for device, 12 for erase size, 80 for name, 8 for timeout */
 	char buf[80 + 12 + 80 + 8];
 	char *str = buf;
-	char *token[2];
+	char *token[4];
 	char *name;
 	size_t erase_size = PAGE_SIZE;
 	unsigned long timeout = MTD_DEFAULT_TIMEOUT;
@@ -398,7 +406,7 @@ static int block2mtd_setup2(const char *val)
 	strcpy(str, val);
 	kill_final_newline(str);
 
-	for (i = 0; i < 2; i++)
+	for (i = 0; i < 4; i++)
 		token[i] = strsep(&str, ",");
 
 	if (str) {
@@ -424,8 +432,13 @@ static int block2mtd_setup2(const char *val)
 			return 0;
 		}
 	}
+	if (token[2] && (strlen(token[2]) + 1 > 80))
+		pr_err("mtd device name too long\n");
 
-	add_device(name, erase_size, timeout);
+	if (token[3] && kstrtoul(token[3], 0, &timeout))
+		pr_err("invalid timeout\n");
+
+	add_device(name, erase_size, token[2], timeout);
 
 	return 0;
 }
@@ -459,7 +472,7 @@ static int block2mtd_setup(const char *val, struct kernel_param *kp)
 
 
 module_param_call(block2mtd, block2mtd_setup, NULL, NULL, 0200);
-MODULE_PARM_DESC(block2mtd, "Device to use. \"block2mtd=<dev>[,<erasesize>]\"");
+MODULE_PARM_DESC(block2mtd, "Device to use. \"block2mtd=<dev>[,<erasesize>[,<name>[,<timeout>]]]\"");
 
 static int __init block2mtd_init(void)
 {
diff --git a/drivers/mtd/maps/physmap_of.c b/drivers/mtd/maps/physmap_of.c
index 3fad35942..11d63046d 100644
--- a/drivers/mtd/maps/physmap_of.c
+++ b/drivers/mtd/maps/physmap_of.c
@@ -113,47 +113,9 @@ static struct mtd_info *obsolete_probe(struct platform_device *dev,
 static const char * const part_probe_types_def[] = {
 	"cmdlinepart", "RedBoot", "ofpart", "ofoldpart", NULL };
 
-static const char * const *of_get_probes(struct device_node *dp)
-{
-	const char *cp;
-	int cplen;
-	unsigned int l;
-	unsigned int count;
-	const char **res;
-
-	cp = of_get_property(dp, "linux,part-probe", &cplen);
-	if (cp == NULL)
-		return part_probe_types_def;
-
-	count = 0;
-	for (l = 0; l != cplen; l++)
-		if (cp[l] == 0)
-			count++;
-
-	res = kzalloc((count + 1)*sizeof(*res), GFP_KERNEL);
-	if (!res)
-		return NULL;
-	count = 0;
-	while (cplen > 0) {
-		res[count] = cp;
-		l = strlen(cp) + 1;
-		cp += l;
-		cplen -= l;
-		count++;
-	}
-	return res;
-}
-
-static void of_free_probes(const char * const *probes)
-{
-	if (probes != part_probe_types_def)
-		kfree(probes);
-}
-
 static const struct of_device_id of_flash_match[];
 static int of_flash_probe(struct platform_device *dev)
 {
-	const char * const *part_probe_types;
 	const struct of_device_id *match;
 	struct device_node *dp = dev->dev.of_node;
 	struct resource res;
@@ -317,14 +279,8 @@ static int of_flash_probe(struct platform_device *dev)
 
 	info->cmtd->dev.parent = &dev->dev;
 	mtd_set_of_node(info->cmtd, dp);
-	part_probe_types = of_get_probes(dp);
-	if (!part_probe_types) {
-		err = -ENOMEM;
-		goto err_out;
-	}
-	mtd_device_parse_register(info->cmtd, part_probe_types, NULL,
+	mtd_device_parse_register(info->cmtd, part_probe_types_def, NULL,
 			NULL, 0);
-	of_free_probes(part_probe_types);
 
 	kfree(mtd_list);
 
diff --git a/drivers/mtd/mtdcore.c b/drivers/mtd/mtdcore.c
index d46e4adf6..4d62d898d 100644
--- a/drivers/mtd/mtdcore.c
+++ b/drivers/mtd/mtdcore.c
@@ -40,6 +40,7 @@
 #include <linux/slab.h>
 #include <linux/reboot.h>
 #include <linux/leds.h>
+#include <linux/root_dev.h>
 
 #include <linux/mtd/mtd.h>
 #include <linux/mtd/partitions.h>
@@ -570,6 +571,15 @@ int add_mtd_device(struct mtd_info *mtd)
 	   of this try_ nonsense, and no bitching about it
 	   either. :) */
 	__module_get(THIS_MODULE);
+
+	if (!strcmp(mtd->name, "rootfs") &&
+	    IS_ENABLED(CONFIG_MTD_ROOTFS_ROOT_DEV) &&
+	    ROOT_DEV == 0) {
+		pr_notice("mtd: device %d (%s) set to be root filesystem\n",
+			  mtd->index, mtd->name);
+		ROOT_DEV = MKDEV(MTD_BLOCK_MAJOR, mtd->index);
+	}
+
 	return 0;
 
 fail_added:
diff --git a/drivers/mtd/mtdpart.c b/drivers/mtd/mtdpart.c
index fccdd49bb..95ac9f737 100644
--- a/drivers/mtd/mtdpart.c
+++ b/drivers/mtd/mtdpart.c
@@ -29,6 +29,7 @@
 #include <linux/kmod.h>
 #include <linux/mtd/mtd.h>
 #include <linux/mtd/partitions.h>
+#include <linux/of.h>
 #include <linux/err.h>
 
 #include "mtdcore.h"
@@ -349,44 +350,30 @@ static const struct mtd_ooblayout_ops part_ooblayout_ops = {
 	.free = part_ooblayout_free,
 };
 
-static inline void free_partition(struct mtd_part *p)
+static int part_max_bad_blocks(struct mtd_info *mtd, loff_t ofs, size_t len)
 {
-	kfree(p->mtd.name);
-	kfree(p);
-}
+	struct mtd_part *part = mtd_to_part(mtd);
 
-/*
- * This function unregisters and destroy all slave MTD objects which are
- * attached to the given master MTD object.
- */
+	return part->master->_max_bad_blocks(part->master,
+					     ofs + part->offset, len);
+}
 
-int del_mtd_partitions(struct mtd_info *master)
+static inline void free_partition(struct mtd_part *p)
 {
-	struct mtd_part *slave, *next;
-	int ret, err = 0;
-
-	mutex_lock(&mtd_partitions_mutex);
-	list_for_each_entry_safe(slave, next, &mtd_partitions, list)
-		if (slave->master == master) {
-			ret = del_mtd_device(&slave->mtd);
-			if (ret < 0) {
-				err = ret;
-				continue;
-			}
-			list_del(&slave->list);
-			free_partition(slave);
-		}
-	mutex_unlock(&mtd_partitions_mutex);
-
-	return err;
+	kfree(p->mtd.name);
+	kfree(p);
 }
 
 static struct mtd_part *allocate_partition(struct mtd_info *master,
 			const struct mtd_partition *part, int partno,
 			uint64_t cur_offset)
 {
+	int wr_alignment = (master->flags & MTD_NO_ERASE) ? master->writesize :
+							    master->erasesize;
 	struct mtd_part *slave;
+	u32 remainder;
 	char *name;
+	u64 tmp;
 
 	/* allocate the partition structure */
 	slave = kzalloc(sizeof(*slave), GFP_KERNEL);
@@ -424,6 +411,7 @@ static struct mtd_part *allocate_partition(struct mtd_info *master,
 	slave->mtd.dev.parent = IS_ENABLED(CONFIG_MTD_PARTITIONED_MASTER) ?
 				&master->dev :
 				master->dev.parent;
+	slave->mtd.dev.of_node = part->of_node;
 
 	slave->mtd._read = part_read;
 	slave->mtd._write = part_write;
@@ -475,6 +463,8 @@ static struct mtd_part *allocate_partition(struct mtd_info *master,
 		slave->mtd._block_isbad = part_block_isbad;
 	if (master->_block_markbad)
 		slave->mtd._block_markbad = part_block_markbad;
+	if (master->_max_bad_blocks)
+		slave->mtd._max_bad_blocks = part_max_bad_blocks;
 
 	if (master->_get_device)
 		slave->mtd._get_device = part_get_device;
@@ -488,10 +478,11 @@ static struct mtd_part *allocate_partition(struct mtd_info *master,
 	if (slave->offset == MTDPART_OFS_APPEND)
 		slave->offset = cur_offset;
 	if (slave->offset == MTDPART_OFS_NXTBLK) {
+		tmp = cur_offset;
 		slave->offset = cur_offset;
-		if (mtd_mod_by_eb(cur_offset, master) != 0) {
-			/* Round up to next erasesize */
-			slave->offset = (mtd_div_by_eb(cur_offset, master) + 1) * master->erasesize;
+		remainder = do_div(tmp, wr_alignment);
+		if (remainder) {
+			slave->offset += wr_alignment - remainder;
 			printk(KERN_NOTICE "Moving partition %d: "
 			       "0x%012llx -> 0x%012llx\n", partno,
 			       (unsigned long long)cur_offset, (unsigned long long)slave->offset);
@@ -556,19 +547,22 @@ static struct mtd_part *allocate_partition(struct mtd_info *master,
 		slave->mtd.erasesize = master->erasesize;
 	}
 
-	if ((slave->mtd.flags & MTD_WRITEABLE) &&
-	    mtd_mod_by_eb(slave->offset, &slave->mtd)) {
+	tmp = slave->offset;
+	remainder = do_div(tmp, wr_alignment);
+	if ((slave->mtd.flags & MTD_WRITEABLE) && remainder) {
 		/* Doesn't start on a boundary of major erase size */
 		/* FIXME: Let it be writable if it is on a boundary of
 		 * _minor_ erase size though */
 		slave->mtd.flags &= ~MTD_WRITEABLE;
-		printk(KERN_WARNING"mtd: partition \"%s\" doesn't start on an erase block boundary -- force read-only\n",
+		printk(KERN_WARNING"mtd: partition \"%s\" doesn't start on an erase/write block boundary -- force read-only\n",
 			part->name);
 	}
-	if ((slave->mtd.flags & MTD_WRITEABLE) &&
-	    mtd_mod_by_eb(slave->mtd.size, &slave->mtd)) {
+
+	tmp = slave->mtd.size;
+	remainder = do_div(tmp, wr_alignment);
+	if ((slave->mtd.flags & MTD_WRITEABLE) && remainder) {
 		slave->mtd.flags &= ~MTD_WRITEABLE;
-		printk(KERN_WARNING"mtd: partition \"%s\" doesn't end on an erase block -- force read-only\n",
+		printk(KERN_WARNING"mtd: partition \"%s\" doesn't end on an erase/write block -- force read-only\n",
 			part->name);
 	}
 
@@ -656,6 +650,50 @@ int mtd_add_partition(struct mtd_info *master, const char *name,
 }
 EXPORT_SYMBOL_GPL(mtd_add_partition);
 
+/**
+ * __mtd_del_partition - delete MTD partition
+ *
+ * @priv: internal MTD struct for partition to be deleted
+ *
+ * This function must be called with the partitions mutex locked.
+ */
+static int __mtd_del_partition(struct mtd_part *priv)
+{
+	int err;
+
+	sysfs_remove_files(&priv->mtd.dev.kobj, mtd_partition_attrs);
+
+	err = del_mtd_device(&priv->mtd);
+	if (err)
+		return err;
+
+	list_del(&priv->list);
+	free_partition(priv);
+
+	return 0;
+}
+
+/*
+ * This function unregisters and destroys all slave MTD objects which are
+ * attached to the given master MTD object.
+ */
+int del_mtd_partitions(struct mtd_info *master)
+{
+	struct mtd_part *slave, *next;
+	int ret, err = 0;
+
+	mutex_lock(&mtd_partitions_mutex);
+	list_for_each_entry_safe(slave, next, &mtd_partitions, list)
+		if (slave->master == master) {
+			ret = __mtd_del_partition(slave);
+			if (ret < 0)
+				err = ret;
+		}
+	mutex_unlock(&mtd_partitions_mutex);
+
+	return err;
+}
+
 int mtd_del_partition(struct mtd_info *master, int partno)
 {
 	struct mtd_part *slave, *next;
@@ -665,14 +703,7 @@ int mtd_del_partition(struct mtd_info *master, int partno)
 	list_for_each_entry_safe(slave, next, &mtd_partitions, list)
 		if ((slave->master == master) &&
 		    (slave->mtd.index == partno)) {
-			sysfs_remove_files(&slave->mtd.dev.kobj,
-					   mtd_partition_attrs);
-			ret = del_mtd_device(&slave->mtd);
-			if (ret < 0)
-				break;
-
-			list_del(&slave->list);
-			free_partition(slave);
+			ret = __mtd_del_partition(slave);
 			break;
 		}
 	mutex_unlock(&mtd_partitions_mutex);
@@ -778,6 +809,42 @@ void deregister_mtd_parser(struct mtd_part_parser *p)
 }
 EXPORT_SYMBOL_GPL(deregister_mtd_parser);
 
+/*
+ * Parses the linux,part-probe device tree property.
+ * When a non-NULL value is returned, it has to be freed with kfree()
+ * by the caller.
+ */
+static const char * const *of_get_probes(struct device_node *dp)
+{
+	const char *cp;
+	int cplen;
+	unsigned int l;
+	unsigned int count;
+	const char **res;
+
+	cp = of_get_property(dp, "linux,part-probe", &cplen);
+	if (cp == NULL)
+		return NULL;
+
+	count = 0;
+	for (l = 0; l != cplen; l++)
+		if (cp[l] == 0)
+			count++;
+
+	res = kzalloc((count + 1) * sizeof(*res), GFP_KERNEL);
+	if (!res)
+		return NULL;
+	count = 0;
+	while (cplen > 0) {
+		res[count] = cp;
+		l = strlen(cp) + 1;
+		cp += l;
+		cplen -= l;
+		count++;
+	}
+	return res;
+}
+
 /*
  * Do not forget to update 'parse_mtd_partitions()' kerneldoc comment if you
  * are changing this array!
@@ -788,6 +855,27 @@ static const char * const default_mtd_part_types[] = {
 	NULL
 };
 
+static int mtd_part_do_parse(struct mtd_part_parser *parser,
+			     struct mtd_info *master,
+			     struct mtd_partitions *pparts,
+			     struct mtd_part_parser_data *data)
+{
+	int ret;
+
+	ret = (*parser->parse_fn)(master, &pparts->parts, data);
+	pr_debug("%s: parser %s: %i\n", master->name, parser->name, ret);
+	if (ret <= 0)
+		return ret;
+
+	pr_notice("%d %s partitions found on MTD device %s\n", ret,
+		  parser->name, master->name);
+
+	pparts->nr_parts = ret;
+	pparts->parser = parser;
+
+	return ret;
+}
+
 /**
  * parse_mtd_partitions - parse MTD partitions
  * @master: the master partition (describes whole MTD device)
@@ -815,6 +903,13 @@ int parse_mtd_partitions(struct mtd_info *master, const char *const *types,
 {
 	struct mtd_part_parser *parser;
 	int ret, err = 0;
+	const char *const *types_of = NULL;
+
+	if (mtd_get_of_node(master)) {
+		types_of = of_get_probes(mtd_get_of_node(master));
+		if (types_of != NULL)
+			types = types_of;
+	}
 
 	if (!types)
 		types = default_mtd_part_types;
@@ -828,16 +923,10 @@ int parse_mtd_partitions(struct mtd_info *master, const char *const *types,
 			 parser ? parser->name : NULL);
 		if (!parser)
 			continue;
-		ret = (*parser->parse_fn)(master, &pparts->parts, data);
-		pr_debug("%s: parser %s: %i\n",
-			 master->name, parser->name, ret);
-		if (ret > 0) {
-			printk(KERN_NOTICE "%d %s partitions found on MTD device %s\n",
-			       ret, parser->name, master->name);
-			pparts->nr_parts = ret;
-			pparts->parser = parser;
+		ret = mtd_part_do_parse(parser, master, pparts, data);
+		/* Found partitions! */
+		if (ret > 0)
 			return 0;
-		}
 		mtd_part_parser_put(parser);
 		/*
 		 * Stash the first error we see; only report it if no parser
@@ -846,6 +935,7 @@ int parse_mtd_partitions(struct mtd_info *master, const char *const *types,
 		if (ret < 0 && !err)
 			err = ret;
 	}
+	kfree(types_of);
 	return err;
 }
 
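
One of the changes above replaces the erase-block arithmetic in allocate_partition() with a do_div() remainder against wr_alignment, so that MTD_NO_ERASE devices align on writesize instead of erasesize. The userspace sketch below reproduces the MTDPART_OFS_NXTBLK round-up with plain 64-bit arithmetic; the offset and eraseblock size are made-up example values.

/*
 * Userspace sketch of the offset round-up done for MTDPART_OFS_NXTBLK:
 * round cur_offset up to the next multiple of wr_alignment.  The
 * remainder here corresponds to what do_div() returns in the kernel
 * code above.  The numbers are made up.
 */
#include <stdio.h>
#include <stdint.h>

static uint64_t round_up_to_alignment(uint64_t offset, uint32_t alignment)
{
	uint32_t remainder = offset % alignment;

	if (remainder)
		offset += alignment - remainder;
	return offset;
}

int main(void)
{
	uint64_t cur_offset = 0x1234567;	/* arbitrary example offset */
	uint32_t erasesize = 0x20000;		/* 128 KiB eraseblock */

	printf("0x%llx -> 0x%llx\n",
	       (unsigned long long)cur_offset,
	       (unsigned long long)round_up_to_alignment(cur_offset, erasesize));
	return 0;
}
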
diff --git a/drivers/mtd/ofpart.c b/drivers/mtd/ofpart.c
index ede407d6e..464470122 100644
--- a/drivers/mtd/ofpart.c
+++ b/drivers/mtd/ofpart.c
@@ -108,6 +108,7 @@ static int parse_ofpart_partitions(struct mtd_info *master,
 
 		parts[i].offset = of_read_number(reg, a_cells);
 		parts[i].size = of_read_number(reg + a_cells, s_cells);
+		parts[i].of_node = pp;
 
 		partname = of_get_property(pp, "label", &len);
 		if (!partname)
diff --git a/drivers/mtd/ubi/attach.c b/drivers/mtd/ubi/attach.c
index 93ceea4f2..a4f849d69 100644
--- a/drivers/mtd/ubi/attach.c
+++ b/drivers/mtd/ubi/attach.c
@@ -939,6 +939,13 @@ static bool vol_ignored(int vol_id)
 #endif
 }
 
+static bool ec_hdr_has_eof(struct ubi_ec_hdr *ech)
+{
+	return ech->padding1[0] == 'E' &&
+	       ech->padding1[1] == 'O' &&
+	       ech->padding1[2] == 'F';
+}
+
 /**
  * scan_peb - scan and process UBI headers of a PEB.
  * @ubi: UBI device description object
@@ -971,9 +978,21 @@ static int scan_peb(struct ubi_device *ubi, struct ubi_attach_info *ai,
 		return 0;
 	}
 
-	err = ubi_io_read_ec_hdr(ubi, pnum, ech, 0);
-	if (err < 0)
-		return err;
+	if (!ai->eof_found) {
+		err = ubi_io_read_ec_hdr(ubi, pnum, ech, 0);
+		if (err < 0)
+			return err;
+
+		if (ec_hdr_has_eof(ech)) {
+			pr_notice("UBI: EOF marker found, PEBs from %d will be erased",
+				pnum);
+			ai->eof_found = true;
+		}
+	}
+
+	if (ai->eof_found)
+		err = UBI_IO_FF_BITFLIPS;
+
 	switch (err) {
 	case 0:
 		break;
diff --git a/drivers/mtd/ubi/block.c b/drivers/mtd/ubi/block.c
index d1e6931c1..dcd622105 100644
--- a/drivers/mtd/ubi/block.c
+++ b/drivers/mtd/ubi/block.c
@@ -50,6 +50,7 @@
 #include <linux/scatterlist.h>
 #include <linux/idr.h>
 #include <asm/div64.h>
+#include <linux/root_dev.h>
 
 #include "ubi-media.h"
 #include "ubi.h"
@@ -447,6 +448,15 @@ int ubiblock_create(struct ubi_volume_info *vi)
 	add_disk(dev->gd);
 	dev_info(disk_to_dev(dev->gd), "created from ubi%d:%d(%s)",
 		 dev->ubi_num, dev->vol_id, vi->name);
+
+	if (!strcmp(vi->name, "rootfs") &&
+	    IS_ENABLED(CONFIG_MTD_ROOTFS_ROOT_DEV) &&
+	    ROOT_DEV == 0) {
+		pr_notice("ubiblock: device ubiblock%d_%d (%s) set to be root filesystem\n",
+			  dev->ubi_num, dev->vol_id, vi->name);
+		ROOT_DEV = MKDEV(gd->major, gd->first_minor);
+	}
+
 	return 0;
 
 out_free_queue:
@@ -627,6 +637,45 @@ static void __init ubiblock_create_from_param(void)
 	}
 }
 
+#define UBIFS_NODE_MAGIC  0x06101831
+static inline int ubi_vol_is_ubifs(struct ubi_volume_desc *desc)
+{
+	int ret;
+	uint32_t magic_of, magic;
+
+	ret = ubi_read(desc, 0, (char *)&magic_of, 0, 4);
+	if (ret)
+		return 0;
+	magic = le32_to_cpu(magic_of);
+	return magic == UBIFS_NODE_MAGIC;
+}
+
+static void __init ubiblock_create_auto_rootfs(void)
+{
+	int ubi_num, ret, is_ubifs;
+	struct ubi_volume_desc *desc;
+	struct ubi_volume_info vi;
+
+	for (ubi_num = 0; ubi_num < UBI_MAX_DEVICES; ubi_num++) {
+		desc = ubi_open_volume_nm(ubi_num, "rootfs", UBI_READONLY);
+		if (IS_ERR(desc))
+			continue;
+
+		ubi_get_volume_info(desc, &vi);
+		is_ubifs = ubi_vol_is_ubifs(desc);
+		ubi_close_volume(desc);
+		if (is_ubifs)
+			break;
+
+		ret = ubiblock_create(&vi);
+		if (ret)
+			pr_err("UBI error: block: can't add '%s' volume, err=%d\n",
+				vi.name, ret);
+		/* always break if we get here */
+		break;
+	}
+}
+
 static void ubiblock_remove_all(void)
 {
 	struct ubiblock *next;
@@ -657,6 +706,10 @@ int __init ubiblock_init(void)
 	 */
 	ubiblock_create_from_param();
 
+	/* auto-attach "rootfs" volume if existing and non-ubifs */
+	if (IS_ENABLED(CONFIG_MTD_ROOTFS_ROOT_DEV))
+		ubiblock_create_auto_rootfs();
+
 	/*
 	 * Block devices are only created upon user requests, so we ignore
 	 * existing volumes.
diff --git a/drivers/mtd/ubi/build.c b/drivers/mtd/ubi/build.c
index 85d54f37e..e556c645b 100644
--- a/drivers/mtd/ubi/build.c
+++ b/drivers/mtd/ubi/build.c
@@ -1212,6 +1212,49 @@ static struct mtd_info * __init open_mtd_device(const char *mtd_dev)
 	return mtd;
 }
 
+/*
+ * This function tries attaching mtd partitions named either "ubi" or "data"
+ * during boot.
+ */
+static void __init ubi_auto_attach(void)
+{
+	int err;
+	struct mtd_info *mtd;
+
+	/* try attaching mtd device named "ubi" or "data" */
+	mtd = open_mtd_device("ubi");
+	if (IS_ERR(mtd))
+		mtd = open_mtd_device("data");
+
+	if (!IS_ERR(mtd)) {
+		size_t len;
+		char magic[4];
+
+		/* check for a valid ubi magic */
+		err = mtd_read(mtd, 0, 4, &len, (void *) magic);
+		if (!err && len == 4 && strncmp(magic, "UBI#", 4)) {
+			pr_err("UBI error: no valid UBI magic found inside mtd%d", mtd->index);
+			put_mtd_device(mtd);
+			return;
+		}
+
+		/* auto-add only media types where UBI makes sense */
+		if (mtd->type == MTD_NANDFLASH ||
+		    mtd->type == MTD_NORFLASH ||
+		    mtd->type == MTD_DATAFLASH ||
+		    mtd->type == MTD_MLCNANDFLASH) {
+			mutex_lock(&ubi_devices_mutex);
+			pr_notice("UBI: auto-attach mtd%d\n", mtd->index);
+			err = ubi_attach_mtd_dev(mtd, UBI_DEV_NUM_AUTO, 0, 0);
+			mutex_unlock(&ubi_devices_mutex);
+			if (err < 0) {
+				pr_err("UBI error: cannot attach mtd%d", mtd->index);
+				put_mtd_device(mtd);
+			}
+		}
+	}
+}
+
 static int __init ubi_init(void)
 {
 	int err, i, k;
@@ -1295,6 +1338,12 @@ static int __init ubi_init(void)
 		}
 	}
 
+	/* auto-attach mtd devices only if built-in to the kernel and no ubi.mtd
+	 * parameter was given */
+	if (IS_ENABLED(CONFIG_MTD_ROOTFS_ROOT_DEV) &&
+	    !ubi_is_module() && !mtd_devs)
+		ubi_auto_attach();
+
 	err = ubiblock_init();
 	if (err) {
 		pr_err("UBI error: block: cannot initialize, error %d", err);
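
ubi_auto_attach() above only attaches an MTD device whose first four bytes carry the "UBI#" magic. The sketch below performs the same check against a raw flash dump from userspace; the dump file name is an assumption.

/*
 * Userspace sketch of the magic check done by ubi_auto_attach():
 * read the first 4 bytes and compare them with "UBI#".  The dump
 * file name is made up.
 */
#include <stdio.h>
#include <string.h>

int main(void)
{
	char magic[4];
	FILE *f = fopen("flash-dump.bin", "rb");

	if (!f) {
		perror("flash-dump.bin");
		return 1;
	}
	if (fread(magic, 1, 4, f) == 4 && memcmp(magic, "UBI#", 4) == 0)
		printf("looks like a UBI device\n");
	else
		printf("no UBI magic found\n");
	fclose(f);
	return 0;
}
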
diff --git a/drivers/mtd/ubi/ubi.h b/drivers/mtd/ubi/ubi.h
index 697dbcba7..92d207ff2 100644
--- a/drivers/mtd/ubi/ubi.h
+++ b/drivers/mtd/ubi/ubi.h
@@ -779,6 +779,7 @@ struct ubi_attach_info {
 	int mean_ec;
 	uint64_t ec_sum;
 	int ec_count;
+	bool eof_found;
 	struct kmem_cache *aeb_slab_cache;
 	struct ubi_ec_hdr *ech;
 	struct ubi_vid_io_buf *vidb;
diff --git a/drivers/net/phy/phy.c b/drivers/net/phy/phy.c
index 6e12401b5..8e66e16f2 100644
--- a/drivers/net/phy/phy.c
+++ b/drivers/net/phy/phy.c
@@ -466,6 +466,50 @@ int phy_ethtool_ksettings_get(struct phy_device *phydev,
 }
 EXPORT_SYMBOL(phy_ethtool_ksettings_get);
 
+int phy_ethtool_ioctl(struct phy_device *phydev, void *useraddr)
+{
+	u32 cmd;
+	int tmp;
+	struct ethtool_cmd ecmd = { ETHTOOL_GSET };
+	struct ethtool_value edata = { ETHTOOL_GLINK };
+
+	if (get_user(cmd, (u32 *)useraddr))
+		return -EFAULT;
+
+	switch (cmd) {
+	case ETHTOOL_GSET:
+		phy_ethtool_gset(phydev, &ecmd);
+		if (copy_to_user(useraddr, &ecmd, sizeof(ecmd)))
+			return -EFAULT;
+		return 0;
+
+	case ETHTOOL_SSET:
+		if (copy_from_user(&ecmd, useraddr, sizeof(ecmd)))
+			return -EFAULT;
+		return phy_ethtool_sset(phydev, &ecmd);
+
+	case ETHTOOL_NWAY_RST:
+		/* if autoneg is off, it's an error */
+		tmp = phy_read(phydev, MII_BMCR);
+		if (tmp & BMCR_ANENABLE) {
+			tmp |= (BMCR_ANRESTART);
+			phy_write(phydev, MII_BMCR, tmp);
+			return 0;
+		}
+		return -EINVAL;
+
+	case ETHTOOL_GLINK:
+		edata.data = (phy_read(phydev,
+				MII_BMSR) & BMSR_LSTATUS) ? 1 : 0;
+		if (copy_to_user(useraddr, &edata, sizeof(edata)))
+			return -EFAULT;
+		return 0;
+	}
+
+	return -EOPNOTSUPP;
+}
+EXPORT_SYMBOL(phy_ethtool_ioctl);
+
 /**
  * phy_mii_ioctl - generic PHY MII ioctl interface
  * @phydev: the phy_device struct
diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c
index bf02f8e46..1def50c23 100644
--- a/drivers/net/phy/phy_device.c
+++ b/drivers/net/phy/phy_device.c
@@ -1001,6 +1001,9 @@ void phy_detach(struct phy_device *phydev)
 	struct mii_bus *bus;
 	int i;
 
+	if (phydev->drv && phydev->drv->detach)
+		phydev->drv->detach(phydev);
+
 	phydev->attached_dev->phydev = NULL;
 	phydev->attached_dev = NULL;
 	phy_suspend(phydev);
diff --git a/drivers/net/ppp/pppoe.c b/drivers/net/ppp/pppoe.c
index f0a420e03..d7d342e7a 100644
--- a/drivers/net/ppp/pppoe.c
+++ b/drivers/net/ppp/pppoe.c
@@ -906,7 +906,7 @@ static int pppoe_sendmsg(struct socket *sock, struct msghdr *m,
 		goto end;
 
 
-	skb = sock_wmalloc(sk, total_len + dev->hard_header_len + 32,
+	skb = sock_wmalloc(sk, total_len + dev->hard_header_len + 32 + NET_SKB_PAD,
 			   0, GFP_KERNEL);
 	if (!skb) {
 		error = -ENOMEM;
@@ -914,7 +914,7 @@ static int pppoe_sendmsg(struct socket *sock, struct msghdr *m,
 	}
 
 	/* Reserve space for headers. */
-	skb_reserve(skb, dev->hard_header_len);
+	skb_reserve(skb, dev->hard_header_len + NET_SKB_PAD);
 	skb_reset_network_header(skb);
 
 	skb->dev = dev;
diff --git a/drivers/of/of_net.c b/drivers/of/of_net.c
index d820f3edd..b0b1904bc 100644
--- a/drivers/of/of_net.c
+++ b/drivers/of/of_net.c
@@ -10,6 +10,7 @@
 #include <linux/of_net.h>
 #include <linux/phy.h>
 #include <linux/export.h>
+#include <linux/mtd/mtd.h>
 
 /**
  * of_get_phy_mode - Get phy mode for given device_node
@@ -38,7 +39,7 @@ int of_get_phy_mode(struct device_node *np)
 }
 EXPORT_SYMBOL_GPL(of_get_phy_mode);
 
-static const void *of_get_mac_addr(struct device_node *np, const char *name)
+static void *of_get_mac_addr(struct device_node *np, const char *name)
 {
 	struct property *pp = of_find_property(np, name, NULL);
 
@@ -47,6 +48,73 @@ static const void *of_get_mac_addr(struct device_node *np, const char *name)
 	return NULL;
 }
 
+static const void *of_get_mac_address_mtd(struct device_node *np)
+{
+#ifdef CONFIG_MTD
+	struct device_node *mtd_np = NULL;
+	struct property *prop;
+	size_t retlen;
+	int size, ret;
+	struct mtd_info *mtd;
+	const char *part;
+	const __be32 *list;
+	phandle phandle;
+	u32 mac_inc = 0;
+	u8 mac[ETH_ALEN];
+	void *addr;
+
+	list = of_get_property(np, "mtd-mac-address", &size);
+	if (!list || (size != (2 * sizeof(*list))))
+		return NULL;
+
+	phandle = be32_to_cpup(list++);
+	if (phandle)
+		mtd_np = of_find_node_by_phandle(phandle);
+
+	if (!mtd_np)
+		return NULL;
+
+	part = of_get_property(mtd_np, "label", NULL);
+	if (!part)
+		part = mtd_np->name;
+
+	mtd = get_mtd_device_nm(part);
+	if (IS_ERR(mtd))
+		return NULL;
+
+	ret = mtd_read(mtd, be32_to_cpup(list), 6, &retlen, mac);
+	put_mtd_device(mtd);
+
+	if (!of_property_read_u32(np, "mtd-mac-address-increment", &mac_inc))
+		mac[5] += mac_inc;
+
+	if (!is_valid_ether_addr(mac))
+		return NULL;
+
+	addr = of_get_mac_addr(np, "mac-address");
+	if (addr) {
+		memcpy(addr, mac, ETH_ALEN);
+		return addr;
+	}
+
+	prop = kzalloc(sizeof(*prop), GFP_KERNEL);
+	if (!prop)
+		return NULL;
+
+	prop->name = "mac-address";
+	prop->length = ETH_ALEN;
+	prop->value = kmemdup(mac, ETH_ALEN, GFP_KERNEL);
+	if (!prop->value || of_add_property(np, prop))
+		goto free;
+
+	return prop->value;
+free:
+	kfree(prop->value);
+	kfree(prop);
+#endif
+	return NULL;
+}
+
 /**
  * Search the device tree for the best MAC address to use.  'mac-address' is
  * checked first, because that is supposed to contain to "most recent" MAC
@@ -64,11 +132,18 @@ static const void *of_get_mac_addr(struct device_node *np, const char *name)
  * addresses.  Some older U-Boots only initialized 'local-mac-address'.  In
  * this case, the real MAC is in 'local-mac-address', and 'mac-address' exists
  * but is all zeros.
+ *
+ * If an mtd-mac-address property exists, try to fetch the MAC address from
+ * the specified MTD device and store it as a 'mac-address' property.
 */
 const void *of_get_mac_address(struct device_node *np)
 {
 	const void *addr;
 
+	addr = of_get_mac_address_mtd(np);
+	if (addr)
+		return addr;
+
 	addr = of_get_mac_addr(np, "mac-address");
 	if (addr)
 		return addr;
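
The of_get_mac_address_mtd() helper above reads six bytes from the partition referenced by mtd-mac-address, optionally adds mtd-mac-address-increment to the last octet, and keeps the result only if it passes is_valid_ether_addr(). The sketch below reproduces that increment-and-validate step in userspace; the base MAC and the increment value are made up, and, as in the kernel code, only the final octet is incremented (no carry into the upper octets).

/*
 * Sketch of the increment-and-validate step used by
 * of_get_mac_address_mtd(): add mtd-mac-address-increment to the last
 * octet, then reject multicast or all-zero results, which is what
 * is_valid_ether_addr() checks in the kernel.  The base MAC is made up.
 */
#include <stdio.h>
#include <stdint.h>
#include <string.h>

static int mac_is_valid(const uint8_t *mac)
{
	static const uint8_t zero[6];

	if (mac[0] & 0x01)		/* bit 0 of the first octet: multicast */
		return 0;
	if (memcmp(mac, zero, 6) == 0)	/* all-zero address */
		return 0;
	return 1;
}

int main(void)
{
	uint8_t mac[6] = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55 };
	uint32_t mac_inc = 2;	/* e.g. second port of the same device */

	mac[5] += mac_inc;	/* same single-octet increment as the patch */

	printf("%02x:%02x:%02x:%02x:%02x:%02x valid=%d\n",
	       mac[0], mac[1], mac[2], mac[3], mac[4], mac[5],
	       mac_is_valid(mac));
	return 0;
}
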
diff --git a/drivers/pci/Kconfig b/drivers/pci/Kconfig
index 6555eb78d..120fe3921 100644
--- a/drivers/pci/Kconfig
+++ b/drivers/pci/Kconfig
@@ -71,6 +71,12 @@ config XEN_PCIDEV_FRONTEND
           The PCI device frontend driver allows the kernel to import arbitrary
           PCI devices from a PCI backend to support PCI driver domains.
 
+config PCI_DISABLE_COMMON_QUIRKS
+	bool "PCI disable common quirks"
+	depends on PCI
+	help
+	  If you don't know what to do here, say N.
+
 config HT_IRQ
 	bool "Interrupts on hypertransport devices"
 	default y
diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c
index 5d8151b43..cb356e78e 100644
--- a/drivers/pci/quirks.c
+++ b/drivers/pci/quirks.c
@@ -41,6 +41,7 @@ static void quirk_mmio_always_on(struct pci_dev *dev)
 DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_ANY_ID, PCI_ANY_ID,
 				PCI_CLASS_BRIDGE_HOST, 8, quirk_mmio_always_on);
 
+#ifndef CONFIG_PCI_DISABLE_COMMON_QUIRKS
 /* The Mellanox Tavor device gives false positive parity errors
  * Mark this device with a broken_parity_status, to allow
  * PCI scanning code to "skip" this now blacklisted device.
@@ -3038,6 +3039,7 @@ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x65f8, quirk_intel_mc_errata);
 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x65f9, quirk_intel_mc_errata);
 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x65fa, quirk_intel_mc_errata);
 
+#endif /* !CONFIG_PCI_DISABLE_COMMON_QUIRKS */
 
 /*
  * Ivytown NTB BAR sizes are misreported by the hardware due to an erratum.  To
@@ -3094,6 +3096,8 @@ static void fixup_debug_report(struct pci_dev *dev, ktime_t calltime,
 	}
 }
 
+#ifndef CONFIG_PCI_DISABLE_COMMON_QUIRKS
+
 /*
  * Some BIOS implementations leave the Intel GPU interrupts enabled,
  * even though no one is handling them (f.e. i915 driver is never loaded).
@@ -3128,6 +3132,8 @@ DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x0102, disable_igfx_irq);
 DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x010a, disable_igfx_irq);
 DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x0152, disable_igfx_irq);
 
+#endif /* !CONFIG_PCI_DISABLE_COMMON_QUIRKS */
+
 /*
  * PCI devices which are on Intel chips can skip the 10ms delay
  * before entering D3 mode.
diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c
index 6db80635a..2eb2b1765 100644
--- a/drivers/spi/spi.c
+++ b/drivers/spi/spi.c
@@ -700,7 +700,7 @@ static void spi_set_cs(struct spi_device *spi, bool enable)
 		enable = !enable;
 
 	if (gpio_is_valid(spi->cs_gpio))
-		gpio_set_value(spi->cs_gpio, !enable);
+		gpio_set_value_cansleep(spi->cs_gpio, !enable);
 	else if (spi->master->set_cs)
 		spi->master->set_cs(spi, !enable);
 }
diff --git a/drivers/spi/spidev.c b/drivers/spi/spidev.c
index 2e05046f8..09d03763f 100644
--- a/drivers/spi/spidev.c
+++ b/drivers/spi/spidev.c
@@ -696,6 +696,7 @@ static struct class *spidev_class;
 static const struct of_device_id spidev_dt_ids[] = {
 	{ .compatible = "rohm,dh2228fv" },
 	{ .compatible = "lineartechnology,ltc2488" },
+	{ .compatible = "siliconlabs,si3210" },
 	{},
 };
 MODULE_DEVICE_TABLE(of, spidev_dt_ids);
diff --git a/drivers/ssb/Kconfig b/drivers/ssb/Kconfig
index d8e4219c2..a20c168cb 100644
--- a/drivers/ssb/Kconfig
+++ b/drivers/ssb/Kconfig
@@ -29,6 +29,7 @@ config SSB_SPROM
 config SSB_BLOCKIO
 	bool
 	depends on SSB
+	default y
 
 config SSB_PCIHOST_POSSIBLE
 	bool
@@ -49,7 +50,7 @@ config SSB_PCIHOST
 config SSB_B43_PCI_BRIDGE
 	bool
 	depends on SSB_PCIHOST
-	default n
+	default y
 
 config SSB_PCMCIAHOST_POSSIBLE
 	bool
diff --git a/drivers/usb/core/ledtrig-usbport.c b/drivers/usb/core/ledtrig-usbport.c
index 1713248ab..16c19a31d 100644
--- a/drivers/usb/core/ledtrig-usbport.c
+++ b/drivers/usb/core/ledtrig-usbport.c
@@ -11,8 +11,10 @@
 #include <linux/device.h>
 #include <linux/leds.h>
 #include <linux/module.h>
+#include <linux/of.h>
 #include <linux/slab.h>
 #include <linux/usb.h>
+#include <linux/usb/of.h>
 
 struct usbport_trig_data {
 	struct led_classdev *led_cdev;
@@ -123,6 +125,57 @@ static const struct attribute_group ports_group = {
  * Adding & removing ports
  ***************************************/
 
+/**
+ * usbport_trig_port_observed - Check if port should be observed
+ */
+static bool usbport_trig_port_observed(struct usbport_trig_data *usbport_data,
+				       struct usb_device *usb_dev, int port1)
+{
+	struct device *dev = usbport_data->led_cdev->dev;
+	struct device_node *led_np = dev->of_node;
+	struct of_phandle_args args;
+	struct device_node *port_np;
+	int count, i;
+
+	if (!led_np)
+		return false;
+
+	/* Get node of port being added */
+	port_np = usb_of_get_child_node(usb_dev->dev.of_node, port1);
+	if (!port_np)
+		return false;
+
+	/* Amount of trigger sources for this LED */
+	count = of_count_phandle_with_args(led_np, "trigger-sources",
+					   "#trigger-source-cells");
+	if (count < 0) {
+		dev_warn(dev, "Failed to get trigger sources for %s\n",
+			 led_np->full_name);
+		return false;
+	}
+
+	/* Check list of sources for this specific port */
+	for (i = 0; i < count; i++) {
+		int err;
+
+		err = of_parse_phandle_with_args(led_np, "trigger-sources",
+						 "#trigger-source-cells", i,
+						 &args);
+		if (err) {
+			dev_err(dev, "Failed to get trigger source phandle at index %d: %d\n",
+				i, err);
+			continue;
+		}
+
+		of_node_put(args.np);
+
+		if (args.np == port_np)
+			return true;
+	}
+
+	return false;
+}
+
 static int usbport_trig_add_port(struct usbport_trig_data *usbport_data,
 				 struct usb_device *usb_dev,
 				 const char *hub_name, int portnum)
@@ -141,6 +194,8 @@ static int usbport_trig_add_port(struct usbport_trig_data *usbport_data,
 	port->data = usbport_data;
 	port->hub = usb_dev;
 	port->portnum = portnum;
+	port->observed = usbport_trig_port_observed(usbport_data, usb_dev,
+						    portnum);
 
 	len = strlen(hub_name) + 8;
 	port->port_name = kzalloc(len, GFP_KERNEL);
@@ -255,6 +310,7 @@ static void usbport_trig_activate(struct led_classdev *led_cdev)
 	if (err)
 		goto err_free;
 	usb_for_each_dev(usbport_data, usbport_trig_add_usb_dev_ports);
+	usbport_trig_update_count(usbport_data);
 
 	/* Notifications */
 	usbport_data->nb.notifier_call = usbport_trig_notify,
diff --git a/drivers/usb/core/urb.c b/drivers/usb/core/urb.c
index 5133ab965..7a641a341 100644
--- a/drivers/usb/core/urb.c
+++ b/drivers/usb/core/urb.c
@@ -321,9 +321,6 @@ EXPORT_SYMBOL_GPL(usb_unanchor_urb);
  */
 int usb_submit_urb(struct urb *urb, gfp_t mem_flags)
 {
-	static int			pipetypes[4] = {
-		PIPE_CONTROL, PIPE_ISOCHRONOUS, PIPE_BULK, PIPE_INTERRUPT
-	};
 	int				xfertype, max;
 	struct usb_device		*dev;
 	struct usb_host_endpoint	*ep;
@@ -441,11 +438,6 @@ int usb_submit_urb(struct urb *urb, gfp_t mem_flags)
 	 * cause problems in HCDs if they get it wrong.
 	 */
 
-	/* Check that the pipe's type matches the endpoint's type */
-	if (usb_pipetype(urb->pipe) != pipetypes[xfertype])
-		dev_WARN(&dev->dev, "BOGUS urb xfer, pipe %x != type %x\n",
-			usb_pipetype(urb->pipe), pipetypes[xfertype]);
-
 	/* Check against a simple/standard policy */
 	allowed = (URB_NO_TRANSFER_DMA_MAP | URB_NO_INTERRUPT | URB_DIR_MASK |
 			URB_FREE_BUFFER);
diff --git a/drivers/usb/dwc2/hcd.c b/drivers/usb/dwc2/hcd.c
index df5a06578..1b6f5e1cf 100644
--- a/drivers/usb/dwc2/hcd.c
+++ b/drivers/usb/dwc2/hcd.c
@@ -5184,7 +5184,6 @@ error3:
 error2:
 	usb_put_hcd(hcd);
 error1:
-	kfree(hsotg->core_params);
 
 #ifdef CONFIG_USB_DWC2_TRACK_MISSED_SOFS
 	kfree(hsotg->last_frame_num_array);
diff --git a/drivers/usb/host/ehci-hcd.c b/drivers/usb/host/ehci-hcd.c
index 063064801..837de353b 100644
--- a/drivers/usb/host/ehci-hcd.c
+++ b/drivers/usb/host/ehci-hcd.c
@@ -651,7 +651,7 @@ static int ehci_run (struct usb_hcd *hcd)
 		"USB %x.%x started, EHCI %x.%02x%s\n",
 		((ehci->sbrn & 0xf0)>>4), (ehci->sbrn & 0x0f),
 		temp >> 8, temp & 0xff,
-		ignore_oc ? ", overcurrent ignored" : "");
+		(ignore_oc || ehci->ignore_oc) ? ", overcurrent ignored" : "");
 
 	ehci_writel(ehci, INTR_MASK,
 		    &ehci->regs->intr_enable); /* Turn On Interrupts */
diff --git a/drivers/usb/host/ehci-hub.c b/drivers/usb/host/ehci-hub.c
index 74f62d68f..c8ddfdf73 100644
--- a/drivers/usb/host/ehci-hub.c
+++ b/drivers/usb/host/ehci-hub.c
@@ -638,7 +638,7 @@ ehci_hub_status_data (struct usb_hcd *hcd, char *buf)
 	 * always set, seem to clear PORT_OCC and PORT_CSC when writing to
 	 * PORT_POWER; that's surprising, but maybe within-spec.
 	 */
-	if (!ignore_oc)
+	if (!ignore_oc && !ehci->ignore_oc)
 		mask = PORT_CSC | PORT_PEC | PORT_OCC;
 	else
 		mask = PORT_CSC | PORT_PEC;
@@ -1008,7 +1008,7 @@ int ehci_hub_control(
 		if (temp & PORT_PEC)
 			status |= USB_PORT_STAT_C_ENABLE << 16;
 
-		if ((temp & PORT_OCC) && !ignore_oc){
+		if ((temp & PORT_OCC) && (!ignore_oc && !ehci->ignore_oc)) {
 			status |= USB_PORT_STAT_C_OVERCURRENT << 16;
 
 			/*
diff --git a/drivers/usb/host/ehci-platform.c b/drivers/usb/host/ehci-platform.c
index a268d9e8d..88a5166f2 100644
--- a/drivers/usb/host/ehci-platform.c
+++ b/drivers/usb/host/ehci-platform.c
@@ -259,6 +259,8 @@ static int ehci_platform_probe(struct platform_device *dev)
 		hcd->has_tt = 1;
 	if (pdata->reset_on_resume)
 		priv->reset_on_resume = true;
+	if (pdata->ignore_oc)
+		ehci->ignore_oc = 1;
 
 #ifndef CONFIG_USB_EHCI_BIG_ENDIAN_MMIO
 	if (ehci->big_endian_mmio) {
diff --git a/drivers/usb/host/ehci.h b/drivers/usb/host/ehci.h
index 3f3b74aec..3b06bb779 100644
--- a/drivers/usb/host/ehci.h
+++ b/drivers/usb/host/ehci.h
@@ -230,6 +230,7 @@ struct ehci_hcd {			/* one per controller */
 	unsigned		frame_index_bug:1; /* MosChip (AKA NetMos) */
 	unsigned		need_oc_pp_cycle:1; /* MPC834X port power */
 	unsigned		imx28_write_fix:1; /* For Freescale i.MX28 */
+	unsigned		ignore_oc:1;
 
 	/* required for usb32 quirk */
 	#define OHCI_CTRL_HCFS          (3 << 6)
diff --git a/drivers/usb/host/pci-quirks.c b/drivers/usb/host/pci-quirks.c
index ee213c5f4..12a5628e7 100644
--- a/drivers/usb/host/pci-quirks.c
+++ b/drivers/usb/host/pci-quirks.c
@@ -107,6 +107,8 @@ struct amd_chipset_type {
 	u8 rev;
 };
 
+#ifndef CONFIG_PCI_DISABLE_COMMON_QUIRKS
+
 static struct amd_chipset_info {
 	struct pci_dev	*nb_dev;
 	struct pci_dev	*smbus_dev;
@@ -511,6 +513,10 @@ void usb_amd_dev_put(void)
 }
 EXPORT_SYMBOL_GPL(usb_amd_dev_put);
 
+#endif /* !CONFIG_PCI_DISABLE_COMMON_QUIRKS */
+
+#if IS_ENABLED(CONFIG_USB_UHCI_HCD)
+
 /*
  * Make sure the controller is completely inactive, unable to
  * generate interrupts or do DMA.
@@ -590,8 +596,18 @@ reset_needed:
 	uhci_reset_hc(pdev, base);
 	return 1;
 }
+#else
+int uhci_check_and_reset_hc(struct pci_dev *pdev, unsigned long base)
+{
+	return 0;
+}
+
+#endif
+
 EXPORT_SYMBOL_GPL(uhci_check_and_reset_hc);
 
+#ifndef CONFIG_PCI_DISABLE_COMMON_QUIRKS
+
 static inline int io_type_enabled(struct pci_dev *pdev, unsigned int mask)
 {
 	u16 cmd;
@@ -1158,3 +1174,4 @@ static void quirk_usb_early_handoff(struct pci_dev *pdev)
 }
 DECLARE_PCI_FIXUP_CLASS_FINAL(PCI_ANY_ID, PCI_ANY_ID,
 			PCI_CLASS_SERIAL_USB, 8, quirk_usb_early_handoff);
+#endif
diff --git a/drivers/usb/host/pci-quirks.h b/drivers/usb/host/pci-quirks.h
index 6463fdb40..ae4c2f723 100644
--- a/drivers/usb/host/pci-quirks.h
+++ b/drivers/usb/host/pci-quirks.h
@@ -4,6 +4,9 @@
 #ifdef CONFIG_PCI
 void uhci_reset_hc(struct pci_dev *pdev, unsigned long base);
 int uhci_check_and_reset_hc(struct pci_dev *pdev, unsigned long base);
+#endif  /* CONFIG_PCI */
+
+#if defined(CONFIG_PCI) && !defined(CONFIG_PCI_DISABLE_COMMON_QUIRKS)
 int usb_amd_find_chipset_info(void);
 int usb_hcd_amd_remote_wakeup_quirk(struct pci_dev *pdev);
 bool usb_amd_hang_symptom_quirk(void);
@@ -17,12 +20,25 @@ void usb_disable_xhci_ports(struct pci_dev *xhci_pdev);
 void sb800_prefetch(struct device *dev, int on);
 #else
 struct pci_dev;
+static inline int usb_amd_find_chipset_info(void)
+{
+	return 0;
+}
+static inline bool usb_amd_hang_symptom_quirk(void)
+{
+	return false;
+}
+static inline bool usb_amd_prefetch_quirk(void)
+{
+	return false;
+}
 static inline void usb_amd_quirk_pll_disable(void) {}
 static inline void usb_amd_quirk_pll_enable(void) {}
 static inline void usb_asmedia_modifyflowcontrol(struct pci_dev *pdev) {}
 static inline void usb_amd_dev_put(void) {}
 static inline void usb_disable_xhci_ports(struct pci_dev *xhci_pdev) {}
 static inline void sb800_prefetch(struct device *dev, int on) {}
+static inline void usb_enable_intel_xhci_ports(struct pci_dev *xhci_pdev) {}
 #endif  /* CONFIG_PCI */
 
 #endif  /*  __LINUX_USB_PCI_QUIRKS_H  */
diff --git a/fs/jffs2/Kconfig b/fs/jffs2/Kconfig
index d8bb6c411..74bf4b162 100644
--- a/fs/jffs2/Kconfig
+++ b/fs/jffs2/Kconfig
@@ -139,6 +139,15 @@ config JFFS2_LZO
 	  This feature was added in July, 2007. Say 'N' if you need
 	  compatibility with older bootloaders or kernels.
 
+config JFFS2_LZMA
+	bool "JFFS2 LZMA compression support" if JFFS2_COMPRESSION_OPTIONS
+	select LZMA_COMPRESS
+	select LZMA_DECOMPRESS
+	depends on JFFS2_FS
+	default n
+	help
+	  JFFS2 wrapper to the LZMA C SDK
+
 config JFFS2_RTIME
 	bool "JFFS2 RTIME compression support" if JFFS2_COMPRESSION_OPTIONS
 	depends on JFFS2_FS
diff --git a/fs/jffs2/Makefile b/fs/jffs2/Makefile
index 60e5d49ca..23ba6efd1 100644
--- a/fs/jffs2/Makefile
+++ b/fs/jffs2/Makefile
@@ -18,4 +18,7 @@ jffs2-$(CONFIG_JFFS2_RUBIN)	+= compr_rubin.o
 jffs2-$(CONFIG_JFFS2_RTIME)	+= compr_rtime.o
 jffs2-$(CONFIG_JFFS2_ZLIB)	+= compr_zlib.o
 jffs2-$(CONFIG_JFFS2_LZO)	+= compr_lzo.o
+jffs2-$(CONFIG_JFFS2_LZMA)      += compr_lzma.o
 jffs2-$(CONFIG_JFFS2_SUMMARY)   += summary.o
+
+CFLAGS_compr_lzma.o += -Iinclude/linux -Ilib/lzma
diff --git a/fs/jffs2/build.c b/fs/jffs2/build.c
index b288c8ae1..e768f9d41 100644
--- a/fs/jffs2/build.c
+++ b/fs/jffs2/build.c
@@ -117,6 +117,16 @@ static int jffs2_build_filesystem(struct jffs2_sb_info *c)
 	dbg_fsbuild("scanned flash completely\n");
 	jffs2_dbg_dump_block_lists_nolock(c);
 
+	if (c->flags & (1 << 7)) {
+		printk("%s(): unlocking the mtd device... ", __func__);
+		mtd_unlock(c->mtd, 0, c->mtd->size);
+		printk("done.\n");
+
+		printk("%s(): erasing all blocks after the end marker... ", __func__);
+		jffs2_erase_pending_blocks(c, -1);
+		printk("done.\n");
+	}
+
 	dbg_fsbuild("pass 1 starting\n");
 	c->flags |= JFFS2_SB_FLAG_BUILDING;
 	/* Now scan the directory tree, increasing nlink according to every dirent found. */
diff --git a/fs/jffs2/compr.c b/fs/jffs2/compr.c
index 4849a4c9a..6ec806abb 100644
--- a/fs/jffs2/compr.c
+++ b/fs/jffs2/compr.c
@@ -378,6 +378,9 @@ int __init jffs2_compressors_init(void)
 #ifdef CONFIG_JFFS2_LZO
 	jffs2_lzo_init();
 #endif
+#ifdef CONFIG_JFFS2_LZMA
+	jffs2_lzma_init();
+#endif
 /* Setting default compression mode */
 #ifdef CONFIG_JFFS2_CMODE_NONE
 	jffs2_compression_mode = JFFS2_COMPR_MODE_NONE;
@@ -401,6 +404,9 @@ int __init jffs2_compressors_init(void)
 int jffs2_compressors_exit(void)
 {
 /* Unregistering compressors */
+#ifdef CONFIG_JFFS2_LZMA
+	jffs2_lzma_exit();
+#endif
 #ifdef CONFIG_JFFS2_LZO
 	jffs2_lzo_exit();
 #endif
diff --git a/fs/jffs2/compr.h b/fs/jffs2/compr.h
index 5e91d578f..32db2e1ec 100644
--- a/fs/jffs2/compr.h
+++ b/fs/jffs2/compr.h
@@ -29,9 +29,9 @@
 #define JFFS2_DYNRUBIN_PRIORITY  20
 #define JFFS2_LZARI_PRIORITY     30
 #define JFFS2_RTIME_PRIORITY     50
-#define JFFS2_ZLIB_PRIORITY      60
-#define JFFS2_LZO_PRIORITY       80
-
+#define JFFS2_LZMA_PRIORITY      70
+#define JFFS2_ZLIB_PRIORITY      80
+#define JFFS2_LZO_PRIORITY       90
 
 #define JFFS2_RUBINMIPS_DISABLED /* RUBINs will be used only */
 #define JFFS2_DYNRUBIN_DISABLED  /*	   for decompression */
@@ -101,5 +101,9 @@ void jffs2_zlib_exit(void);
 int jffs2_lzo_init(void);
 void jffs2_lzo_exit(void);
 #endif
+#ifdef CONFIG_JFFS2_LZMA
+int jffs2_lzma_init(void);
+void jffs2_lzma_exit(void);
+#endif
 
 #endif /* __JFFS2_COMPR_H__ */
diff --git a/fs/jffs2/compr_lzma.c b/fs/jffs2/compr_lzma.c
new file mode 100644
index 000000000..0fe3b75d7
--- /dev/null
+++ b/fs/jffs2/compr_lzma.c
@@ -0,0 +1,128 @@
+/*
+ * JFFS2 -- Journalling Flash File System, Version 2.
+ *
+ * For licensing information, see the file 'LICENCE' in this directory.
+ *
+ * JFFS2 wrapper to the LZMA C SDK
+ *
+ */
+
+#include <linux/lzma.h>
+#include "compr.h"
+
+#ifdef __KERNEL__
+	static DEFINE_MUTEX(deflate_mutex);
+#endif
+
+CLzmaEncHandle *p;
+Byte propsEncoded[LZMA_PROPS_SIZE];
+SizeT propsSize = sizeof(propsEncoded);
+
+STATIC void lzma_free_workspace(void)
+{
+	LzmaEnc_Destroy(p, &lzma_alloc, &lzma_alloc);
+}
+
+STATIC int INIT lzma_alloc_workspace(CLzmaEncProps *props)
+{
+	if ((p = (CLzmaEncHandle *)LzmaEnc_Create(&lzma_alloc)) == NULL)
+	{
+		PRINT_ERROR("Failed to allocate lzma deflate workspace\n");
+		return -ENOMEM;
+	}
+
+	if (LzmaEnc_SetProps(p, props) != SZ_OK)
+	{
+		lzma_free_workspace();
+		return -1;
+	}
+
+	if (LzmaEnc_WriteProperties(p, propsEncoded, &propsSize) != SZ_OK)
+	{
+		lzma_free_workspace();
+		return -1;
+	}
+
+	return 0;
+}
+
+STATIC int jffs2_lzma_compress(unsigned char *data_in, unsigned char *cpage_out,
+			      uint32_t *sourcelen, uint32_t *dstlen)
+{
+	SizeT compress_size = (SizeT)(*dstlen);
+	int ret;
+
+	#ifdef __KERNEL__
+		mutex_lock(&deflate_mutex);
+	#endif
+
+	ret = LzmaEnc_MemEncode(p, cpage_out, &compress_size, data_in, *sourcelen,
+		0, NULL, &lzma_alloc, &lzma_alloc);
+
+	#ifdef __KERNEL__
+		mutex_unlock(&deflate_mutex);
+	#endif
+
+	if (ret != SZ_OK)
+		return -1;
+
+	*dstlen = (uint32_t)compress_size;
+
+	return 0;
+}
+
+STATIC int jffs2_lzma_decompress(unsigned char *data_in, unsigned char *cpage_out,
+				 uint32_t srclen, uint32_t destlen)
+{
+	int ret;
+	SizeT dl = (SizeT)destlen;
+	SizeT sl = (SizeT)srclen;
+	ELzmaStatus status;
+
+	ret = LzmaDecode(cpage_out, &dl, data_in, &sl, propsEncoded,
+		propsSize, LZMA_FINISH_ANY, &status, &lzma_alloc);
+
+	if (ret != SZ_OK || status == LZMA_STATUS_NOT_FINISHED || dl != (SizeT)destlen)
+		return -1;
+
+	return 0;
+}
+
+static struct jffs2_compressor jffs2_lzma_comp = {
+	.priority = JFFS2_LZMA_PRIORITY,
+	.name = "lzma",
+	.compr = JFFS2_COMPR_LZMA,
+	.compress = &jffs2_lzma_compress,
+	.decompress = &jffs2_lzma_decompress,
+	.disabled = 0,
+};
+
+int INIT jffs2_lzma_init(void)
+{
+	int ret;
+	CLzmaEncProps props;
+	LzmaEncProps_Init(&props);
+
+	props.dictSize = LZMA_BEST_DICT(0x2000);
+	props.level = LZMA_BEST_LEVEL;
+	props.lc = LZMA_BEST_LC;
+	props.lp = LZMA_BEST_LP;
+	props.pb = LZMA_BEST_PB;
+	props.fb = LZMA_BEST_FB;
+
+	ret = lzma_alloc_workspace(&props);
+	if (ret < 0)
+		return ret;
+
+	ret = jffs2_register_compressor(&jffs2_lzma_comp);
+	if (ret)
+		lzma_free_workspace();
+
+	return ret;
+}
+
+void jffs2_lzma_exit(void)
+{
+	jffs2_unregister_compressor(&jffs2_lzma_comp);
+	lzma_free_workspace();
+}
diff --git a/fs/jffs2/dir.c b/fs/jffs2/dir.c
index 0a754f384..8b9feab7f 100644
--- a/fs/jffs2/dir.c
+++ b/fs/jffs2/dir.c
@@ -756,6 +756,24 @@ static int jffs2_mknod (struct inode *dir_i, struct dentry *dentry, umode_t mode
 	return ret;
 }
 
+static int jffs2_whiteout(struct inode *old_dir, struct dentry *old_dentry)
+{
+	struct dentry *wh;
+	int err;
+
+	wh = d_alloc(old_dentry->d_parent, &old_dentry->d_name);
+	if (!wh)
+		return -ENOMEM;
+
+	err = jffs2_mknod(old_dir, wh, S_IFCHR | WHITEOUT_MODE,
+			  WHITEOUT_DEV);
+	if (err)
+		return err;
+
+	d_rehash(wh);
+	return 0;
+}
+
 static int jffs2_rename (struct inode *old_dir_i, struct dentry *old_dentry,
 			 struct inode *new_dir_i, struct dentry *new_dentry,
 			 unsigned int flags)
@@ -763,18 +781,30 @@ static int jffs2_rename (struct inode *old_dir_i, struct dentry *old_dentry,
 	int ret;
 	struct jffs2_sb_info *c = JFFS2_SB_INFO(old_dir_i->i_sb);
 	struct jffs2_inode_info *victim_f = NULL;
+	struct inode *fst_inode = d_inode(old_dentry);
+	struct inode *snd_inode = d_inode(new_dentry);
 	uint8_t type;
 	uint32_t now;
 
-	if (flags & ~RENAME_NOREPLACE)
+	if (flags & ~(RENAME_NOREPLACE|RENAME_WHITEOUT|RENAME_EXCHANGE))
 		return -EINVAL;
 
+	if ((flags & RENAME_EXCHANGE) && (old_dir_i != new_dir_i)) {
+		if (S_ISDIR(fst_inode->i_mode) && !S_ISDIR(snd_inode->i_mode)) {
+			inc_nlink(new_dir_i);
+			drop_nlink(old_dir_i);
+		} else if (!S_ISDIR(fst_inode->i_mode) && S_ISDIR(snd_inode->i_mode)) {
+			drop_nlink(new_dir_i);
+			inc_nlink(old_dir_i);
+		}
+	}
+
 	/* The VFS will check for us and prevent trying to rename a
 	 * file over a directory and vice versa, but if it's a directory,
 	 * the VFS can't check whether the victim is empty. The filesystem
 	 * needs to do that for itself.
 	 */
-	if (d_really_is_positive(new_dentry)) {
+	if (d_really_is_positive(new_dentry) && !(flags & RENAME_EXCHANGE)) {
 		victim_f = JFFS2_INODE_INFO(d_inode(new_dentry));
 		if (d_is_dir(new_dentry)) {
 			struct jffs2_full_dirent *fd;
@@ -809,7 +839,7 @@ static int jffs2_rename (struct inode *old_dir_i, struct dentry *old_dentry,
 	if (ret)
 		return ret;
 
-	if (victim_f) {
+	if (victim_f && !(flags & RENAME_EXCHANGE)) {
 		/* There was a victim. Kill it off nicely */
 		if (d_is_dir(new_dentry))
 			clear_nlink(d_inode(new_dentry));
@@ -832,9 +862,20 @@ static int jffs2_rename (struct inode *old_dir_i, struct dentry *old_dentry,
 	if (d_is_dir(old_dentry) && !victim_f)
 		inc_nlink(new_dir_i);
 
-	/* Unlink the original */
-	ret = jffs2_do_unlink(c, JFFS2_INODE_INFO(old_dir_i),
-			      old_dentry->d_name.name, old_dentry->d_name.len, NULL, now);
+	if (flags & RENAME_WHITEOUT)
+		/* Replace with whiteout */
+		ret = jffs2_whiteout(old_dir_i, old_dentry);
+	else if (flags & RENAME_EXCHANGE)
+		/* Replace the original */
+		ret = jffs2_do_link(c, JFFS2_INODE_INFO(old_dir_i),
+				    d_inode(new_dentry)->i_ino, type,
+				    old_dentry->d_name.name, old_dentry->d_name.len,
+				    now);
+	else
+		/* Unlink the original */
+		ret = jffs2_do_unlink(c, JFFS2_INODE_INFO(old_dir_i),
+				      old_dentry->d_name.name,
+				      old_dentry->d_name.len, NULL, now);
 
 	/* We don't touch inode->i_nlink */
 
@@ -861,7 +902,7 @@ static int jffs2_rename (struct inode *old_dir_i, struct dentry *old_dentry,
 		return ret;
 	}
 
-	if (d_is_dir(old_dentry))
+	if (d_is_dir(old_dentry) && !(flags & RENAME_EXCHANGE))
 		drop_nlink(old_dir_i);
 
 	new_dir_i->i_mtime = new_dir_i->i_ctime = old_dir_i->i_mtime = old_dir_i->i_ctime = ITIME(now);
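
The jffs2_rename() changes above add RENAME_EXCHANGE and RENAME_WHITEOUT handling, which userspace reaches through renameat2(2). The sketch below atomically swaps two paths with RENAME_EXCHANGE; the paths are made up and both must already exist on the JFFS2 mount, and the raw syscall() form is used on the assumption that the libc headers define SYS_renameat2 but may not provide a wrapper.

/*
 * Sketch of atomically swapping two paths with RENAME_EXCHANGE via
 * renameat2(2), one of the flags the jffs2_rename() changes above
 * enable for JFFS2.  The paths are made up; both must already exist.
 */
#define _GNU_SOURCE
#include <fcntl.h>
#include <stdio.h>
#include <sys/syscall.h>
#include <unistd.h>

#ifndef RENAME_EXCHANGE
#define RENAME_EXCHANGE (1 << 1)
#endif

int main(void)
{
	long ret = syscall(SYS_renameat2,
			   AT_FDCWD, "/mnt/jffs2/config.new",
			   AT_FDCWD, "/mnt/jffs2/config",
			   RENAME_EXCHANGE);

	if (ret)
		perror("renameat2");
	return ret ? 1 : 0;
}
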
diff --git a/fs/jffs2/scan.c b/fs/jffs2/scan.c
index 90431dd61..8c78ab19c 100644
--- a/fs/jffs2/scan.c
+++ b/fs/jffs2/scan.c
@@ -148,8 +148,14 @@ int jffs2_scan_medium(struct jffs2_sb_info *c)
 		/* reset summary info for next eraseblock scan */
 		jffs2_sum_reset_collected(s);
 
-		ret = jffs2_scan_eraseblock(c, jeb, buf_size?flashbuf:(flashbuf+jeb->offset),
-						buf_size, s);
+		if (c->flags & (1 << 7)) {
+			if (mtd_block_isbad(c->mtd, jeb->offset))
+				ret = BLK_STATE_BADBLOCK;
+			else
+				ret = BLK_STATE_ALLFF;
+		} else
+			ret = jffs2_scan_eraseblock(c, jeb, buf_size?flashbuf:(flashbuf+jeb->offset),
+							buf_size, s);
 
 		if (ret < 0)
 			goto out;
@@ -561,6 +567,17 @@ full_scan:
 			return err;
 	}
 
+	if ((buf[0] == 0xde) &&
+		(buf[1] == 0xad) &&
+		(buf[2] == 0xc0) &&
+		(buf[3] == 0xde)) {
+		/* end of filesystem. erase everything after this point */
+		printk("%s(): End of filesystem marker found at 0x%x\n", __func__, jeb->offset);
+		c->flags |= (1 << 7);
+
+		return BLK_STATE_ALLFF;
+	}
+
 	/* We temporarily use 'ofs' as a pointer into the buffer/jeb */
 	ofs = 0;
 	max_ofs = EMPTY_SCAN_SIZE(c->sector_size);
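
The scan changes above abort the media scan once an eraseblock begins with the de ad c0 de end-of-filesystem marker and then treat the remaining blocks as empty (flag bit 7 of c->flags, also checked in jffs2_build_filesystem() and jffs2_scan_medium() above). The sketch below looks for the same marker in a raw eraseblock image from userspace; the image file name and the 128 KiB eraseblock size are assumptions.

/*
 * Sketch of the end-of-filesystem check added to the JFFS2 scan code:
 * an eraseblock whose first four bytes are de ad c0 de marks the end of
 * the image and everything after it is treated as empty.  The image
 * file name and the eraseblock size are made up.
 */
#include <stdio.h>
#include <stdint.h>

#define ERASE_SIZE (128 * 1024)	/* assumed 128 KiB eraseblocks */

int main(void)
{
	uint8_t buf[4];
	FILE *f = fopen("jffs2-image.bin", "rb");
	long block = 0;

	if (!f) {
		perror("jffs2-image.bin");
		return 1;
	}
	while (fread(buf, 1, 4, f) == 4) {
		if (buf[0] == 0xde && buf[1] == 0xad &&
		    buf[2] == 0xc0 && buf[3] == 0xde) {
			printf("end-of-filesystem marker in block %ld\n", block);
			break;
		}
		block++;
		if (fseek(f, (long)block * ERASE_SIZE, SEEK_SET))
			break;
	}
	fclose(f);
	return 0;
}
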
diff --git a/fs/jffs2/super.c b/fs/jffs2/super.c
index 5ef21f4c4..62e730216 100644
--- a/fs/jffs2/super.c
+++ b/fs/jffs2/super.c
@@ -372,14 +372,41 @@ static int __init init_jffs2_fs(void)
 	BUILD_BUG_ON(sizeof(struct jffs2_raw_inode) != 68);
 	BUILD_BUG_ON(sizeof(struct jffs2_raw_summary) != 32);
 
-	pr_info("version 2.2."
+	pr_info("version 2.2"
 #ifdef CONFIG_JFFS2_FS_WRITEBUFFER
 	       " (NAND)"
 #endif
 #ifdef CONFIG_JFFS2_SUMMARY
-	       " (SUMMARY) "
+	       " (SUMMARY)"
 #endif
-	       " © 2001-2006 Red Hat, Inc.\n");
+#ifdef CONFIG_JFFS2_ZLIB
+	       " (ZLIB)"
+#endif
+#ifdef CONFIG_JFFS2_LZO
+	       " (LZO)"
+#endif
+#ifdef CONFIG_JFFS2_LZMA
+	       " (LZMA)"
+#endif
+#ifdef CONFIG_JFFS2_RTIME
+	       " (RTIME)"
+#endif
+#ifdef CONFIG_JFFS2_RUBIN
+	       " (RUBIN)"
+#endif
+#ifdef  CONFIG_JFFS2_CMODE_NONE
+	       " (CMODE_NONE)"
+#endif
+#ifdef CONFIG_JFFS2_CMODE_PRIORITY
+	       " (CMODE_PRIORITY)"
+#endif
+#ifdef CONFIG_JFFS2_CMODE_SIZE
+	       " (CMODE_SIZE)"
+#endif
+#ifdef CONFIG_JFFS2_CMODE_FAVOURLZO
+	       " (CMODE_FAVOURLZO)"
+#endif
+	       " (c) 2001-2006 Red Hat, Inc.\n");
 
 	jffs2_inode_cachep = kmem_cache_create("jffs2_i",
 					     sizeof(struct jffs2_inode_info),
diff --git a/fs/ubifs/io.c b/fs/ubifs/io.c
index 97be41215..3be28900b 100644
--- a/fs/ubifs/io.c
+++ b/fs/ubifs/io.c
@@ -452,16 +452,22 @@ static enum hrtimer_restart wbuf_timer_callback_nolock(struct hrtimer *timer)
  */
 static void new_wbuf_timer_nolock(struct ubifs_wbuf *wbuf)
 {
+	ktime_t softlimit = ms_to_ktime(dirty_writeback_interval * 10);
+	unsigned long long delta = dirty_writeback_interval;
+
+	/* centi to milli, milli to nano, then 10% */
+	delta *= 10ULL * NSEC_PER_MSEC / 10ULL;
+
 	ubifs_assert(!hrtimer_active(&wbuf->timer));
+	ubifs_assert(delta <= ULONG_MAX);
 
 	if (wbuf->no_timer)
 		return;
 	dbg_io("set timer for jhead %s, %llu-%llu millisecs",
 	       dbg_jhead(wbuf->jhead),
-	       div_u64(ktime_to_ns(wbuf->softlimit), USEC_PER_SEC),
-	       div_u64(ktime_to_ns(wbuf->softlimit) + wbuf->delta,
-		       USEC_PER_SEC));
-	hrtimer_start_range_ns(&wbuf->timer, wbuf->softlimit, wbuf->delta,
+	       div_u64(ktime_to_ns(softlimit), USEC_PER_SEC),
+	       div_u64(ktime_to_ns(softlimit) + delta, USEC_PER_SEC));
+	hrtimer_start_range_ns(&wbuf->timer, softlimit, delta,
 			       HRTIMER_MODE_REL);
 }
 
@@ -1059,10 +1065,6 @@ int ubifs_wbuf_init(struct ubifs_info *c, struct ubifs_wbuf *wbuf)
 
 	hrtimer_init(&wbuf->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
 	wbuf->timer.function = wbuf_timer_callback_nolock;
-	wbuf->softlimit = ktime_set(WBUF_TIMEOUT_SOFTLIMIT, 0);
-	wbuf->delta = WBUF_TIMEOUT_HARDLIMIT - WBUF_TIMEOUT_SOFTLIMIT;
-	wbuf->delta *= 1000000000ULL;
-	ubifs_assert(wbuf->delta <= ULONG_MAX);
 	return 0;
 }
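As a worked example of the new timer window: dirty_writeback_interval is kept in centiseconds, so with the stock default of 500 (vm.dirty_writeback_centisecs = 500, i.e. 5 s) the code above yields softlimit = 500 * 10 ms = 5 s and delta = 500 * (10 * NSEC_PER_MSEC / 10) = 500 * NSEC_PER_MSEC = 0.5 s. The write-buffer timer therefore fires between 5.0 s and 5.5 s after the buffer first becomes dirty, and the range now tracks the writeback tunable instead of the fixed 3 s/5 s window that the removed WBUF_TIMEOUT_* constants provided.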
 
diff --git a/fs/ubifs/sb.c b/fs/ubifs/sb.c
index 3cbb904a6..460cb9e3b 100644
--- a/fs/ubifs/sb.c
+++ b/fs/ubifs/sb.c
@@ -63,6 +63,17 @@
 /* Default time granularity in nanoseconds */
 #define DEFAULT_TIME_GRAN 1000000000
 
+static int get_default_compressor(void)
+{
+	if (ubifs_compr_present(UBIFS_COMPR_LZO))
+		return UBIFS_COMPR_LZO;
+
+	if (ubifs_compr_present(UBIFS_COMPR_ZLIB))
+		return UBIFS_COMPR_ZLIB;
+
+	return UBIFS_COMPR_NONE;
+}
+
 /**
  * create_default_filesystem - format empty UBI volume.
  * @c: UBIFS file-system description object
@@ -183,7 +194,7 @@ static int create_default_filesystem(struct ubifs_info *c)
 	if (c->mount_opts.override_compr)
 		sup->default_compr = cpu_to_le16(c->mount_opts.compr_type);
 	else
-		sup->default_compr = cpu_to_le16(UBIFS_COMPR_LZO);
+		sup->default_compr = cpu_to_le16(get_default_compressor());
 
 	generate_random_uuid(sup->uuid);
 
diff --git a/fs/ubifs/ubifs.h b/fs/ubifs/ubifs.h
index 096035eb2..b8b18d446 100644
--- a/fs/ubifs/ubifs.h
+++ b/fs/ubifs/ubifs.h
@@ -83,10 +83,6 @@
  */
 #define BGT_NAME_PATTERN "ubifs_bgt%d_%d"
 
-/* Write-buffer synchronization timeout interval in seconds */
-#define WBUF_TIMEOUT_SOFTLIMIT 3
-#define WBUF_TIMEOUT_HARDLIMIT 5
-
 /* Maximum possible inode number (only 32-bit inodes are supported now) */
 #define MAX_INUM 0xFFFFFFFF
 
@@ -645,9 +641,6 @@ typedef int (*ubifs_lpt_scan_callback)(struct ubifs_info *c,
  * @io_mutex: serializes write-buffer I/O
  * @lock: serializes @buf, @lnum, @offs, @avail, @used, @next_ino and @inodes
  *        fields
- * @softlimit: soft write-buffer timeout interval
- * @delta: hard and soft timeouts delta (the timer expire interval is @softlimit
- *         and @softlimit + @delta)
  * @timer: write-buffer timer
  * @no_timer: non-zero if this write-buffer does not have a timer
  * @need_sync: non-zero if the timer expired and the wbuf needs sync'ing
@@ -676,8 +669,6 @@ struct ubifs_wbuf {
 	int (*sync_callback)(struct ubifs_info *c, int lnum, int free, int pad);
 	struct mutex io_mutex;
 	spinlock_t lock;
-	ktime_t softlimit;
-	unsigned long long delta;
 	struct hrtimer timer;
 	unsigned int no_timer:1;
 	unsigned int need_sync:1;
diff --git a/include/linux/etherdevice.h b/include/linux/etherdevice.h
index 6fec9e81b..83ae88cb3 100644
--- a/include/linux/etherdevice.h
+++ b/include/linux/etherdevice.h
@@ -54,6 +54,11 @@ struct net_device *alloc_etherdev_mqs(int sizeof_priv, unsigned int txqs,
 #define alloc_etherdev(sizeof_priv) alloc_etherdev_mq(sizeof_priv, 1)
 #define alloc_etherdev_mq(sizeof_priv, count) alloc_etherdev_mqs(sizeof_priv, count, count)
 
+struct net_device *devm_alloc_etherdev_mqs(struct device *dev, int sizeof_priv,
+					   unsigned int txqs,
+					   unsigned int rxqs);
+#define devm_alloc_etherdev(dev, sizeof_priv) devm_alloc_etherdev_mqs(dev, sizeof_priv, 1, 1)
+
 struct sk_buff **eth_gro_receive(struct sk_buff **head,
 				 struct sk_buff *skb);
 int eth_gro_complete(struct sk_buff *skb, int nhoff);
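devm_alloc_etherdev_mqs() ties the net_device's lifetime to the owning struct device, so drivers can drop their explicit free_netdev() error handling. A minimal sketch of a platform driver probe using the single-queue wrapper follows; the driver name and private struct are hypothetical.

#include <linux/etherdevice.h>
#include <linux/netdevice.h>
#include <linux/platform_device.h>

struct myeth_priv {			/* hypothetical private data */
	void __iomem *base;
};

static int myeth_probe(struct platform_device *pdev)
{
	struct net_device *ndev;

	/* Freed automatically when pdev->dev goes away, including on any
	 * error path below. */
	ndev = devm_alloc_etherdev(&pdev->dev, sizeof(struct myeth_priv));
	if (!ndev)
		return -ENOMEM;

	SET_NETDEV_DEV(ndev, &pdev->dev);
	platform_set_drvdata(pdev, ndev);

	/* unregister_netdev() in .remove is still required. */
	return register_netdev(ndev);
}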
diff --git a/include/linux/if_bridge.h b/include/linux/if_bridge.h
index c6587c01d..0911c8cd5 100644
--- a/include/linux/if_bridge.h
+++ b/include/linux/if_bridge.h
@@ -46,6 +46,8 @@ struct br_ip_list {
 #define BR_LEARNING_SYNC	BIT(9)
 #define BR_PROXYARP_WIFI	BIT(10)
 #define BR_MCAST_FLOOD		BIT(11)
+#define BR_MULTICAST_TO_UNICAST	BIT(12)
+#define BR_ISOLATE_MODE		BIT(13)
 
 #define BR_DEFAULT_AGEING_TIME	(300 * HZ)
 
diff --git a/include/linux/leds.h b/include/linux/leds.h
index 707944c48..8f86eb88b 100644
--- a/include/linux/leds.h
+++ b/include/linux/leds.h
@@ -113,10 +113,16 @@ struct led_classdev {
 	struct mutex		led_access;
 };
 
-extern int led_classdev_register(struct device *parent,
-				 struct led_classdev *led_cdev);
-extern int devm_led_classdev_register(struct device *parent,
-				      struct led_classdev *led_cdev);
+extern int of_led_classdev_register(struct device *parent,
+				    struct device_node *np,
+				    struct led_classdev *led_cdev);
+#define led_classdev_register(parent, led_cdev)				\
+	of_led_classdev_register(parent, NULL, led_cdev)
+extern int devm_of_led_classdev_register(struct device *parent,
+					 struct device_node *np,
+					 struct led_classdev *led_cdev);
+#define devm_led_classdev_register(parent, led_cdev)			\
+	devm_of_led_classdev_register(parent, NULL, led_cdev)
 extern void led_classdev_unregister(struct led_classdev *led_cdev);
 extern void devm_led_classdev_unregister(struct device *parent,
 					 struct led_classdev *led_cdev);
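The of_ variants above let an LED class device be tied to the device-tree node that describes it, while the old names become NULL-node wrappers with unchanged behaviour. A minimal sketch of a driver registering one LED against a node (names and callback body hypothetical):

#include <linux/leds.h>
#include <linux/of.h>

static void demo_led_set(struct led_classdev *cdev, enum led_brightness value)
{
	/* hypothetical: drive a GPIO or a register here */
}

static struct led_classdev demo_led = {
	.name		= "demo:green:status",
	.max_brightness	= 1,
	.brightness_set	= demo_led_set,
};

static int demo_register_led(struct device *parent, struct device_node *np)
{
	/* Passing @np associates the classdev with its DT node; passing
	 * NULL (what the old-named wrappers now do) keeps the previous
	 * behaviour. */
	return devm_of_led_classdev_register(parent, np, &demo_led);
}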
diff --git a/include/linux/libata.h b/include/linux/libata.h
index 616eef4d8..29ef0938c 100644
--- a/include/linux/libata.h
+++ b/include/linux/libata.h
@@ -38,6 +38,9 @@
 #include <linux/acpi.h>
 #include <linux/cdrom.h>
 #include <linux/sched.h>
+#ifdef CONFIG_ATA_LEDS
+#include <linux/leds.h>
+#endif
 
 /*
  * Define if arch has non-standard setup.  This is a _PCI_ standard
@@ -883,6 +886,12 @@ struct ata_port {
 #ifdef CONFIG_ATA_ACPI
 	struct ata_acpi_gtm	__acpi_init_gtm; /* use ata_acpi_init_gtm() */
 #endif
+
+#ifdef CONFIG_ATA_LEDS
+	struct led_trigger	*ledtrig;
+	char			ledtrig_name[8];
+#endif
+
 	/* owned by EH */
 	u8			sector_buf[ATA_SECT_SIZE] ____cacheline_aligned;
 };
diff --git a/include/linux/lzma.h b/include/linux/lzma.h
new file mode 100644
index 000000000..5f31334df
--- /dev/null
+++ b/include/linux/lzma.h
@@ -0,0 +1,62 @@
+#ifndef __LZMA_H__
+#define __LZMA_H__
+
+#ifdef __KERNEL__
+	#include <linux/kernel.h>
+	#include <linux/sched.h>
+	#include <linux/slab.h>
+	#include <linux/vmalloc.h>
+	#include <linux/init.h>
+	#define LZMA_MALLOC vmalloc
+	#define LZMA_FREE vfree
+	#define PRINT_ERROR(msg) printk(KERN_WARNING #msg)
+	#define INIT __init
+	#define STATIC static
+#else
+	#include <stdint.h>
+	#include <stdlib.h>
+	#include <stdio.h>
+	#include <unistd.h>
+	#include <string.h>
+	#include <asm/types.h>
+	#include <errno.h>
+	#include <linux/jffs2.h>
+	#ifndef PAGE_SIZE
+		extern int page_size;
+		#define PAGE_SIZE page_size
+	#endif
+	#define LZMA_MALLOC malloc
+	#define LZMA_FREE free
+	#define PRINT_ERROR(msg) fprintf(stderr, msg)
+	#define INIT
+	#define STATIC
+#endif
+
+#include "lzma/LzmaDec.h"
+#include "lzma/LzmaEnc.h"
+
+#define LZMA_BEST_LEVEL (9)
+#define LZMA_BEST_LC    (0)
+#define LZMA_BEST_LP    (0)
+#define LZMA_BEST_PB    (0)
+#define LZMA_BEST_FB  (273)
+
+#define LZMA_BEST_DICT(n) (((int)((n) / 2)) * 2)
+
+static void *p_lzma_malloc(void *p, size_t size)
+{
+        if (size == 0)
+                return NULL;
+
+        return LZMA_MALLOC(size);
+}
+
+static void p_lzma_free(void *p, void *address)
+{
+        if (address != NULL)
+                LZMA_FREE(address);
+}
+
+static ISzAlloc lzma_alloc = {p_lzma_malloc, p_lzma_free};
+
+#endif
diff --git a/include/linux/lzma/LzFind.h b/include/linux/lzma/LzFind.h
new file mode 100644
index 000000000..6d4f8e239
--- /dev/null
+++ b/include/linux/lzma/LzFind.h
@@ -0,0 +1,98 @@
+/* LzFind.h -- Match finder for LZ algorithms
+2009-04-22 : Igor Pavlov : Public domain */
+
+#ifndef __LZ_FIND_H
+#define __LZ_FIND_H
+
+#include "Types.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+typedef UInt32 CLzRef;
+
+typedef struct _CMatchFinder
+{
+  Byte *buffer;
+  UInt32 pos;
+  UInt32 posLimit;
+  UInt32 streamPos;
+  UInt32 lenLimit;
+
+  UInt32 cyclicBufferPos;
+  UInt32 cyclicBufferSize; /* it must be = (historySize + 1) */
+
+  UInt32 matchMaxLen;
+  CLzRef *hash;
+  CLzRef *son;
+  UInt32 hashMask;
+  UInt32 cutValue;
+
+  Byte *bufferBase;
+  ISeqInStream *stream;
+  int streamEndWasReached;
+
+  UInt32 blockSize;
+  UInt32 keepSizeBefore;
+  UInt32 keepSizeAfter;
+
+  UInt32 numHashBytes;
+  int directInput;
+  size_t directInputRem;
+  int btMode;
+  int bigHash;
+  UInt32 historySize;
+  UInt32 fixedHashSize;
+  UInt32 hashSizeSum;
+  UInt32 numSons;
+  SRes result;
+  UInt32 crc[256];
+} CMatchFinder;
+
+#define Inline_MatchFinder_GetPointerToCurrentPos(p) ((p)->buffer)
+#define Inline_MatchFinder_GetIndexByte(p, index) ((p)->buffer[(Int32)(index)])
+
+#define Inline_MatchFinder_GetNumAvailableBytes(p) ((p)->streamPos - (p)->pos)
+
+void MatchFinder_Construct(CMatchFinder *p);
+
+/* Conditions:
+     historySize <= 3 GB
+     keepAddBufferBefore + matchMaxLen + keepAddBufferAfter < 511MB
+*/
+int MatchFinder_Create(CMatchFinder *p, UInt32 historySize,
+    UInt32 keepAddBufferBefore, UInt32 matchMaxLen, UInt32 keepAddBufferAfter,
+    ISzAlloc *alloc);
+void MatchFinder_Free(CMatchFinder *p, ISzAlloc *alloc);
+
+/*
+Conditions:
+  Mf_GetNumAvailableBytes_Func must be called before each Mf_GetMatchLen_Func.
+  Mf_GetPointerToCurrentPos_Func's result must be used only before any other function
+*/
+
+typedef void (*Mf_Init_Func)(void *object);
+typedef Byte (*Mf_GetIndexByte_Func)(void *object, Int32 index);
+typedef UInt32 (*Mf_GetNumAvailableBytes_Func)(void *object);
+typedef const Byte * (*Mf_GetPointerToCurrentPos_Func)(void *object);
+typedef UInt32 (*Mf_GetMatches_Func)(void *object, UInt32 *distances);
+typedef void (*Mf_Skip_Func)(void *object, UInt32);
+
+typedef struct _IMatchFinder
+{
+  Mf_Init_Func Init;
+  Mf_GetIndexByte_Func GetIndexByte;
+  Mf_GetNumAvailableBytes_Func GetNumAvailableBytes;
+  Mf_GetPointerToCurrentPos_Func GetPointerToCurrentPos;
+  Mf_GetMatches_Func GetMatches;
+  Mf_Skip_Func Skip;
+} IMatchFinder;
+
+void MatchFinder_CreateVTable(CMatchFinder *p, IMatchFinder *vTable);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
diff --git a/include/linux/lzma/LzHash.h b/include/linux/lzma/LzHash.h
new file mode 100644
index 000000000..f3e89966c
--- /dev/null
+++ b/include/linux/lzma/LzHash.h
@@ -0,0 +1,54 @@
+/* LzHash.h -- HASH functions for LZ algorithms
+2009-02-07 : Igor Pavlov : Public domain */
+
+#ifndef __LZ_HASH_H
+#define __LZ_HASH_H
+
+#define kHash2Size (1 << 10)
+#define kHash3Size (1 << 16)
+#define kHash4Size (1 << 20)
+
+#define kFix3HashSize (kHash2Size)
+#define kFix4HashSize (kHash2Size + kHash3Size)
+#define kFix5HashSize (kHash2Size + kHash3Size + kHash4Size)
+
+#define HASH2_CALC hashValue = cur[0] | ((UInt32)cur[1] << 8);
+
+#define HASH3_CALC { \
+  UInt32 temp = p->crc[cur[0]] ^ cur[1]; \
+  hash2Value = temp & (kHash2Size - 1); \
+  hashValue = (temp ^ ((UInt32)cur[2] << 8)) & p->hashMask; }
+
+#define HASH4_CALC { \
+  UInt32 temp = p->crc[cur[0]] ^ cur[1]; \
+  hash2Value = temp & (kHash2Size - 1); \
+  hash3Value = (temp ^ ((UInt32)cur[2] << 8)) & (kHash3Size - 1); \
+  hashValue = (temp ^ ((UInt32)cur[2] << 8) ^ (p->crc[cur[3]] << 5)) & p->hashMask; }
+
+#define HASH5_CALC { \
+  UInt32 temp = p->crc[cur[0]] ^ cur[1]; \
+  hash2Value = temp & (kHash2Size - 1); \
+  hash3Value = (temp ^ ((UInt32)cur[2] << 8)) & (kHash3Size - 1); \
+  hash4Value = (temp ^ ((UInt32)cur[2] << 8) ^ (p->crc[cur[3]] << 5)); \
+  hashValue = (hash4Value ^ (p->crc[cur[4]] << 3)) & p->hashMask; \
+  hash4Value &= (kHash4Size - 1); }
+
+/* #define HASH_ZIP_CALC hashValue = ((cur[0] | ((UInt32)cur[1] << 8)) ^ p->crc[cur[2]]) & 0xFFFF; */
+#define HASH_ZIP_CALC hashValue = ((cur[2] | ((UInt32)cur[0] << 8)) ^ p->crc[cur[1]]) & 0xFFFF;
+
+
+#define MT_HASH2_CALC \
+  hash2Value = (p->crc[cur[0]] ^ cur[1]) & (kHash2Size - 1);
+
+#define MT_HASH3_CALC { \
+  UInt32 temp = p->crc[cur[0]] ^ cur[1]; \
+  hash2Value = temp & (kHash2Size - 1); \
+  hash3Value = (temp ^ ((UInt32)cur[2] << 8)) & (kHash3Size - 1); }
+
+#define MT_HASH4_CALC { \
+  UInt32 temp = p->crc[cur[0]] ^ cur[1]; \
+  hash2Value = temp & (kHash2Size - 1); \
+  hash3Value = (temp ^ ((UInt32)cur[2] << 8)) & (kHash3Size - 1); \
+  hash4Value = (temp ^ ((UInt32)cur[2] << 8) ^ (p->crc[cur[3]] << 5)) & (kHash4Size - 1); }
+
+#endif
diff --git a/include/linux/lzma/LzmaDec.h b/include/linux/lzma/LzmaDec.h
new file mode 100644
index 000000000..c90f95e9f
--- /dev/null
+++ b/include/linux/lzma/LzmaDec.h
@@ -0,0 +1,130 @@
+/* LzmaDec.h -- LZMA Decoder
+2009-02-07 : Igor Pavlov : Public domain */
+
+#ifndef __LZMA_DEC_H
+#define __LZMA_DEC_H
+
+#include "Types.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/* #define _LZMA_PROB32 */
+/* _LZMA_PROB32 can increase the speed on some CPUs,
+   but memory usage for CLzmaDec::probs will be doubled in that case */
+
+#ifdef _LZMA_PROB32
+#define CLzmaProb UInt32
+#else
+#define CLzmaProb UInt16
+#endif
+
+
+/* ---------- LZMA Properties ---------- */
+
+#define LZMA_PROPS_SIZE 5
+
+typedef struct _CLzmaProps
+{
+  unsigned lc, lp, pb;
+  UInt32 dicSize;
+} CLzmaProps;
+
+
+/* ---------- LZMA Decoder state ---------- */
+
+/* LZMA_REQUIRED_INPUT_MAX = number of required input bytes for worst case.
+   Num bits = log2((2^11 / 31) ^ 22) + 26 < 134 + 26 = 160; */
+
+#define LZMA_REQUIRED_INPUT_MAX 20
+
+typedef struct
+{
+  CLzmaProps prop;
+  CLzmaProb *probs;
+  Byte *dic;
+  const Byte *buf;
+  UInt32 range, code;
+  SizeT dicPos;
+  SizeT dicBufSize;
+  UInt32 processedPos;
+  UInt32 checkDicSize;
+  unsigned state;
+  UInt32 reps[4];
+  unsigned remainLen;
+  int needFlush;
+  int needInitState;
+  UInt32 numProbs;
+  unsigned tempBufSize;
+  Byte tempBuf[LZMA_REQUIRED_INPUT_MAX];
+} CLzmaDec;
+
+#define LzmaDec_Construct(p) { (p)->dic = 0; (p)->probs = 0; }
+
+/* There are two types of LZMA streams:
+     0) Stream with end mark. That end mark adds about 6 bytes to compressed size.
+     1) Stream without end mark. You must know exact uncompressed size to decompress such stream. */
+
+typedef enum
+{
+  LZMA_FINISH_ANY,   /* finish at any point */
+  LZMA_FINISH_END    /* block must be finished at the end */
+} ELzmaFinishMode;
+
+/* ELzmaFinishMode has meaning only if the decoding reaches output limit !!!
+
+   You must use LZMA_FINISH_END, when you know that current output buffer
+   covers last bytes of block. In other cases you must use LZMA_FINISH_ANY.
+
+   If LZMA decoder sees end marker before reaching output limit, it returns SZ_OK,
+   and output value of destLen will be less than output buffer size limit.
+   You can check status result also.
+
+   You can use multiple checks to test data integrity after full decompression:
+     1) Check Result and "status" variable.
+     2) Check that output(destLen) = uncompressedSize, if you know real uncompressedSize.
+     3) Check that output(srcLen) = compressedSize, if you know real compressedSize.
+        You must use correct finish mode in that case. */
+
+typedef enum
+{
+  LZMA_STATUS_NOT_SPECIFIED,               /* use main error code instead */
+  LZMA_STATUS_FINISHED_WITH_MARK,          /* stream was finished with end mark. */
+  LZMA_STATUS_NOT_FINISHED,                /* stream was not finished */
+  LZMA_STATUS_NEEDS_MORE_INPUT,            /* you must provide more input bytes */
+  LZMA_STATUS_MAYBE_FINISHED_WITHOUT_MARK  /* there is probability that stream was finished without end mark */
+} ELzmaStatus;
+
+/* ELzmaStatus is used only as output value for function call */
+
+/* ---------- One Call Interface ---------- */
+
+/* LzmaDecode
+
+finishMode:
+  It has meaning only if the decoding reaches output limit (*destLen).
+  LZMA_FINISH_ANY - Decode just destLen bytes.
+  LZMA_FINISH_END - Stream must be finished after (*destLen).
+
+Returns:
+  SZ_OK
+    status:
+      LZMA_STATUS_FINISHED_WITH_MARK
+      LZMA_STATUS_NOT_FINISHED
+      LZMA_STATUS_MAYBE_FINISHED_WITHOUT_MARK
+  SZ_ERROR_DATA - Data error
+  SZ_ERROR_MEM  - Memory allocation error
+  SZ_ERROR_UNSUPPORTED - Unsupported properties
+  SZ_ERROR_INPUT_EOF - It needs more bytes in input buffer (src).
+*/
+
+SRes LzmaDecode(Byte *dest, SizeT *destLen, const Byte *src, SizeT *srcLen,
+    const Byte *propData, unsigned propSize, ELzmaFinishMode finishMode,
+    ELzmaStatus *status, ISzAlloc *alloc);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
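The one-call decoder declared above is normally reached through the <linux/lzma.h> glue added by this series, which also provides the lzma_alloc allocator. The sketch below is illustrative only and assumes a layout where the 5 property bytes sit directly in front of the compressed payload; the wrapper name is hypothetical.

#include <linux/lzma.h>	/* LzmaDec.h plus the lzma_alloc helper */

static int demo_unlzma(const Byte *src, SizeT src_len, Byte *dst, SizeT dst_len)
{
	SizeT in_len = src_len - LZMA_PROPS_SIZE;
	SizeT out_len = dst_len;	/* in: capacity, out: bytes produced */
	ELzmaStatus status;
	SRes res;

	res = LzmaDecode(dst, &out_len, src + LZMA_PROPS_SIZE, &in_len,
			 src, LZMA_PROPS_SIZE, LZMA_FINISH_ANY,
			 &status, &lzma_alloc);

	return res == SZ_OK ? 0 : -1;
}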
diff --git a/include/linux/lzma/LzmaEnc.h b/include/linux/lzma/LzmaEnc.h
new file mode 100644
index 000000000..2986c0460
--- /dev/null
+++ b/include/linux/lzma/LzmaEnc.h
@@ -0,0 +1,60 @@
+/*  LzmaEnc.h -- LZMA Encoder
+2009-02-07 : Igor Pavlov : Public domain */
+
+#ifndef __LZMA_ENC_H
+#define __LZMA_ENC_H
+
+#include "Types.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#define LZMA_PROPS_SIZE 5
+
+typedef struct _CLzmaEncProps
+{
+  int level;       /*  0 <= level <= 9 */
+  UInt32 dictSize; /* (1 << 12) <= dictSize <= (1 << 27) for 32-bit version
+                      (1 << 12) <= dictSize <= (1 << 30) for 64-bit version
+                       default = (1 << 24) */
+  int lc;          /* 0 <= lc <= 8, default = 3 */
+  int lp;          /* 0 <= lp <= 4, default = 0 */
+  int pb;          /* 0 <= pb <= 4, default = 2 */
+  int algo;        /* 0 - fast, 1 - normal, default = 1 */
+  int fb;          /* 5 <= fb <= 273, default = 32 */
+  int btMode;      /* 0 - hashChain Mode, 1 - binTree mode - normal, default = 1 */
+  int numHashBytes; /* 2, 3 or 4, default = 4 */
+  UInt32 mc;        /* 1 <= mc <= (1 << 30), default = 32 */
+  unsigned writeEndMark;  /* 0 - do not write EOPM, 1 - write EOPM, default = 0 */
+  int numThreads;  /* 1 or 2, default = 2 */
+} CLzmaEncProps;
+
+void LzmaEncProps_Init(CLzmaEncProps *p);
+
+/* ---------- CLzmaEncHandle Interface ---------- */
+
+/* LzmaEnc_* functions can return the following exit codes:
+Returns:
+  SZ_OK           - OK
+  SZ_ERROR_MEM    - Memory allocation error
+  SZ_ERROR_PARAM  - Incorrect parameter in props
+  SZ_ERROR_WRITE  - Write callback error.
+  SZ_ERROR_PROGRESS - some break from progress callback
+  SZ_ERROR_THREAD - errors in multithreading functions (only for Mt version)
+*/
+
+typedef void * CLzmaEncHandle;
+
+CLzmaEncHandle LzmaEnc_Create(ISzAlloc *alloc);
+void LzmaEnc_Destroy(CLzmaEncHandle p, ISzAlloc *alloc, ISzAlloc *allocBig);
+SRes LzmaEnc_SetProps(CLzmaEncHandle p, const CLzmaEncProps *props);
+SRes LzmaEnc_WriteProperties(CLzmaEncHandle p, Byte *properties, SizeT *size);
+SRes LzmaEnc_MemEncode(CLzmaEncHandle p, Byte *dest, SizeT *destLen, const Byte *src, SizeT srcLen,
+    int writeEndMark, ICompressProgress *progress, ISzAlloc *alloc, ISzAlloc *allocBig);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
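The handle-based encoder above is driven in create / set-props / write-properties / encode / destroy order. The sketch below is illustrative only: it uses the LZMA_BEST_* defaults from <linux/lzma.h>, a hypothetical wrapper name, and leaves storing the property header next to the output to the caller.

#include <linux/lzma.h>

static int demo_lzma_compress(const Byte *src, SizeT src_len,
			      Byte *dst, SizeT *dst_len)
{
	Byte props_hdr[LZMA_PROPS_SIZE];
	SizeT props_len = sizeof(props_hdr);
	CLzmaEncProps props;
	CLzmaEncHandle h;
	SRes res;

	LzmaEncProps_Init(&props);
	props.level = LZMA_BEST_LEVEL;
	props.dictSize = LZMA_BEST_DICT(src_len);

	h = LzmaEnc_Create(&lzma_alloc);
	if (!h)
		return -1;

	res = LzmaEnc_SetProps(h, &props);
	if (res == SZ_OK)
		res = LzmaEnc_WriteProperties(h, props_hdr, &props_len);
	if (res == SZ_OK)
		res = LzmaEnc_MemEncode(h, dst, dst_len, src, src_len,
					0 /* no end mark */, NULL,
					&lzma_alloc, &lzma_alloc);

	LzmaEnc_Destroy(h, &lzma_alloc, &lzma_alloc);
	return res == SZ_OK ? 0 : -1;
}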
diff --git a/include/linux/lzma/Types.h b/include/linux/lzma/Types.h
new file mode 100644
index 000000000..4751acde0
--- /dev/null
+++ b/include/linux/lzma/Types.h
@@ -0,0 +1,226 @@
+/* Types.h -- Basic types
+2009-11-23 : Igor Pavlov : Public domain */
+
+#ifndef __7Z_TYPES_H
+#define __7Z_TYPES_H
+
+#include <stddef.h>
+
+#ifdef _WIN32
+#include <windows.h>
+#endif
+
+#ifndef EXTERN_C_BEGIN
+#ifdef __cplusplus
+#define EXTERN_C_BEGIN extern "C" {
+#define EXTERN_C_END }
+#else
+#define EXTERN_C_BEGIN
+#define EXTERN_C_END
+#endif
+#endif
+
+EXTERN_C_BEGIN
+
+#define SZ_OK 0
+
+#define SZ_ERROR_DATA 1
+#define SZ_ERROR_MEM 2
+#define SZ_ERROR_CRC 3
+#define SZ_ERROR_UNSUPPORTED 4
+#define SZ_ERROR_PARAM 5
+#define SZ_ERROR_INPUT_EOF 6
+#define SZ_ERROR_OUTPUT_EOF 7
+#define SZ_ERROR_READ 8
+#define SZ_ERROR_WRITE 9
+#define SZ_ERROR_PROGRESS 10
+#define SZ_ERROR_FAIL 11
+#define SZ_ERROR_THREAD 12
+
+#define SZ_ERROR_ARCHIVE 16
+#define SZ_ERROR_NO_ARCHIVE 17
+
+typedef int SRes;
+
+#ifdef _WIN32
+typedef DWORD WRes;
+#else
+typedef int WRes;
+#endif
+
+#ifndef RINOK
+#define RINOK(x) { int __result__ = (x); if (__result__ != 0) return __result__; }
+#endif
+
+typedef unsigned char Byte;
+typedef short Int16;
+typedef unsigned short UInt16;
+
+#ifdef _LZMA_UINT32_IS_ULONG
+typedef long Int32;
+typedef unsigned long UInt32;
+#else
+typedef int Int32;
+typedef unsigned int UInt32;
+#endif
+
+#ifdef _SZ_NO_INT_64
+
+/* define _SZ_NO_INT_64, if your compiler doesn't support 64-bit integers.
+   NOTES: Some code will work incorrectly in that case! */
+
+typedef long Int64;
+typedef unsigned long UInt64;
+
+#else
+
+#if defined(_MSC_VER) || defined(__BORLANDC__)
+typedef __int64 Int64;
+typedef unsigned __int64 UInt64;
+#else
+typedef long long int Int64;
+typedef unsigned long long int UInt64;
+#endif
+
+#endif
+
+#ifdef _LZMA_NO_SYSTEM_SIZE_T
+typedef UInt32 SizeT;
+#else
+typedef size_t SizeT;
+#endif
+
+typedef int Bool;
+#define True 1
+#define False 0
+
+
+#ifdef _WIN32
+#define MY_STD_CALL __stdcall
+#else
+#define MY_STD_CALL
+#endif
+
+#ifdef _MSC_VER
+
+#if _MSC_VER >= 1300
+#define MY_NO_INLINE __declspec(noinline)
+#else
+#define MY_NO_INLINE
+#endif
+
+#define MY_CDECL __cdecl
+#define MY_FAST_CALL __fastcall
+
+#else
+
+#define MY_CDECL
+#define MY_FAST_CALL
+
+#endif
+
+
+/* The following interfaces use first parameter as pointer to structure */
+
+typedef struct
+{
+  SRes (*Read)(void *p, void *buf, size_t *size);
+    /* if (input(*size) != 0 && output(*size) == 0) means end_of_stream.
+       (output(*size) < input(*size)) is allowed */
+} ISeqInStream;
+
+/* it can return SZ_ERROR_INPUT_EOF */
+SRes SeqInStream_Read(ISeqInStream *stream, void *buf, size_t size);
+SRes SeqInStream_Read2(ISeqInStream *stream, void *buf, size_t size, SRes errorType);
+SRes SeqInStream_ReadByte(ISeqInStream *stream, Byte *buf);
+
+typedef struct
+{
+  size_t (*Write)(void *p, const void *buf, size_t size);
+    /* Returns: result - the number of actually written bytes.
+       (result < size) means error */
+} ISeqOutStream;
+
+typedef enum
+{
+  SZ_SEEK_SET = 0,
+  SZ_SEEK_CUR = 1,
+  SZ_SEEK_END = 2
+} ESzSeek;
+
+typedef struct
+{
+  SRes (*Read)(void *p, void *buf, size_t *size);  /* same as ISeqInStream::Read */
+  SRes (*Seek)(void *p, Int64 *pos, ESzSeek origin);
+} ISeekInStream;
+
+typedef struct
+{
+  SRes (*Look)(void *p, void **buf, size_t *size);
+    /* if (input(*size) != 0 && output(*size) == 0) means end_of_stream.
+       (output(*size) > input(*size)) is not allowed
+       (output(*size) < input(*size)) is allowed */
+  SRes (*Skip)(void *p, size_t offset);
+    /* offset must be <= output(*size) of Look */
+
+  SRes (*Read)(void *p, void *buf, size_t *size);
+    /* reads directly (without buffer). It's same as ISeqInStream::Read */
+  SRes (*Seek)(void *p, Int64 *pos, ESzSeek origin);
+} ILookInStream;
+
+SRes LookInStream_LookRead(ILookInStream *stream, void *buf, size_t *size);
+SRes LookInStream_SeekTo(ILookInStream *stream, UInt64 offset);
+
+/* reads via ILookInStream::Read */
+SRes LookInStream_Read2(ILookInStream *stream, void *buf, size_t size, SRes errorType);
+SRes LookInStream_Read(ILookInStream *stream, void *buf, size_t size);
+
+#define LookToRead_BUF_SIZE (1 << 14)
+
+typedef struct
+{
+  ILookInStream s;
+  ISeekInStream *realStream;
+  size_t pos;
+  size_t size;
+  Byte buf[LookToRead_BUF_SIZE];
+} CLookToRead;
+
+void LookToRead_CreateVTable(CLookToRead *p, int lookahead);
+void LookToRead_Init(CLookToRead *p);
+
+typedef struct
+{
+  ISeqInStream s;
+  ILookInStream *realStream;
+} CSecToLook;
+
+void SecToLook_CreateVTable(CSecToLook *p);
+
+typedef struct
+{
+  ISeqInStream s;
+  ILookInStream *realStream;
+} CSecToRead;
+
+void SecToRead_CreateVTable(CSecToRead *p);
+
+typedef struct
+{
+  SRes (*Progress)(void *p, UInt64 inSize, UInt64 outSize);
+    /* Returns: result. (result != SZ_OK) means break.
+       Value (UInt64)(Int64)-1 for size means unknown value. */
+} ICompressProgress;
+
+typedef struct
+{
+  void *(*Alloc)(void *p, size_t size);
+  void (*Free)(void *p, void *address); /* address can be 0 */
+} ISzAlloc;
+
+#define IAlloc_Alloc(p, size) (p)->Alloc((p), size)
+#define IAlloc_Free(p, a) (p)->Free((p), a)
+
+EXTERN_C_END
+
+#endif
diff --git a/include/linux/module.h b/include/linux/module.h
index 0c3207d26..08b58474b 100644
--- a/include/linux/module.h
+++ b/include/linux/module.h
@@ -159,6 +159,7 @@ extern void cleanup_module(void);
 
 /* Generic info of form tag = "info" */
 #define MODULE_INFO(tag, info) __MODULE_INFO(tag, tag, info)
+#define MODULE_INFO_STRIP(tag, info) __MODULE_INFO_STRIP(tag, tag, info)
 
 /* For userspace: you can also call me... */
 #define MODULE_ALIAS(_alias) MODULE_INFO(alias, _alias)
@@ -202,12 +203,12 @@ extern void cleanup_module(void);
  * Author(s), use "Name <email>" or just "Name", for multiple
  * authors use multiple MODULE_AUTHOR() statements/lines.
  */
-#define MODULE_AUTHOR(_author) MODULE_INFO(author, _author)
+#define MODULE_AUTHOR(_author) MODULE_INFO_STRIP(author, _author)
 
 /* What your module does. */
-#define MODULE_DESCRIPTION(_description) MODULE_INFO(description, _description)
+#define MODULE_DESCRIPTION(_description) MODULE_INFO_STRIP(description, _description)
 
-#ifdef MODULE
+#if defined(MODULE) && !defined(CONFIG_MODULE_STRIPPED)
 /* Creates an alias so file2alias.c can find device table. */
 #define MODULE_DEVICE_TABLE(type, name)					\
 extern const typeof(name) __mod_##type##__##name##_device_table		\
@@ -234,7 +235,9 @@ extern const typeof(name) __mod_##type##__##name##_device_table		\
  */
 
 #if defined(MODULE) || !defined(CONFIG_SYSFS)
-#define MODULE_VERSION(_version) MODULE_INFO(version, _version)
+#define MODULE_VERSION(_version) MODULE_INFO_STRIP(version, _version)
+#elif defined(CONFIG_MODULE_STRIPPED)
+#define MODULE_VERSION(_version) __MODULE_INFO_DISABLED(version)
 #else
 #define MODULE_VERSION(_version)					\
 	static struct module_version_attribute ___modver_attr = {	\
@@ -256,7 +259,7 @@ extern const typeof(name) __mod_##type##__##name##_device_table		\
 /* Optional firmware file (or files) needed by the module
  * format is simply firmware file name.  Multiple firmware
  * files require multiple MODULE_FIRMWARE() specifiers */
-#define MODULE_FIRMWARE(_firmware) MODULE_INFO(firmware, _firmware)
+#define MODULE_FIRMWARE(_firmware) MODULE_INFO_STRIP(firmware, _firmware)
 
 struct notifier_block;
 
diff --git a/include/linux/moduleparam.h b/include/linux/moduleparam.h
index 52666d90c..f0db35929 100644
--- a/include/linux/moduleparam.h
+++ b/include/linux/moduleparam.h
@@ -16,6 +16,16 @@
 /* Chosen so that structs with an unsigned long line up. */
 #define MAX_PARAM_PREFIX_LEN (64 - sizeof(unsigned long))
 
+/* This struct is here for syntactic coherency, it is not used */
+#define __MODULE_INFO_DISABLED(name)					  \
+  struct __UNIQUE_ID(name) {}
+
+#ifdef CONFIG_MODULE_STRIPPED
+#define __MODULE_INFO_STRIP(tag, name, info) __MODULE_INFO_DISABLED(name)
+#else
+#define __MODULE_INFO_STRIP(tag, name, info) __MODULE_INFO(tag, name, info)
+#endif
+
 #ifdef MODULE
 #define __MODULE_INFO(tag, name, info)					  \
 static const char __UNIQUE_ID(name)[]					  \
@@ -23,8 +33,7 @@ static const char __UNIQUE_ID(name)[]					  \
   = __stringify(tag) "=" info
 #else  /* !MODULE */
 /* This struct is here for syntactic coherency, it is not used */
-#define __MODULE_INFO(tag, name, info)					  \
-  struct __UNIQUE_ID(name) {}
+#define __MODULE_INFO(tag, name, info) __MODULE_INFO_DISABLED(name)
 #endif
 #define __MODULE_PARM_TYPE(name, _type)					  \
   __MODULE_INFO(parmtype, name##type, #name ":" _type)
@@ -32,7 +41,7 @@ static const char __UNIQUE_ID(name)[]					  \
 /* One for each parameter, describing how to use it.  Some files do
    multiple of these per line, so can't just use MODULE_INFO. */
 #define MODULE_PARM_DESC(_parm, desc) \
-	__MODULE_INFO(parm, _parm, #_parm ":" desc)
+	__MODULE_INFO_STRIP(parm, _parm, #_parm ":" desc)
 
 struct kernel_param;
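With CONFIG_MODULE_STRIPPED=y the __MODULE_INFO_STRIP() users above and in module.h collapse to an empty struct declaration, so those strings never reach the module's .modinfo section. A hypothetical module showing which tags are affected (MODULE_LICENSE() still uses plain MODULE_INFO() and survives):

#include <linux/module.h>
#include <linux/moduleparam.h>

static int debug;
module_param(debug, int, 0644);
/* Dropped when CONFIG_MODULE_STRIPPED=y: */
MODULE_PARM_DESC(debug, "Enable verbose logging");
MODULE_AUTHOR("Example Author <author@example.org>");
MODULE_DESCRIPTION("Hypothetical module showing stripped modinfo tags");
/* Kept regardless: */
MODULE_LICENSE("GPL");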
 
diff --git a/include/linux/mtd/mtd.h b/include/linux/mtd/mtd.h
index 13f8052b9..5ce6ee9e5 100644
--- a/include/linux/mtd/mtd.h
+++ b/include/linux/mtd/mtd.h
@@ -322,6 +322,7 @@ struct mtd_info {
 	int (*_block_isreserved) (struct mtd_info *mtd, loff_t ofs);
 	int (*_block_isbad) (struct mtd_info *mtd, loff_t ofs);
 	int (*_block_markbad) (struct mtd_info *mtd, loff_t ofs);
+	int (*_max_bad_blocks)(struct mtd_info *mtd, loff_t ofs, size_t len);
 	int (*_suspend) (struct mtd_info *mtd);
 	void (*_resume) (struct mtd_info *mtd);
 	void (*_reboot) (struct mtd_info *mtd);
@@ -397,6 +398,18 @@ static inline int mtd_oobavail(struct mtd_info *mtd, struct mtd_oob_ops *ops)
 	return ops->mode == MTD_OPS_AUTO_OOB ? mtd->oobavail : mtd->oobsize;
 }
 
+static inline int mtd_max_bad_blocks(struct mtd_info *mtd,
+				     loff_t ofs, size_t len)
+{
+	if (!mtd->_max_bad_blocks)
+		return -ENOTSUPP;
+
+	if (mtd->size < (len + ofs) || ofs < 0)
+		return -EINVAL;
+
+	return mtd->_max_bad_blocks(mtd, ofs, len);
+}
+
 int mtd_wunit_to_pairing_info(struct mtd_info *mtd, int wunit,
 			      struct mtd_pairing_info *info);
 int mtd_pairing_info_to_wunit(struct mtd_info *mtd,
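mtd_max_bad_blocks() gives upper layers a way to ask the driver for the worst-case number of bad blocks in a range, e.g. to size a spare-block reserve when creating a volume. A hedged sketch of such a caller (name and policy hypothetical):

#include <linux/mtd/mtd.h>

static int demo_bad_block_reserve(struct mtd_info *mtd, loff_t ofs, size_t len)
{
	int max_bad = mtd_max_bad_blocks(mtd, ofs, len);

	if (max_bad == -ENOTSUPP)	/* driver has no _max_bad_blocks hook */
		return 0;
	if (max_bad < 0)		/* e.g. -EINVAL for a bad range */
		return max_bad;

	return max_bad;			/* caller adds this many spare blocks */
}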
diff --git a/include/linux/mtd/partitions.h b/include/linux/mtd/partitions.h
index 70736e1e6..06df1e06b 100644
--- a/include/linux/mtd/partitions.h
+++ b/include/linux/mtd/partitions.h
@@ -41,6 +41,7 @@ struct mtd_partition {
 	uint64_t size;			/* partition size */
 	uint64_t offset;		/* offset within the master MTD space */
 	uint32_t mask_flags;		/* master MTD flags to mask out for this partition */
+	struct device_node *of_node;
 };
 
 #define MTDPART_OFS_RETAIN	(-3)
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index 23db1ae37..7411d0396 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -1749,6 +1749,8 @@ struct net_device {
 	struct netdev_hw_addr_list	mc;
 	struct netdev_hw_addr_list	dev_addrs;
 
+	unsigned char		local_addr_mask[MAX_ADDR_LEN];
+
 #ifdef CONFIG_SYSFS
 	struct kset		*queues_kset;
 #endif
diff --git a/include/linux/phy.h b/include/linux/phy.h
index 8431c8c0c..86b0bfb49 100644
--- a/include/linux/phy.h
+++ b/include/linux/phy.h
@@ -511,6 +511,12 @@ struct phy_driver {
 	 */
 	int (*did_interrupt)(struct phy_device *phydev);
 
+	/*
+	 * Called before an ethernet device is detached
+	 * from the PHY.
+	 */
+	void (*detach)(struct phy_device *phydev);
+
 	/* Clears up any memory if needed */
 	void (*remove)(struct phy_device *phydev);
 
@@ -820,6 +826,7 @@ int phy_ethtool_ksettings_get(struct phy_device *phydev,
 			      struct ethtool_link_ksettings *cmd);
 int phy_ethtool_ksettings_set(struct phy_device *phydev,
 			      const struct ethtool_link_ksettings *cmd);
+int phy_ethtool_ioctl(struct phy_device *phydev, void *useraddr);
 int phy_mii_ioctl(struct phy_device *phydev, struct ifreq *ifr, int cmd);
 int phy_start_interrupts(struct phy_device *phydev);
 void phy_print_status(struct phy_device *phydev);
diff --git a/include/linux/regmap.h b/include/linux/regmap.h
index f66731324..95c7db294 100644
--- a/include/linux/regmap.h
+++ b/include/linux/regmap.h
@@ -135,7 +135,7 @@ struct reg_sequence {
 	pollret ?: ((cond) ? 0 : -ETIMEDOUT); \
 })
 
-#ifdef CONFIG_REGMAP
+#if IS_ENABLED(CONFIG_REGMAP)
 
 enum regmap_endian {
 	/* Unspecified -> 0 -> Backwards compatible default */
diff --git a/include/linux/rfkill.h b/include/linux/rfkill.h
index e6a0031d1..ed5f744f0 100644
--- a/include/linux/rfkill.h
+++ b/include/linux/rfkill.h
@@ -64,7 +64,7 @@ struct rfkill_ops {
 	int	(*set_block)(void *data, bool blocked);
 };
 
-#if defined(CONFIG_RFKILL) || defined(CONFIG_RFKILL_MODULE)
+#if IS_ENABLED(CONFIG_RFKILL_FULL)
 /**
  * rfkill_alloc - allocate rfkill structure
  * @name: name of the struct -- the string is not copied internally
diff --git a/include/linux/serial_core.h b/include/linux/serial_core.h
index 344201437..7e6165bc1 100644
--- a/include/linux/serial_core.h
+++ b/include/linux/serial_core.h
@@ -448,7 +448,7 @@ extern void uart_handle_cts_change(struct uart_port *uport,
 extern void uart_insert_char(struct uart_port *port, unsigned int status,
 		 unsigned int overrun, unsigned int ch, unsigned int flag);
 
-#ifdef SUPPORT_SYSRQ
+#if defined(SUPPORT_SYSRQ) && defined(CONFIG_MAGIC_SYSRQ_SERIAL)
 static inline int
 uart_handle_sysrq_char(struct uart_port *port, unsigned int ch)
 {
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
index 79782a87f..d1a3bb235 100644
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -751,7 +751,8 @@ struct sk_buff {
 #ifdef CONFIG_NET_SWITCHDEV
 	__u8			offload_fwd_mark:1;
 #endif
-	/* 2, 4 or 5 bit hole */
+	__u8			gro_skip:1;
+	/* 1, 3 or 4 bit hole */
 
 #ifdef CONFIG_NET_SCHED
 	__u16			tc_index;	/* traffic control index */
@@ -2307,7 +2308,7 @@ static inline int pskb_network_may_pull(struct sk_buff *skb, unsigned int len)
  * NET_IP_ALIGN(2) + ethernet_header(14) + IP_header(20/40) + ports(8)
  */
 #ifndef NET_SKB_PAD
-#define NET_SKB_PAD	max(32, L1_CACHE_BYTES)
+#define NET_SKB_PAD	max(64, L1_CACHE_BYTES)
 #endif
 
 int ___pskb_trim(struct sk_buff *skb, unsigned int len);
diff --git a/include/linux/tcp.h b/include/linux/tcp.h
index 647532b0e..089c7f6fc 100644
--- a/include/linux/tcp.h
+++ b/include/linux/tcp.h
@@ -192,7 +192,6 @@ struct tcp_sock {
 	u32	tsoffset;	/* timestamp offset */
 
 	struct list_head tsq_node; /* anchor in tsq_tasklet.head list */
-	unsigned long	tsq_flags;
 
 	/* Data for direct copy to user */
 	struct {
@@ -367,7 +366,7 @@ struct tcp_sock {
 	u32	*saved_syn;
 };
 
-enum tsq_flags {
+enum tsq_enum {
 	TSQ_THROTTLED,
 	TSQ_QUEUED,
 	TCP_TSQ_DEFERRED,	   /* tcp_tasklet_func() found socket was owned */
@@ -378,6 +377,15 @@ enum tsq_flags {
 				    */
 };
 
+enum tsq_flags {
+	TSQF_THROTTLED			= (1UL << TSQ_THROTTLED),
+	TSQF_QUEUED			= (1UL << TSQ_QUEUED),
+	TCPF_TSQ_DEFERRED		= (1UL << TCP_TSQ_DEFERRED),
+	TCPF_WRITE_TIMER_DEFERRED	= (1UL << TCP_WRITE_TIMER_DEFERRED),
+	TCPF_DELACK_TIMER_DEFERRED	= (1UL << TCP_DELACK_TIMER_DEFERRED),
+	TCPF_MTU_REDUCED_DEFERRED	= (1UL << TCP_MTU_REDUCED_DEFERRED),
+};
+
 static inline struct tcp_sock *tcp_sk(const struct sock *sk)
 {
 	return (struct tcp_sock *)sk;
diff --git a/include/linux/usb/ehci_pdriver.h b/include/linux/usb/ehci_pdriver.h
index db0431b39..6cf7fc149 100644
--- a/include/linux/usb/ehci_pdriver.h
+++ b/include/linux/usb/ehci_pdriver.h
@@ -49,6 +49,7 @@ struct usb_ehci_pdata {
 	unsigned	no_io_watchdog:1;
 	unsigned	reset_on_resume:1;
 	unsigned	dma_mask_64:1;
+	unsigned	ignore_oc:1;
 
 	/* Turn on all power and clocks */
 	int (*power_on)(struct platform_device *pdev);
diff --git a/include/linux/usb/hcd.h b/include/linux/usb/hcd.h
index 492034126..40084ad92 100644
--- a/include/linux/usb/hcd.h
+++ b/include/linux/usb/hcd.h
@@ -461,7 +461,14 @@ extern int usb_hcd_pci_probe(struct pci_dev *dev,
 extern void usb_hcd_pci_remove(struct pci_dev *dev);
 extern void usb_hcd_pci_shutdown(struct pci_dev *dev);
 
+#ifndef CONFIG_PCI_DISABLE_COMMON_QUIRKS
 extern int usb_hcd_amd_remote_wakeup_quirk(struct pci_dev *dev);
+#else
+static inline int usb_hcd_amd_remote_wakeup_quirk(struct pci_dev *dev)
+{
+	return 0;
+}
+#endif
 
 #ifdef CONFIG_PM
 extern const struct dev_pm_ops usb_hcd_pci_pm_ops;
diff --git a/include/net/ip6_tunnel.h b/include/net/ip6_tunnel.h
index 1b1cf33cb..7956a7c27 100644
--- a/include/net/ip6_tunnel.h
+++ b/include/net/ip6_tunnel.h
@@ -17,6 +17,18 @@
 /* determine capability on a per-packet basis */
 #define IP6_TNL_F_CAP_PER_PACKET 0x40000
 
+/* IPv6 tunnel FMR */
+struct __ip6_tnl_fmr {
+	struct __ip6_tnl_fmr *next; /* next fmr in list */
+	struct in6_addr ip6_prefix;
+	struct in_addr ip4_prefix;
+
+	__u8 ip6_prefix_len;
+	__u8 ip4_prefix_len;
+	__u8 ea_len;
+	__u8 offset;
+};
+
 struct __ip6_tnl_parm {
 	char name[IFNAMSIZ];	/* name of tunnel device */
 	int link;		/* ifindex of underlying L2 interface */
@@ -28,6 +40,7 @@ struct __ip6_tnl_parm {
 	__u32 flags;		/* tunnel flags */
 	struct in6_addr laddr;	/* local tunnel end-point address */
 	struct in6_addr raddr;	/* remote tunnel end-point address */
+	struct __ip6_tnl_fmr *fmrs;	/* FMRs */
 
 	__be16			i_flags;
 	__be16			o_flags;
diff --git a/include/net/netfilter/nf_conntrack_extend.h b/include/net/netfilter/nf_conntrack_extend.h
index 1c3035dda..0988c7dfa 100644
--- a/include/net/netfilter/nf_conntrack_extend.h
+++ b/include/net/netfilter/nf_conntrack_extend.h
@@ -26,6 +26,9 @@ enum nf_ct_ext_id {
 #endif
 #if IS_ENABLED(CONFIG_NETFILTER_SYNPROXY)
 	NF_CT_EXT_SYNPROXY,
+#endif
+#if IS_ENABLED(CONFIG_NF_CONNTRACK_RTCACHE)
+	NF_CT_EXT_RTCACHE,
 #endif
 	NF_CT_EXT_NUM,
 };
@@ -39,6 +42,7 @@ enum nf_ct_ext_id {
 #define NF_CT_EXT_TIMEOUT_TYPE struct nf_conn_timeout
 #define NF_CT_EXT_LABELS_TYPE struct nf_conn_labels
 #define NF_CT_EXT_SYNPROXY_TYPE struct nf_conn_synproxy
+#define NF_CT_EXT_RTCACHE_TYPE struct nf_conn_rtcache
 
 /* Extensions: optional stuff which isn't permanently in struct. */
 struct nf_ct_ext {
diff --git a/include/net/netfilter/nf_conntrack_rtcache.h b/include/net/netfilter/nf_conntrack_rtcache.h
new file mode 100644
index 000000000..e2fb30243
--- /dev/null
+++ b/include/net/netfilter/nf_conntrack_rtcache.h
@@ -0,0 +1,34 @@
+#include <linux/gfp.h>
+#include <net/netfilter/nf_conntrack.h>
+#include <net/netfilter/nf_conntrack_extend.h>
+
+struct dst_entry;
+
+struct nf_conn_dst_cache {
+	struct dst_entry *dst;
+	int iif;
+#if IS_ENABLED(CONFIG_NF_CONNTRACK_IPV6)
+	u32 cookie;
+#endif
+
+};
+
+struct nf_conn_rtcache {
+	struct nf_conn_dst_cache cached_dst[IP_CT_DIR_MAX];
+};
+
+static inline
+struct nf_conn_rtcache *nf_ct_rtcache_find(const struct nf_conn *ct)
+{
+#if IS_ENABLED(CONFIG_NF_CONNTRACK_RTCACHE)
+	return nf_ct_ext_find(ct, NF_CT_EXT_RTCACHE);
+#else
+	return NULL;
+#endif
+}
+
+static inline int nf_conn_rtcache_iif_get(const struct nf_conn_rtcache *rtc,
+					  enum ip_conntrack_dir dir)
+{
+	return rtc->cached_dst[dir].iif;
+}
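The two inline helpers above are the lookup side of the conntrack route-cache extension added elsewhere in this series. A minimal consumer sketch (function name hypothetical):

#include <net/netfilter/nf_conntrack_rtcache.h>

static bool demo_rtcache_iif_matches(const struct nf_conn *ct,
				     enum ip_conntrack_dir dir, int iif)
{
	struct nf_conn_rtcache *rtc = nf_ct_rtcache_find(ct);

	if (!rtc)	/* extension not attached, or support disabled */
		return false;

	return nf_conn_rtcache_iif_get(rtc, dir) == iif;
}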
diff --git a/include/net/netns/ipv6.h b/include/net/netns/ipv6.h
index 10d0848f5..29023a93c 100644
--- a/include/net/netns/ipv6.h
+++ b/include/net/netns/ipv6.h
@@ -66,6 +66,7 @@ struct netns_ipv6 {
 	unsigned long		 ip6_rt_last_gc;
 #ifdef CONFIG_IPV6_MULTIPLE_TABLES
 	struct rt6_info         *ip6_prohibit_entry;
+	struct rt6_info		*ip6_policy_failed_entry;
 	struct rt6_info         *ip6_blk_hole_entry;
 	struct fib6_table       *fib6_local_tbl;
 	struct fib_rules_ops    *fib6_rules_ops;
diff --git a/include/net/sock.h b/include/net/sock.h
index 92b269709..a186749de 100644
--- a/include/net/sock.h
+++ b/include/net/sock.h
@@ -343,6 +343,9 @@ struct sock {
 #define sk_rxhash		__sk_common.skc_rxhash
 
 	socket_lock_t		sk_lock;
+	atomic_t		sk_drops;
+	int			sk_rcvlowat;
+	struct sk_buff_head	sk_error_queue;
 	struct sk_buff_head	sk_receive_queue;
 	/*
 	 * The backlog queue is special, it is always used with
@@ -359,14 +362,13 @@ struct sock {
 		struct sk_buff	*tail;
 	} sk_backlog;
 #define sk_rmem_alloc sk_backlog.rmem_alloc
-	int			sk_forward_alloc;
 
-	__u32			sk_txhash;
+	int			sk_forward_alloc;
 #ifdef CONFIG_NET_RX_BUSY_POLL
-	unsigned int		sk_napi_id;
 	unsigned int		sk_ll_usec;
+	/* ===== mostly read cache line ===== */
+	unsigned int		sk_napi_id;
 #endif
-	atomic_t		sk_drops;
 	int			sk_rcvbuf;
 
 	struct sk_filter __rcu	*sk_filter;
@@ -379,11 +381,30 @@ struct sock {
 #endif
 	struct dst_entry	*sk_rx_dst;
 	struct dst_entry __rcu	*sk_dst_cache;
-	/* Note: 32bit hole on 64bit arches */
-	atomic_t		sk_wmem_alloc;
 	atomic_t		sk_omem_alloc;
 	int			sk_sndbuf;
+
+	/* ===== cache line for TX ===== */
+	int			sk_wmem_queued;
+	atomic_t		sk_wmem_alloc;
+	unsigned long		sk_tsq_flags;
+	struct sk_buff		*sk_send_head;
 	struct sk_buff_head	sk_write_queue;
+	__s32			sk_peek_off;
+	int			sk_write_pending;
+	long			sk_sndtimeo;
+	struct timer_list	sk_timer;
+	__u32			sk_priority;
+	__u32			sk_mark;
+	u32			sk_pacing_rate; /* bytes per second */
+	u32			sk_max_pacing_rate;
+	struct page_frag	sk_frag;
+	netdev_features_t	sk_route_caps;
+	netdev_features_t	sk_route_nocaps;
+	int			sk_gso_type;
+	unsigned int		sk_gso_max_size;
+	gfp_t			sk_allocation;
+	__u32			sk_txhash;
 
 	/*
 	 * Because of non atomicity rules, all
@@ -399,41 +420,23 @@ struct sock {
 #define SK_PROTOCOL_MAX U8_MAX
 	kmemcheck_bitfield_end(flags);
 
-	int			sk_wmem_queued;
-	gfp_t			sk_allocation;
-	u32			sk_pacing_rate; /* bytes per second */
-	u32			sk_max_pacing_rate;
-	netdev_features_t	sk_route_caps;
-	netdev_features_t	sk_route_nocaps;
-	int			sk_gso_type;
-	unsigned int		sk_gso_max_size;
 	u16			sk_gso_max_segs;
-	int			sk_rcvlowat;
 	unsigned long	        sk_lingertime;
-	struct sk_buff_head	sk_error_queue;
 	struct proto		*sk_prot_creator;
 	rwlock_t		sk_callback_lock;
 	int			sk_err,
 				sk_err_soft;
 	u32			sk_ack_backlog;
 	u32			sk_max_ack_backlog;
-	__u32			sk_priority;
-	__u32			sk_mark;
 	struct pid		*sk_peer_pid;
 	const struct cred	*sk_peer_cred;
 	long			sk_rcvtimeo;
-	long			sk_sndtimeo;
-	struct timer_list	sk_timer;
 	ktime_t			sk_stamp;
 	u16			sk_tsflags;
 	u8			sk_shutdown;
 	u32			sk_tskey;
 	struct socket		*sk_socket;
 	void			*sk_user_data;
-	struct page_frag	sk_frag;
-	struct sk_buff		*sk_send_head;
-	__s32			sk_peek_off;
-	int			sk_write_pending;
 #ifdef CONFIG_SECURITY
 	void			*sk_security;
 #endif
diff --git a/include/uapi/linux/fib_rules.h b/include/uapi/linux/fib_rules.h
index 14404b3eb..b54d4140c 100644
--- a/include/uapi/linux/fib_rules.h
+++ b/include/uapi/linux/fib_rules.h
@@ -66,9 +66,15 @@ enum {
 	FR_ACT_BLACKHOLE,	/* Drop without notification */
 	FR_ACT_UNREACHABLE,	/* Drop with ENETUNREACH */
 	FR_ACT_PROHIBIT,	/* Drop with EACCES */
+	FR_ACT_RES9,
+	FR_ACT_RES10,
+	FR_ACT_RES11,
+	FR_ACT_POLICY_FAILED,	/* Drop with EACCES */
 	__FR_ACT_MAX,
 };
 
+#define FR_ACT_FAILED_POLICY FR_ACT_POLICY_FAILED
+
 #define FR_ACT_MAX (__FR_ACT_MAX - 1)
 
 #endif
diff --git a/include/uapi/linux/icmpv6.h b/include/uapi/linux/icmpv6.h
index 590beda78..435c5c1c6 100644
--- a/include/uapi/linux/icmpv6.h
+++ b/include/uapi/linux/icmpv6.h
@@ -118,6 +118,8 @@ struct icmp6hdr {
 #define ICMPV6_POLICY_FAIL		5
 #define ICMPV6_REJECT_ROUTE		6
 
+#define ICMPV6_FAILED_POLICY		ICMPV6_POLICY_FAIL
+
 /*
  *	Codes for Time Exceeded
  */
diff --git a/include/uapi/linux/if_link.h b/include/uapi/linux/if_link.h
index b4fba662c..ee4d632d0 100644
--- a/include/uapi/linux/if_link.h
+++ b/include/uapi/linux/if_link.h
@@ -319,6 +319,7 @@ enum {
 	IFLA_BRPORT_MULTICAST_ROUTER,
 	IFLA_BRPORT_PAD,
 	IFLA_BRPORT_MCAST_FLOOD,
+	IFLA_BRPORT_MCAST_TO_UCAST,
 	__IFLA_BRPORT_MAX
 };
 #define IFLA_BRPORT_MAX (__IFLA_BRPORT_MAX - 1)
diff --git a/include/uapi/linux/if_packet.h b/include/uapi/linux/if_packet.h
index 9e7edfd81..40fdf8907 100644
--- a/include/uapi/linux/if_packet.h
+++ b/include/uapi/linux/if_packet.h
@@ -31,6 +31,8 @@ struct sockaddr_ll {
 #define PACKET_KERNEL		7		/* To kernel space	*/
 /* Unused, PACKET_FASTROUTE and PACKET_LOOPBACK are invisible to user space */
 #define PACKET_FASTROUTE	6		/* Fastrouted frame	*/
+#define PACKET_MASK_ANY		0xffffffff	/* mask for packet type bits */
+
 
 /* Packet socket options */
 
@@ -56,6 +58,7 @@ struct sockaddr_ll {
 #define PACKET_QDISC_BYPASS		20
 #define PACKET_ROLLOVER_STATS		21
 #define PACKET_FANOUT_DATA		22
+#define PACKET_RECV_TYPE		23
 
 #define PACKET_FANOUT_HASH		0
 #define PACKET_FANOUT_LB		1
diff --git a/include/uapi/linux/if_tunnel.h b/include/uapi/linux/if_tunnel.h
index 92f3c8677..ee42bf29c 100644
--- a/include/uapi/linux/if_tunnel.h
+++ b/include/uapi/linux/if_tunnel.h
@@ -75,10 +75,23 @@ enum {
 	IFLA_IPTUN_ENCAP_SPORT,
 	IFLA_IPTUN_ENCAP_DPORT,
 	IFLA_IPTUN_COLLECT_METADATA,
+	IFLA_IPTUN_FMRS,
 	__IFLA_IPTUN_MAX,
 };
 #define IFLA_IPTUN_MAX	(__IFLA_IPTUN_MAX - 1)
 
+enum {
+	IFLA_IPTUN_FMR_UNSPEC,
+	IFLA_IPTUN_FMR_IP6_PREFIX,
+	IFLA_IPTUN_FMR_IP4_PREFIX,
+	IFLA_IPTUN_FMR_IP6_PREFIX_LEN,
+	IFLA_IPTUN_FMR_IP4_PREFIX_LEN,
+	IFLA_IPTUN_FMR_EA_LEN,
+	IFLA_IPTUN_FMR_OFFSET,
+	__IFLA_IPTUN_FMR_MAX,
+};
+#define IFLA_IPTUN_FMR_MAX (__IFLA_IPTUN_FMR_MAX - 1)
+
 enum tunnel_encap_types {
 	TUNNEL_ENCAP_NONE,
 	TUNNEL_ENCAP_FOU,
diff --git a/include/uapi/linux/jffs2.h b/include/uapi/linux/jffs2.h
index a18b719f4..f3431a5ff 100644
--- a/include/uapi/linux/jffs2.h
+++ b/include/uapi/linux/jffs2.h
@@ -46,6 +46,7 @@
 #define JFFS2_COMPR_DYNRUBIN	0x05
 #define JFFS2_COMPR_ZLIB	0x06
 #define JFFS2_COMPR_LZO		0x07
+#define JFFS2_COMPR_LZMA	0x08
 /* Compatibility flags. */
 #define JFFS2_COMPAT_MASK 0xc000      /* What do to if an unknown nodetype is found */
 #define JFFS2_NODE_ACCURATE 0x2000
diff --git a/include/uapi/linux/netfilter_ipv4/ip_tables.h b/include/uapi/linux/netfilter_ipv4/ip_tables.h
index d0da53d96..f279daa13 100644
--- a/include/uapi/linux/netfilter_ipv4/ip_tables.h
+++ b/include/uapi/linux/netfilter_ipv4/ip_tables.h
@@ -88,6 +88,7 @@ struct ipt_ip {
 #define IPT_F_FRAG		0x01	/* Set if rule is a fragment rule */
 #define IPT_F_GOTO		0x02	/* Set if jump is a goto */
 #define IPT_F_MASK		0x03	/* All possible flag bits mask. */
+#define IPT_F_NO_DEF_MATCH	0x80	/* Internal: no default match rules present */
 
 /* Values for "inv" field in struct ipt_ip. */
 #define IPT_INV_VIA_IN		0x01	/* Invert the sense of IN IFACE. */
diff --git a/include/uapi/linux/rtnetlink.h b/include/uapi/linux/rtnetlink.h
index 5a78be518..83b6c3e22 100644
--- a/include/uapi/linux/rtnetlink.h
+++ b/include/uapi/linux/rtnetlink.h
@@ -215,9 +215,12 @@ enum {
 	RTN_THROW,		/* Not in this table		*/
 	RTN_NAT,		/* Translate this address	*/
 	RTN_XRESOLVE,		/* Use external resolver	*/
+	RTN_POLICY_FAILED,	/* Failed ingress/egress policy */
 	__RTN_MAX
 };
 
+#define RTN_FAILED_POLICY RTN_POLICY_FAILED
+
 #define RTN_MAX (__RTN_MAX - 1)
 
 
diff --git a/include/uapi/linux/spi/spidev.h b/include/uapi/linux/spi/spidev.h
index dd5f21e75..b481bf37c 100644
--- a/include/uapi/linux/spi/spidev.h
+++ b/include/uapi/linux/spi/spidev.h
@@ -111,7 +111,7 @@ struct spi_ioc_transfer {
 
 /* not all platforms use <asm-generic/ioctl.h> or _IOC_TYPECHECK() ... */
 #define SPI_MSGSIZE(N) \
-	((((N)*(sizeof (struct spi_ioc_transfer))) < (1 << _IOC_SIZEBITS)) \
+	((((N)*(sizeof(struct spi_ioc_transfer))) < (1 << 13)) \
 		? ((N)*(sizeof (struct spi_ioc_transfer))) : 0)
 #define SPI_IOC_MESSAGE(N) _IOW(SPI_IOC_MAGIC, 0, char[SPI_MSGSIZE(N)])
 
diff --git a/init/Kconfig b/init/Kconfig
index 34407f15e..5adca0615 100644
--- a/init/Kconfig
+++ b/init/Kconfig
@@ -1370,6 +1370,17 @@ config SYSCTL_ARCH_UNALIGN_ALLOW
 	  the unaligned access emulation.
 	  see arch/parisc/kernel/unaligned.c for reference
 
+config KALLSYMS_UNCOMPRESSED
+	bool "Keep kallsyms uncompressed"
+	depends on KALLSYMS
+	help
+	  Normally kallsyms stores compressed symbol names (via a token table),
+	  which keeps the uncompressed kernel image small. Storing the names
+	  uncompressed instead lets the outer image compression (e.g. LZMA)
+	  shrink this data much further, reducing the compressed image size.
+
+	  Say N unless you need compressed kernel images to be small.
+
 config HAVE_PCSPKR_PLATFORM
 	bool
 
@@ -2095,6 +2106,13 @@ config TRIM_UNUSED_KSYMS
 
 	  If unsure, or if you need to build out-of-tree modules, say N.
 
+config MODULE_STRIPPED
+	bool "Reduce module size"
+	depends on MODULES
+	help
+	  Remove module parameter descriptions, author info, version, aliases,
+	  device tables, etc.
+
 endif # MODULES
 
 config MODULES_TREE_LOOKUP
diff --git a/init/do_mounts.c b/init/do_mounts.c
index dea5de95c..deb339ab7 100644
--- a/init/do_mounts.c
+++ b/init/do_mounts.c
@@ -438,7 +438,28 @@ retry:
 out:
 	put_page(page);
 }
- 
+
+static int __init mount_ubi_rootfs(void)
+{
+	int flags = MS_SILENT;
+	int err, tried = 0;
+
+	while (tried < 2) {
+		err = do_mount_root("ubi0:rootfs", "ubifs", flags,
+				    root_mount_data);
+		switch (err) {
+		case -EACCES:
+			flags |= MS_RDONLY;
+			tried++;
+			break;
+		default:
+			return err;
+		}
+	}
+
+	return -EINVAL;
+}
+
 #ifdef CONFIG_ROOT_NFS
 
 #define NFSROOT_TIMEOUT_MIN	5
@@ -532,6 +553,10 @@ void __init mount_root(void)
 			change_floppy("root floppy");
 	}
 #endif
+#ifdef CONFIG_MTD_ROOTFS_ROOT_DEV
+	if (!mount_ubi_rootfs())
+		return;
+#endif
 #ifdef CONFIG_BLOCK
 	{
 		int err = create_dev("/dev/root", ROOT_DEV);
diff --git a/kernel/kallsyms.c b/kernel/kallsyms.c
index fafd1a3ef..abe495792 100644
--- a/kernel/kallsyms.c
+++ b/kernel/kallsyms.c
@@ -113,6 +113,11 @@ static unsigned int kallsyms_expand_symbol(unsigned int off,
 	 * For every byte on the compressed symbol data, copy the table
 	 * entry for that byte.
 	 */
+#ifdef CONFIG_KALLSYMS_UNCOMPRESSED
+	memcpy(result, data + 1, len - 1);
+	result += len - 1;
+	len = 0;
+#endif
 	while (len) {
 		tptr = &kallsyms_token_table[kallsyms_token_index[*data]];
 		data++;
@@ -145,6 +150,9 @@ tail:
  */
 static char kallsyms_get_symbol_type(unsigned int off)
 {
+#ifdef CONFIG_KALLSYMS_UNCOMPRESSED
+	return kallsyms_names[off + 1];
+#endif
 	/*
 	 * Get just the first code, look it up in the token table,
 	 * and return the first char from this token.
diff --git a/kernel/module.c b/kernel/module.c
index 0e54d5bf0..80b5ac418 100644
--- a/kernel/module.c
+++ b/kernel/module.c
@@ -2945,9 +2945,11 @@ static struct module *setup_load_info(struct load_info *info, int flags)
 
 static int check_modinfo(struct module *mod, struct load_info *info, int flags)
 {
-	const char *modmagic = get_modinfo(info, "vermagic");
 	int err;
 
+#ifndef CONFIG_MODULE_STRIPPED
+	const char *modmagic = get_modinfo(info, "vermagic");
+
 	if (flags & MODULE_INIT_IGNORE_VERMAGIC)
 		modmagic = NULL;
 
@@ -2968,6 +2970,7 @@ static int check_modinfo(struct module *mod, struct load_info *info, int flags)
 				mod->name);
 		add_taint_module(mod, TAINT_OOT_MODULE, LOCKDEP_STILL_OK);
 	}
+#endif
 
 	if (get_modinfo(info, "staging")) {
 		add_taint_module(mod, TAINT_CRAP, LOCKDEP_STILL_OK);
diff --git a/lib/Kconfig b/lib/Kconfig
index 1e4de2b65..b68a19a98 100644
--- a/lib/Kconfig
+++ b/lib/Kconfig
@@ -241,6 +241,12 @@ config LZ4_DECOMPRESS
 
 source "lib/xz/Kconfig"
 
+config LZMA_COMPRESS
+	tristate
+
+config LZMA_DECOMPRESS
+	tristate
+
 #
 # These all provide a common interface (hence the apparent duplication with
 # ZLIB_INFLATE; DECOMPRESS_GZIP is just a wrapper.)
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index f60e67217..3349498e9 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -397,6 +397,11 @@ config MAGIC_SYSRQ_DEFAULT_ENABLE
 	  This may be set to 1 or 0 to enable or disable them all, or
 	  to a bitmask as described in Documentation/sysrq.txt.
 
+config MAGIC_SYSRQ_SERIAL
+	bool "Enable magic SysRq key over serial"
+	depends on MAGIC_SYSRQ
+	default y
+
 config DEBUG_KERNEL
 	bool "Kernel debugging"
 	help
diff --git a/lib/Makefile b/lib/Makefile
index 50144a3ae..603630df2 100644
--- a/lib/Makefile
+++ b/lib/Makefile
@@ -2,6 +2,16 @@
 # Makefile for some libs needed in the kernel.
 #
 
+ifdef CONFIG_JFFS2_ZLIB
+	CONFIG_ZLIB_INFLATE:=y
+	CONFIG_ZLIB_DEFLATE:=y
+endif
+
+ifdef CONFIG_JFFS2_LZMA
+	CONFIG_LZMA_DECOMPRESS:=y
+	CONFIG_LZMA_COMPRESS:=y
+endif
+
 ifdef CONFIG_FUNCTION_TRACER
 ORIG_CFLAGS := $(KBUILD_CFLAGS)
 KBUILD_CFLAGS = $(subst $(CC_FLAGS_FTRACE),,$(ORIG_CFLAGS))
@@ -108,6 +118,8 @@ obj-$(CONFIG_LZ4HC_COMPRESS) += lz4/
 obj-$(CONFIG_LZ4_DECOMPRESS) += lz4/
 obj-$(CONFIG_XZ_DEC) += xz/
 obj-$(CONFIG_RAID6_PQ) += raid6/
+obj-$(CONFIG_LZMA_COMPRESS) += lzma/
+obj-$(CONFIG_LZMA_DECOMPRESS) += lzma/
 
 lib-$(CONFIG_DECOMPRESS_GZIP) += decompress_inflate.o
 lib-$(CONFIG_DECOMPRESS_BZIP2) += decompress_bunzip2.o
diff --git a/lib/decompress.c b/lib/decompress.c
index 62696dff5..d91a26e1a 100644
--- a/lib/decompress.c
+++ b/lib/decompress.c
@@ -48,6 +48,7 @@ static const struct compress_format compressed_formats[] __initconst = {
 	{ {0x1f, 0x9e}, "gzip", gunzip },
 	{ {0x42, 0x5a}, "bzip2", bunzip2 },
 	{ {0x5d, 0x00}, "lzma", unlzma },
+	{ {0x6d, 0x00}, "lzma-openwrt", unlzma },
 	{ {0xfd, 0x37}, "xz", unxz },
 	{ {0x89, 0x4c}, "lzo", unlzo },
 	{ {0x02, 0x21}, "lz4", unlz4 },
diff --git a/lib/lzma/LzFind.c b/lib/lzma/LzFind.c
new file mode 100644
index 000000000..e67de9d23
--- /dev/null
+++ b/lib/lzma/LzFind.c
@@ -0,0 +1,522 @@
+/* LzFind.c -- Match finder for LZ algorithms
+2009-04-22 : Igor Pavlov : Public domain */
+
+#include <string.h>
+
+#include "LzFind.h"
+#include "LzHash.h"
+
+#define kEmptyHashValue 0
+#define kMaxValForNormalize ((UInt32)0xFFFFFFFF)
+#define kNormalizeStepMin (1 << 10) /* it must be power of 2 */
+#define kNormalizeMask (~(kNormalizeStepMin - 1))
+#define kMaxHistorySize ((UInt32)3 << 30)
+
+#define kStartMaxLen 3
+
+#if 0
+#define DIRECT_INPUT	p->directInput
+#else
+#define DIRECT_INPUT	1
+#endif
+
+static void LzInWindow_Free(CMatchFinder *p, ISzAlloc *alloc)
+{
+  if (!DIRECT_INPUT)
+  {
+    alloc->Free(alloc, p->bufferBase);
+    p->bufferBase = 0;
+  }
+}
+
+/* keepSizeBefore + keepSizeAfter + keepSizeReserv must be < 4G) */
+
+static int LzInWindow_Create(CMatchFinder *p, UInt32 keepSizeReserv, ISzAlloc *alloc)
+{
+  UInt32 blockSize = p->keepSizeBefore + p->keepSizeAfter + keepSizeReserv;
+  if (DIRECT_INPUT)
+  {
+    p->blockSize = blockSize;
+    return 1;
+  }
+  if (p->bufferBase == 0 || p->blockSize != blockSize)
+  {
+    LzInWindow_Free(p, alloc);
+    p->blockSize = blockSize;
+    p->bufferBase = (Byte *)alloc->Alloc(alloc, (size_t)blockSize);
+  }
+  return (p->bufferBase != 0);
+}
+
+static Byte *MatchFinder_GetPointerToCurrentPos(CMatchFinder *p) { return p->buffer; }
+static Byte MatchFinder_GetIndexByte(CMatchFinder *p, Int32 index) { return p->buffer[index]; }
+
+static UInt32 MatchFinder_GetNumAvailableBytes(CMatchFinder *p) { return p->streamPos - p->pos; }
+
+static void MatchFinder_ReduceOffsets(CMatchFinder *p, UInt32 subValue)
+{
+  p->posLimit -= subValue;
+  p->pos -= subValue;
+  p->streamPos -= subValue;
+}
+
+static void MatchFinder_ReadBlock(CMatchFinder *p)
+{
+  if (p->streamEndWasReached || p->result != SZ_OK)
+    return;
+  if (DIRECT_INPUT)
+  {
+    UInt32 curSize = 0xFFFFFFFF - p->streamPos;
+    if (curSize > p->directInputRem)
+      curSize = (UInt32)p->directInputRem;
+    p->directInputRem -= curSize;
+    p->streamPos += curSize;
+    if (p->directInputRem == 0)
+      p->streamEndWasReached = 1;
+    return;
+  }
+  for (;;)
+  {
+    Byte *dest = p->buffer + (p->streamPos - p->pos);
+    size_t size = (p->bufferBase + p->blockSize - dest);
+    if (size == 0)
+      return;
+    p->result = p->stream->Read(p->stream, dest, &size);
+    if (p->result != SZ_OK)
+      return;
+    if (size == 0)
+    {
+      p->streamEndWasReached = 1;
+      return;
+    }
+    p->streamPos += (UInt32)size;
+    if (p->streamPos - p->pos > p->keepSizeAfter)
+      return;
+  }
+}
+
+static void MatchFinder_MoveBlock(CMatchFinder *p)
+{
+  memmove(p->bufferBase,
+    p->buffer - p->keepSizeBefore,
+    (size_t)(p->streamPos - p->pos + p->keepSizeBefore));
+  p->buffer = p->bufferBase + p->keepSizeBefore;
+}
+
+static int MatchFinder_NeedMove(CMatchFinder *p)
+{
+  if (DIRECT_INPUT)
+    return 0;
+  /* if (p->streamEndWasReached) return 0; */
+  return ((size_t)(p->bufferBase + p->blockSize - p->buffer) <= p->keepSizeAfter);
+}
+
+static void MatchFinder_CheckAndMoveAndRead(CMatchFinder *p)
+{
+  if (MatchFinder_NeedMove(p))
+    MatchFinder_MoveBlock(p);
+  MatchFinder_ReadBlock(p);
+}
+
+static void MatchFinder_SetDefaultSettings(CMatchFinder *p)
+{
+  p->cutValue = 32;
+  p->btMode = 1;
+  p->numHashBytes = 4;
+  p->bigHash = 0;
+}
+
+#define kCrcPoly 0xEDB88320
+
+void MatchFinder_Construct(CMatchFinder *p)
+{
+  UInt32 i;
+  p->bufferBase = 0;
+  p->directInput = 0;
+  p->hash = 0;
+  MatchFinder_SetDefaultSettings(p);
+
+  for (i = 0; i < 256; i++)
+  {
+    UInt32 r = i;
+    int j;
+    for (j = 0; j < 8; j++)
+      r = (r >> 1) ^ (kCrcPoly & ~((r & 1) - 1));
+    p->crc[i] = r;
+  }
+}
+
+static void MatchFinder_FreeThisClassMemory(CMatchFinder *p, ISzAlloc *alloc)
+{
+  alloc->Free(alloc, p->hash);
+  p->hash = 0;
+}
+
+void MatchFinder_Free(CMatchFinder *p, ISzAlloc *alloc)
+{
+  MatchFinder_FreeThisClassMemory(p, alloc);
+  LzInWindow_Free(p, alloc);
+}
+
+static CLzRef* AllocRefs(UInt32 num, ISzAlloc *alloc)
+{
+  size_t sizeInBytes = (size_t)num * sizeof(CLzRef);
+  if (sizeInBytes / sizeof(CLzRef) != num)
+    return 0;
+  return (CLzRef *)alloc->Alloc(alloc, sizeInBytes);
+}
+
+int MatchFinder_Create(CMatchFinder *p, UInt32 historySize,
+    UInt32 keepAddBufferBefore, UInt32 matchMaxLen, UInt32 keepAddBufferAfter,
+    ISzAlloc *alloc)
+{
+  UInt32 sizeReserv;
+  if (historySize > kMaxHistorySize)
+  {
+    MatchFinder_Free(p, alloc);
+    return 0;
+  }
+  sizeReserv = historySize >> 1;
+  if (historySize > ((UInt32)2 << 30))
+    sizeReserv = historySize >> 2;
+  sizeReserv += (keepAddBufferBefore + matchMaxLen + keepAddBufferAfter) / 2 + (1 << 19);
+
+  p->keepSizeBefore = historySize + keepAddBufferBefore + 1;
+  p->keepSizeAfter = matchMaxLen + keepAddBufferAfter;
+  /* we need one additional byte, since we use MoveBlock after pos++ and before dictionary using */
+  if (LzInWindow_Create(p, sizeReserv, alloc))
+  {
+    UInt32 newCyclicBufferSize = historySize + 1;
+    UInt32 hs;
+    p->matchMaxLen = matchMaxLen;
+    {
+      p->fixedHashSize = 0;
+      if (p->numHashBytes == 2)
+        hs = (1 << 16) - 1;
+      else
+      {
+        hs = historySize - 1;
+        hs |= (hs >> 1);
+        hs |= (hs >> 2);
+        hs |= (hs >> 4);
+        hs |= (hs >> 8);
+        hs >>= 1;
+        hs |= 0xFFFF; /* don't change it! It's required for Deflate */
+        if (hs > (1 << 24))
+        {
+          if (p->numHashBytes == 3)
+            hs = (1 << 24) - 1;
+          else
+            hs >>= 1;
+        }
+      }
+      p->hashMask = hs;
+      hs++;
+      if (p->numHashBytes > 2) p->fixedHashSize += kHash2Size;
+      if (p->numHashBytes > 3) p->fixedHashSize += kHash3Size;
+      if (p->numHashBytes > 4) p->fixedHashSize += kHash4Size;
+      hs += p->fixedHashSize;
+    }
+
+    {
+      UInt32 prevSize = p->hashSizeSum + p->numSons;
+      UInt32 newSize;
+      p->historySize = historySize;
+      p->hashSizeSum = hs;
+      p->cyclicBufferSize = newCyclicBufferSize;
+      p->numSons = (p->btMode ? newCyclicBufferSize * 2 : newCyclicBufferSize);
+      newSize = p->hashSizeSum + p->numSons;
+      if (p->hash != 0 && prevSize == newSize)
+        return 1;
+      MatchFinder_FreeThisClassMemory(p, alloc);
+      p->hash = AllocRefs(newSize, alloc);
+      if (p->hash != 0)
+      {
+        p->son = p->hash + p->hashSizeSum;
+        return 1;
+      }
+    }
+  }
+  MatchFinder_Free(p, alloc);
+  return 0;
+}
+
+static void MatchFinder_SetLimits(CMatchFinder *p)
+{
+  UInt32 limit = kMaxValForNormalize - p->pos;
+  UInt32 limit2 = p->cyclicBufferSize - p->cyclicBufferPos;
+  if (limit2 < limit)
+    limit = limit2;
+  limit2 = p->streamPos - p->pos;
+  if (limit2 <= p->keepSizeAfter)
+  {
+    if (limit2 > 0)
+      limit2 = 1;
+  }
+  else
+    limit2 -= p->keepSizeAfter;
+  if (limit2 < limit)
+    limit = limit2;
+  {
+    UInt32 lenLimit = p->streamPos - p->pos;
+    if (lenLimit > p->matchMaxLen)
+      lenLimit = p->matchMaxLen;
+    p->lenLimit = lenLimit;
+  }
+  p->posLimit = p->pos + limit;
+}
+
+static void MatchFinder_Init(CMatchFinder *p)
+{
+  UInt32 i;
+  for (i = 0; i < p->hashSizeSum; i++)
+    p->hash[i] = kEmptyHashValue;
+  p->cyclicBufferPos = 0;
+  p->buffer = p->bufferBase;
+  p->pos = p->streamPos = p->cyclicBufferSize;
+  p->result = SZ_OK;
+  p->streamEndWasReached = 0;
+  MatchFinder_ReadBlock(p);
+  MatchFinder_SetLimits(p);
+}
+
+static UInt32 MatchFinder_GetSubValue(CMatchFinder *p)
+{
+  return (p->pos - p->historySize - 1) & kNormalizeMask;
+}
+
+static void MatchFinder_Normalize3(UInt32 subValue, CLzRef *items, UInt32 numItems)
+{
+  UInt32 i;
+  for (i = 0; i < numItems; i++)
+  {
+    UInt32 value = items[i];
+    if (value <= subValue)
+      value = kEmptyHashValue;
+    else
+      value -= subValue;
+    items[i] = value;
+  }
+}
+
+static void MatchFinder_Normalize(CMatchFinder *p)
+{
+  UInt32 subValue = MatchFinder_GetSubValue(p);
+  MatchFinder_Normalize3(subValue, p->hash, p->hashSizeSum + p->numSons);
+  MatchFinder_ReduceOffsets(p, subValue);
+}
+
+static void MatchFinder_CheckLimits(CMatchFinder *p)
+{
+  if (p->pos == kMaxValForNormalize)
+    MatchFinder_Normalize(p);
+  if (!p->streamEndWasReached && p->keepSizeAfter == p->streamPos - p->pos)
+    MatchFinder_CheckAndMoveAndRead(p);
+  if (p->cyclicBufferPos == p->cyclicBufferSize)
+    p->cyclicBufferPos = 0;
+  MatchFinder_SetLimits(p);
+}
+
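+/* GetMatchesSpec1/SkipMatchesSpec traverse the binary tree stored in son[]:
+   each cyclic-buffer position has two child links, forming a search tree over
+   earlier positions ordered by their suffixes. cutValue bounds how many tree
+   nodes are visited, and every improving match is emitted into distances[]
+   as a (length, distance - 1) pair. */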
+static UInt32 *GetMatchesSpec1(UInt32 lenLimit, UInt32 curMatch, UInt32 pos, const Byte *cur, CLzRef *son,
+    UInt32 _cyclicBufferPos, UInt32 _cyclicBufferSize, UInt32 cutValue,
+    UInt32 *distances, UInt32 maxLen)
+{
+  CLzRef *ptr0 = son + (_cyclicBufferPos << 1) + 1;
+  CLzRef *ptr1 = son + (_cyclicBufferPos << 1);
+  UInt32 len0 = 0, len1 = 0;
+  for (;;)
+  {
+    UInt32 delta = pos - curMatch;
+    if (cutValue-- == 0 || delta >= _cyclicBufferSize)
+    {
+      *ptr0 = *ptr1 = kEmptyHashValue;
+      return distances;
+    }
+    {
+      CLzRef *pair = son + ((_cyclicBufferPos - delta + ((delta > _cyclicBufferPos) ? _cyclicBufferSize : 0)) << 1);
+      const Byte *pb = cur - delta;
+      UInt32 len = (len0 < len1 ? len0 : len1);
+      if (pb[len] == cur[len])
+      {
+        if (++len != lenLimit && pb[len] == cur[len])
+          while (++len != lenLimit)
+            if (pb[len] != cur[len])
+              break;
+        if (maxLen < len)
+        {
+          *distances++ = maxLen = len;
+          *distances++ = delta - 1;
+          if (len == lenLimit)
+          {
+            *ptr1 = pair[0];
+            *ptr0 = pair[1];
+            return distances;
+          }
+        }
+      }
+      if (pb[len] < cur[len])
+      {
+        *ptr1 = curMatch;
+        ptr1 = pair + 1;
+        curMatch = *ptr1;
+        len1 = len;
+      }
+      else
+      {
+        *ptr0 = curMatch;
+        ptr0 = pair;
+        curMatch = *ptr0;
+        len0 = len;
+      }
+    }
+  }
+}
+
+static void SkipMatchesSpec(UInt32 lenLimit, UInt32 curMatch, UInt32 pos, const Byte *cur, CLzRef *son,
+    UInt32 _cyclicBufferPos, UInt32 _cyclicBufferSize, UInt32 cutValue)
+{
+  CLzRef *ptr0 = son + (_cyclicBufferPos << 1) + 1;
+  CLzRef *ptr1 = son + (_cyclicBufferPos << 1);
+  UInt32 len0 = 0, len1 = 0;
+  for (;;)
+  {
+    UInt32 delta = pos - curMatch;
+    if (cutValue-- == 0 || delta >= _cyclicBufferSize)
+    {
+      *ptr0 = *ptr1 = kEmptyHashValue;
+      return;
+    }
+    {
+      CLzRef *pair = son + ((_cyclicBufferPos - delta + ((delta > _cyclicBufferPos) ? _cyclicBufferSize : 0)) << 1);
+      const Byte *pb = cur - delta;
+      UInt32 len = (len0 < len1 ? len0 : len1);
+      if (pb[len] == cur[len])
+      {
+        while (++len != lenLimit)
+          if (pb[len] != cur[len])
+            break;
+        {
+          if (len == lenLimit)
+          {
+            *ptr1 = pair[0];
+            *ptr0 = pair[1];
+            return;
+          }
+        }
+      }
+      if (pb[len] < cur[len])
+      {
+        *ptr1 = curMatch;
+        ptr1 = pair + 1;
+        curMatch = *ptr1;
+        len1 = len;
+      }
+      else
+      {
+        *ptr0 = curMatch;
+        ptr0 = pair;
+        curMatch = *ptr0;
+        len0 = len;
+      }
+    }
+  }
+}
+
+#define MOVE_POS \
+  ++p->cyclicBufferPos; \
+  p->buffer++; \
+  if (++p->pos == p->posLimit) MatchFinder_CheckLimits(p);
+
+static void MatchFinder_MovePos(CMatchFinder *p) { MOVE_POS; }
+
+#define MOVE_POS_RET MatchFinder_MovePos(p); return offset;
+
+#define GET_MATCHES_HEADER2(minLen, ret_op) \
+  UInt32 lenLimit; UInt32 hashValue; const Byte *cur; UInt32 curMatch; \
+  lenLimit = p->lenLimit; { if (lenLimit < minLen) { MatchFinder_MovePos(p); ret_op; }} \
+  cur = p->buffer;
+
+#define GET_MATCHES_HEADER(minLen) GET_MATCHES_HEADER2(minLen, return 0)
+#define SKIP_HEADER(minLen)        GET_MATCHES_HEADER2(minLen, continue)
+
+#define MF_PARAMS(p) p->pos, p->buffer, p->son, p->cyclicBufferPos, p->cyclicBufferSize, p->cutValue
+
+#define GET_MATCHES_FOOTER(offset, maxLen) \
+  offset = (UInt32)(GetMatchesSpec1(lenLimit, curMatch, MF_PARAMS(p), \
+  distances + offset, maxLen) - distances); MOVE_POS_RET;
+
+#define SKIP_FOOTER \
+  SkipMatchesSpec(lenLimit, curMatch, MF_PARAMS(p)); MatchFinder_MovePos(p);
+
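+/* bt4 match finder: short matches are probed through the fixed 2- and 3-byte
+   hash tables (hash2Value/hash3Value), while the 4-byte hash seeds the
+   binary-tree search above for longer matches. */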
+static UInt32 Bt4_MatchFinder_GetMatches(CMatchFinder *p, UInt32 *distances)
+{
+  UInt32 hash2Value, hash3Value, delta2, delta3, maxLen, offset;
+  GET_MATCHES_HEADER(4)
+
+  HASH4_CALC;
+
+  delta2 = p->pos - p->hash[                hash2Value];
+  delta3 = p->pos - p->hash[kFix3HashSize + hash3Value];
+  curMatch = p->hash[kFix4HashSize + hashValue];
+  
+  p->hash[                hash2Value] =
+  p->hash[kFix3HashSize + hash3Value] =
+  p->hash[kFix4HashSize + hashValue] = p->pos;
+
+  maxLen = 1;
+  offset = 0;
+  if (delta2 < p->cyclicBufferSize && *(cur - delta2) == *cur)
+  {
+    distances[0] = maxLen = 2;
+    distances[1] = delta2 - 1;
+    offset = 2;
+  }
+  if (delta2 != delta3 && delta3 < p->cyclicBufferSize && *(cur - delta3) == *cur)
+  {
+    maxLen = 3;
+    distances[offset + 1] = delta3 - 1;
+    offset += 2;
+    delta2 = delta3;
+  }
+  if (offset != 0)
+  {
+    for (; maxLen != lenLimit; maxLen++)
+      if (cur[(ptrdiff_t)maxLen - delta2] != cur[maxLen])
+        break;
+    distances[offset - 2] = maxLen;
+    if (maxLen == lenLimit)
+    {
+      SkipMatchesSpec(lenLimit, curMatch, MF_PARAMS(p));
+      MOVE_POS_RET;
+    }
+  }
+  if (maxLen < 3)
+    maxLen = 3;
+  GET_MATCHES_FOOTER(offset, maxLen)
+}
+
+static void Bt4_MatchFinder_Skip(CMatchFinder *p, UInt32 num)
+{
+  do
+  {
+    UInt32 hash2Value, hash3Value;
+    SKIP_HEADER(4)
+    HASH4_CALC;
+    curMatch = p->hash[kFix4HashSize + hashValue];
+    p->hash[                hash2Value] =
+    p->hash[kFix3HashSize + hash3Value] = p->pos;
+    p->hash[kFix4HashSize + hashValue] = p->pos;
+    SKIP_FOOTER
+  }
+  while (--num != 0);
+}
+
+void MatchFinder_CreateVTable(CMatchFinder *p, IMatchFinder *vTable)
+{
+  vTable->Init = (Mf_Init_Func)MatchFinder_Init;
+  vTable->GetIndexByte = (Mf_GetIndexByte_Func)MatchFinder_GetIndexByte;
+  vTable->GetNumAvailableBytes = (Mf_GetNumAvailableBytes_Func)MatchFinder_GetNumAvailableBytes;
+  vTable->GetPointerToCurrentPos = (Mf_GetPointerToCurrentPos_Func)MatchFinder_GetPointerToCurrentPos;
+  vTable->GetMatches = (Mf_GetMatches_Func)Bt4_MatchFinder_GetMatches;
+  vTable->Skip = (Mf_Skip_Func)Bt4_MatchFinder_Skip;
+}
diff --git a/lib/lzma/LzmaDec.c b/lib/lzma/LzmaDec.c
new file mode 100644
index 000000000..93ce19864
--- /dev/null
+++ b/lib/lzma/LzmaDec.c
@@ -0,0 +1,925 @@
+/* LzmaDec.c -- LZMA Decoder
+2009-09-20 : Igor Pavlov : Public domain */
+
+#include "LzmaDec.h"
+
+#include <string.h>
+
+#define kNumTopBits 24
+#define kTopValue ((UInt32)1 << kNumTopBits)
+
+#define kNumBitModelTotalBits 11
+#define kBitModelTotal (1 << kNumBitModelTotalBits)
+#define kNumMoveBits 5
+
+#define RC_INIT_SIZE 5
+
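+/* Range-decoder primitives: NORMALIZE restores the range to at least
+   kTopValue by pulling in one input byte, IF_BIT_0 compares the code against
+   the scaled probability bound, and UPDATE_0/UPDATE_1 adapt the probability
+   by kNumMoveBits. The *_CHECK variants further below do the same work
+   without modifying the probabilities, for look-ahead decoding. */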
+#define NORMALIZE if (range < kTopValue) { range <<= 8; code = (code << 8) | (*buf++); }
+
+#define IF_BIT_0(p) ttt = *(p); NORMALIZE; bound = (range >> kNumBitModelTotalBits) * ttt; if (code < bound)
+#define UPDATE_0(p) range = bound; *(p) = (CLzmaProb)(ttt + ((kBitModelTotal - ttt) >> kNumMoveBits));
+#define UPDATE_1(p) range -= bound; code -= bound; *(p) = (CLzmaProb)(ttt - (ttt >> kNumMoveBits));
+#define GET_BIT2(p, i, A0, A1) IF_BIT_0(p) \
+  { UPDATE_0(p); i = (i + i); A0; } else \
+  { UPDATE_1(p); i = (i + i) + 1; A1; }
+#define GET_BIT(p, i) GET_BIT2(p, i, ; , ;)
+
+#define TREE_GET_BIT(probs, i) { GET_BIT((probs + i), i); }
+#define TREE_DECODE(probs, limit, i) \
+  { i = 1; do { TREE_GET_BIT(probs, i); } while (i < limit); i -= limit; }
+
+/* #define _LZMA_SIZE_OPT */
+
+#ifdef _LZMA_SIZE_OPT
+#define TREE_6_DECODE(probs, i) TREE_DECODE(probs, (1 << 6), i)
+#else
+#define TREE_6_DECODE(probs, i) \
+  { i = 1; \
+  TREE_GET_BIT(probs, i); \
+  TREE_GET_BIT(probs, i); \
+  TREE_GET_BIT(probs, i); \
+  TREE_GET_BIT(probs, i); \
+  TREE_GET_BIT(probs, i); \
+  TREE_GET_BIT(probs, i); \
+  i -= 0x40; }
+#endif
+
+#define NORMALIZE_CHECK if (range < kTopValue) { if (buf >= bufLimit) return DUMMY_ERROR; range <<= 8; code = (code << 8) | (*buf++); }
+
+#define IF_BIT_0_CHECK(p) ttt = *(p); NORMALIZE_CHECK; bound = (range >> kNumBitModelTotalBits) * ttt; if (code < bound)
+#define UPDATE_0_CHECK range = bound;
+#define UPDATE_1_CHECK range -= bound; code -= bound;
+#define GET_BIT2_CHECK(p, i, A0, A1) IF_BIT_0_CHECK(p) \
+  { UPDATE_0_CHECK; i = (i + i); A0; } else \
+  { UPDATE_1_CHECK; i = (i + i) + 1; A1; }
+#define GET_BIT_CHECK(p, i) GET_BIT2_CHECK(p, i, ; , ;)
+#define TREE_DECODE_CHECK(probs, limit, i) \
+  { i = 1; do { GET_BIT_CHECK(probs + i, i) } while (i < limit); i -= limit; }
+
+
+#define kNumPosBitsMax 4
+#define kNumPosStatesMax (1 << kNumPosBitsMax)
+
+#define kLenNumLowBits 3
+#define kLenNumLowSymbols (1 << kLenNumLowBits)
+#define kLenNumMidBits 3
+#define kLenNumMidSymbols (1 << kLenNumMidBits)
+#define kLenNumHighBits 8
+#define kLenNumHighSymbols (1 << kLenNumHighBits)
+
+#define LenChoice 0
+#define LenChoice2 (LenChoice + 1)
+#define LenLow (LenChoice2 + 1)
+#define LenMid (LenLow + (kNumPosStatesMax << kLenNumLowBits))
+#define LenHigh (LenMid + (kNumPosStatesMax << kLenNumMidBits))
+#define kNumLenProbs (LenHigh + kLenNumHighSymbols)
+
+
+#define kNumStates 12
+#define kNumLitStates 7
+
+#define kStartPosModelIndex 4
+#define kEndPosModelIndex 14
+#define kNumFullDistances (1 << (kEndPosModelIndex >> 1))
+
+#define kNumPosSlotBits 6
+#define kNumLenToPosStates 4
+
+#define kNumAlignBits 4
+#define kAlignTableSize (1 << kNumAlignBits)
+
+#define kMatchMinLen 2
+#define kMatchSpecLenStart (kMatchMinLen + kLenNumLowSymbols + kLenNumMidSymbols + kLenNumHighSymbols)
+
+#define IsMatch 0
+#define IsRep (IsMatch + (kNumStates << kNumPosBitsMax))
+#define IsRepG0 (IsRep + kNumStates)
+#define IsRepG1 (IsRepG0 + kNumStates)
+#define IsRepG2 (IsRepG1 + kNumStates)
+#define IsRep0Long (IsRepG2 + kNumStates)
+#define PosSlot (IsRep0Long + (kNumStates << kNumPosBitsMax))
+#define SpecPos (PosSlot + (kNumLenToPosStates << kNumPosSlotBits))
+#define Align (SpecPos + kNumFullDistances - kEndPosModelIndex)
+#define LenCoder (Align + kAlignTableSize)
+#define RepLenCoder (LenCoder + kNumLenProbs)
+#define Literal (RepLenCoder + kNumLenProbs)
+
+#define LZMA_BASE_SIZE 1846
+#define LZMA_LIT_SIZE 768
+
+#define LzmaProps_GetNumProbs(p) ((UInt32)LZMA_BASE_SIZE + (LZMA_LIT_SIZE << ((p)->lc + (p)->lp)))
+
+#if Literal != LZMA_BASE_SIZE
+StopCompilingDueBUG
+#endif
+
+#define LZMA_DIC_MIN (1 << 12)
+
+/* The first LZMA symbol is always decoded.
+Further LZMA symbols are decoded while (buf < bufLimit); note that "buf" does
+not include the bytes consumed by the last normalization.
+Out:
+  Result:
+    SZ_OK - OK
+    SZ_ERROR_DATA - error
+  p->remainLen:
+    < kMatchSpecLenStart : normal remainder
+    = kMatchSpecLenStart : finished
+    = kMatchSpecLenStart + 1 : flush marker
+    = kMatchSpecLenStart + 2 : state init marker
+*/
+
+static int MY_FAST_CALL LzmaDec_DecodeReal(CLzmaDec *p, SizeT limit, const Byte *bufLimit)
+{
+  CLzmaProb *probs = p->probs;
+
+  unsigned state = p->state;
+  UInt32 rep0 = p->reps[0], rep1 = p->reps[1], rep2 = p->reps[2], rep3 = p->reps[3];
+  unsigned pbMask = ((unsigned)1 << (p->prop.pb)) - 1;
+  unsigned lpMask = ((unsigned)1 << (p->prop.lp)) - 1;
+  unsigned lc = p->prop.lc;
+
+  Byte *dic = p->dic;
+  SizeT dicBufSize = p->dicBufSize;
+  SizeT dicPos = p->dicPos;
+  
+  UInt32 processedPos = p->processedPos;
+  UInt32 checkDicSize = p->checkDicSize;
+  unsigned len = 0;
+
+  const Byte *buf = p->buf;
+  UInt32 range = p->range;
+  UInt32 code = p->code;
+
+  do
+  {
+    CLzmaProb *prob;
+    UInt32 bound;
+    unsigned ttt;
+    unsigned posState = processedPos & pbMask;
+
+    prob = probs + IsMatch + (state << kNumPosBitsMax) + posState;
+    IF_BIT_0(prob)
+    {
+      unsigned symbol;
+      UPDATE_0(prob);
+      prob = probs + Literal;
+      if (checkDicSize != 0 || processedPos != 0)
+        prob += (LZMA_LIT_SIZE * (((processedPos & lpMask) << lc) +
+        (dic[(dicPos == 0 ? dicBufSize : dicPos) - 1] >> (8 - lc))));
+
+      if (state < kNumLitStates)
+      {
+        state -= (state < 4) ? state : 3;
+        symbol = 1;
+        do { GET_BIT(prob + symbol, symbol) } while (symbol < 0x100);
+      }
+      else
+      {
+        unsigned matchByte = p->dic[(dicPos - rep0) + ((dicPos < rep0) ? dicBufSize : 0)];
+        unsigned offs = 0x100;
+        state -= (state < 10) ? 3 : 6;
+        symbol = 1;
+        do
+        {
+          unsigned bit;
+          CLzmaProb *probLit;
+          matchByte <<= 1;
+          bit = (matchByte & offs);
+          probLit = prob + offs + bit + symbol;
+          GET_BIT2(probLit, symbol, offs &= ~bit, offs &= bit)
+        }
+        while (symbol < 0x100);
+      }
+      dic[dicPos++] = (Byte)symbol;
+      processedPos++;
+      continue;
+    }
+    else
+    {
+      UPDATE_1(prob);
+      prob = probs + IsRep + state;
+      IF_BIT_0(prob)
+      {
+        UPDATE_0(prob);
+        state += kNumStates;
+        prob = probs + LenCoder;
+      }
+      else
+      {
+        UPDATE_1(prob);
+        if (checkDicSize == 0 && processedPos == 0)
+          return SZ_ERROR_DATA;
+        prob = probs + IsRepG0 + state;
+        IF_BIT_0(prob)
+        {
+          UPDATE_0(prob);
+          prob = probs + IsRep0Long + (state << kNumPosBitsMax) + posState;
+          IF_BIT_0(prob)
+          {
+            UPDATE_0(prob);
+            dic[dicPos] = dic[(dicPos - rep0) + ((dicPos < rep0) ? dicBufSize : 0)];
+            dicPos++;
+            processedPos++;
+            state = state < kNumLitStates ? 9 : 11;
+            continue;
+          }
+          UPDATE_1(prob);
+        }
+        else
+        {
+          UInt32 distance;
+          UPDATE_1(prob);
+          prob = probs + IsRepG1 + state;
+          IF_BIT_0(prob)
+          {
+            UPDATE_0(prob);
+            distance = rep1;
+          }
+          else
+          {
+            UPDATE_1(prob);
+            prob = probs + IsRepG2 + state;
+            IF_BIT_0(prob)
+            {
+              UPDATE_0(prob);
+              distance = rep2;
+            }
+            else
+            {
+              UPDATE_1(prob);
+              distance = rep3;
+              rep3 = rep2;
+            }
+            rep2 = rep1;
+          }
+          rep1 = rep0;
+          rep0 = distance;
+        }
+        state = state < kNumLitStates ? 8 : 11;
+        prob = probs + RepLenCoder;
+      }
+      {
+        unsigned limit, offset;
+        CLzmaProb *probLen = prob + LenChoice;
+        IF_BIT_0(probLen)
+        {
+          UPDATE_0(probLen);
+          probLen = prob + LenLow + (posState << kLenNumLowBits);
+          offset = 0;
+          limit = (1 << kLenNumLowBits);
+        }
+        else
+        {
+          UPDATE_1(probLen);
+          probLen = prob + LenChoice2;
+          IF_BIT_0(probLen)
+          {
+            UPDATE_0(probLen);
+            probLen = prob + LenMid + (posState << kLenNumMidBits);
+            offset = kLenNumLowSymbols;
+            limit = (1 << kLenNumMidBits);
+          }
+          else
+          {
+            UPDATE_1(probLen);
+            probLen = prob + LenHigh;
+            offset = kLenNumLowSymbols + kLenNumMidSymbols;
+            limit = (1 << kLenNumHighBits);
+          }
+        }
+        TREE_DECODE(probLen, limit, len);
+        len += offset;
+      }
+
+      if (state >= kNumStates)
+      {
+        UInt32 distance;
+        prob = probs + PosSlot +
+            ((len < kNumLenToPosStates ? len : kNumLenToPosStates - 1) << kNumPosSlotBits);
+        TREE_6_DECODE(prob, distance);
+        if (distance >= kStartPosModelIndex)
+        {
+          unsigned posSlot = (unsigned)distance;
+          int numDirectBits = (int)(((distance >> 1) - 1));
+          distance = (2 | (distance & 1));
+          if (posSlot < kEndPosModelIndex)
+          {
+            distance <<= numDirectBits;
+            prob = probs + SpecPos + distance - posSlot - 1;
+            {
+              UInt32 mask = 1;
+              unsigned i = 1;
+              do
+              {
+                GET_BIT2(prob + i, i, ; , distance |= mask);
+                mask <<= 1;
+              }
+              while (--numDirectBits != 0);
+            }
+          }
+          else
+          {
+            numDirectBits -= kNumAlignBits;
+            do
+            {
+              NORMALIZE
+              range >>= 1;
+              
+              {
+                UInt32 t;
+                code -= range;
+                t = (0 - ((UInt32)code >> 31)); /* (UInt32)((Int32)code >> 31) */
+                distance = (distance << 1) + (t + 1);
+                code += range & t;
+              }
+              /*
+              distance <<= 1;
+              if (code >= range)
+              {
+                code -= range;
+                distance |= 1;
+              }
+              */
+            }
+            while (--numDirectBits != 0);
+            prob = probs + Align;
+            distance <<= kNumAlignBits;
+            {
+              unsigned i = 1;
+              GET_BIT2(prob + i, i, ; , distance |= 1);
+              GET_BIT2(prob + i, i, ; , distance |= 2);
+              GET_BIT2(prob + i, i, ; , distance |= 4);
+              GET_BIT2(prob + i, i, ; , distance |= 8);
+            }
+            if (distance == (UInt32)0xFFFFFFFF)
+            {
+              len += kMatchSpecLenStart;
+              state -= kNumStates;
+              break;
+            }
+          }
+        }
+        rep3 = rep2;
+        rep2 = rep1;
+        rep1 = rep0;
+        rep0 = distance + 1;
+        if (checkDicSize == 0)
+        {
+          if (distance >= processedPos)
+            return SZ_ERROR_DATA;
+        }
+        else if (distance >= checkDicSize)
+          return SZ_ERROR_DATA;
+        state = (state < kNumStates + kNumLitStates) ? kNumLitStates : kNumLitStates + 3;
+      }
+
+      len += kMatchMinLen;
+
+      if (limit == dicPos)
+        return SZ_ERROR_DATA;
+      {
+        SizeT rem = limit - dicPos;
+        unsigned curLen = ((rem < len) ? (unsigned)rem : len);
+        SizeT pos = (dicPos - rep0) + ((dicPos < rep0) ? dicBufSize : 0);
+
+        processedPos += curLen;
+
+        len -= curLen;
+        if (pos + curLen <= dicBufSize)
+        {
+          Byte *dest = dic + dicPos;
+          ptrdiff_t src = (ptrdiff_t)pos - (ptrdiff_t)dicPos;
+          const Byte *lim = dest + curLen;
+          dicPos += curLen;
+          do
+            *(dest) = (Byte)*(dest + src);
+          while (++dest != lim);
+        }
+        else
+        {
+          do
+          {
+            dic[dicPos++] = dic[pos];
+            if (++pos == dicBufSize)
+              pos = 0;
+          }
+          while (--curLen != 0);
+        }
+      }
+    }
+  }
+  while (dicPos < limit && buf < bufLimit);
+  NORMALIZE;
+  p->buf = buf;
+  p->range = range;
+  p->code = code;
+  p->remainLen = len;
+  p->dicPos = dicPos;
+  p->processedPos = processedPos;
+  p->reps[0] = rep0;
+  p->reps[1] = rep1;
+  p->reps[2] = rep2;
+  p->reps[3] = rep3;
+  p->state = state;
+
+  return SZ_OK;
+}
+
+static void MY_FAST_CALL LzmaDec_WriteRem(CLzmaDec *p, SizeT limit)
+{
+  if (p->remainLen != 0 && p->remainLen < kMatchSpecLenStart)
+  {
+    Byte *dic = p->dic;
+    SizeT dicPos = p->dicPos;
+    SizeT dicBufSize = p->dicBufSize;
+    unsigned len = p->remainLen;
+    UInt32 rep0 = p->reps[0];
+    if (limit - dicPos < len)
+      len = (unsigned)(limit - dicPos);
+
+    if (p->checkDicSize == 0 && p->prop.dicSize - p->processedPos <= len)
+      p->checkDicSize = p->prop.dicSize;
+
+    p->processedPos += len;
+    p->remainLen -= len;
+    while (len-- != 0)
+    {
+      dic[dicPos] = dic[(dicPos - rep0) + ((dicPos < rep0) ? dicBufSize : 0)];
+      dicPos++;
+    }
+    p->dicPos = dicPos;
+  }
+}
+
+static int MY_FAST_CALL LzmaDec_DecodeReal2(CLzmaDec *p, SizeT limit, const Byte *bufLimit)
+{
+  do
+  {
+    SizeT limit2 = limit;
+    if (p->checkDicSize == 0)
+    {
+      UInt32 rem = p->prop.dicSize - p->processedPos;
+      if (limit - p->dicPos > rem)
+        limit2 = p->dicPos + rem;
+    }
+    RINOK(LzmaDec_DecodeReal(p, limit2, bufLimit));
+    if (p->processedPos >= p->prop.dicSize)
+      p->checkDicSize = p->prop.dicSize;
+    LzmaDec_WriteRem(p, limit);
+  }
+  while (p->dicPos < limit && p->buf < bufLimit && p->remainLen < kMatchSpecLenStart);
+
+  if (p->remainLen > kMatchSpecLenStart)
+  {
+    p->remainLen = kMatchSpecLenStart;
+  }
+  return 0;
+}
+
+typedef enum
+{
+  DUMMY_ERROR, /* unexpected end of input stream */
+  DUMMY_LIT,
+  DUMMY_MATCH,
+  DUMMY_REP
+} ELzmaDummy;
+
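+/* LzmaDec_TryDummy decodes a single symbol using the *_CHECK macros, so no
+   decoder state or probabilities are modified. It is used to check whether
+   the buffered input holds a complete symbol; DUMMY_ERROR means the input
+   ended prematurely. */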
+static ELzmaDummy LzmaDec_TryDummy(const CLzmaDec *p, const Byte *buf, SizeT inSize)
+{
+  UInt32 range = p->range;
+  UInt32 code = p->code;
+  const Byte *bufLimit = buf + inSize;
+  CLzmaProb *probs = p->probs;
+  unsigned state = p->state;
+  ELzmaDummy res;
+
+  {
+    CLzmaProb *prob;
+    UInt32 bound;
+    unsigned ttt;
+    unsigned posState = (p->processedPos) & ((1 << p->prop.pb) - 1);
+
+    prob = probs + IsMatch + (state << kNumPosBitsMax) + posState;
+    IF_BIT_0_CHECK(prob)
+    {
+      UPDATE_0_CHECK
+
+      /* if (bufLimit - buf >= 7) return DUMMY_LIT; */
+
+      prob = probs + Literal;
+      if (p->checkDicSize != 0 || p->processedPos != 0)
+        prob += (LZMA_LIT_SIZE *
+          ((((p->processedPos) & ((1 << (p->prop.lp)) - 1)) << p->prop.lc) +
+          (p->dic[(p->dicPos == 0 ? p->dicBufSize : p->dicPos) - 1] >> (8 - p->prop.lc))));
+
+      if (state < kNumLitStates)
+      {
+        unsigned symbol = 1;
+        do { GET_BIT_CHECK(prob + symbol, symbol) } while (symbol < 0x100);
+      }
+      else
+      {
+        unsigned matchByte = p->dic[p->dicPos - p->reps[0] +
+            ((p->dicPos < p->reps[0]) ? p->dicBufSize : 0)];
+        unsigned offs = 0x100;
+        unsigned symbol = 1;
+        do
+        {
+          unsigned bit;
+          CLzmaProb *probLit;
+          matchByte <<= 1;
+          bit = (matchByte & offs);
+          probLit = prob + offs + bit + symbol;
+          GET_BIT2_CHECK(probLit, symbol, offs &= ~bit, offs &= bit)
+        }
+        while (symbol < 0x100);
+      }
+      res = DUMMY_LIT;
+    }
+    else
+    {
+      unsigned len;
+      UPDATE_1_CHECK;
+
+      prob = probs + IsRep + state;
+      IF_BIT_0_CHECK(prob)
+      {
+        UPDATE_0_CHECK;
+        state = 0;
+        prob = probs + LenCoder;
+        res = DUMMY_MATCH;
+      }
+      else
+      {
+        UPDATE_1_CHECK;
+        res = DUMMY_REP;
+        prob = probs + IsRepG0 + state;
+        IF_BIT_0_CHECK(prob)
+        {
+          UPDATE_0_CHECK;
+          prob = probs + IsRep0Long + (state << kNumPosBitsMax) + posState;
+          IF_BIT_0_CHECK(prob)
+          {
+            UPDATE_0_CHECK;
+            NORMALIZE_CHECK;
+            return DUMMY_REP;
+          }
+          else
+          {
+            UPDATE_1_CHECK;
+          }
+        }
+        else
+        {
+          UPDATE_1_CHECK;
+          prob = probs + IsRepG1 + state;
+          IF_BIT_0_CHECK(prob)
+          {
+            UPDATE_0_CHECK;
+          }
+          else
+          {
+            UPDATE_1_CHECK;
+            prob = probs + IsRepG2 + state;
+            IF_BIT_0_CHECK(prob)
+            {
+              UPDATE_0_CHECK;
+            }
+            else
+            {
+              UPDATE_1_CHECK;
+            }
+          }
+        }
+        state = kNumStates;
+        prob = probs + RepLenCoder;
+      }
+      {
+        unsigned limit, offset;
+        CLzmaProb *probLen = prob + LenChoice;
+        IF_BIT_0_CHECK(probLen)
+        {
+          UPDATE_0_CHECK;
+          probLen = prob + LenLow + (posState << kLenNumLowBits);
+          offset = 0;
+          limit = 1 << kLenNumLowBits;
+        }
+        else
+        {
+          UPDATE_1_CHECK;
+          probLen = prob + LenChoice2;
+          IF_BIT_0_CHECK(probLen)
+          {
+            UPDATE_0_CHECK;
+            probLen = prob + LenMid + (posState << kLenNumMidBits);
+            offset = kLenNumLowSymbols;
+            limit = 1 << kLenNumMidBits;
+          }
+          else
+          {
+            UPDATE_1_CHECK;
+            probLen = prob + LenHigh;
+            offset = kLenNumLowSymbols + kLenNumMidSymbols;
+            limit = 1 << kLenNumHighBits;
+          }
+        }
+        TREE_DECODE_CHECK(probLen, limit, len);
+        len += offset;
+      }
+
+      if (state < 4)
+      {
+        unsigned posSlot;
+        prob = probs + PosSlot +
+            ((len < kNumLenToPosStates ? len : kNumLenToPosStates - 1) <<
+            kNumPosSlotBits);
+        TREE_DECODE_CHECK(prob, 1 << kNumPosSlotBits, posSlot);
+        if (posSlot >= kStartPosModelIndex)
+        {
+          int numDirectBits = ((posSlot >> 1) - 1);
+
+          /* if (bufLimit - buf >= 8) return DUMMY_MATCH; */
+
+          if (posSlot < kEndPosModelIndex)
+          {
+            prob = probs + SpecPos + ((2 | (posSlot & 1)) << numDirectBits) - posSlot - 1;
+          }
+          else
+          {
+            numDirectBits -= kNumAlignBits;
+            do
+            {
+              NORMALIZE_CHECK
+              range >>= 1;
+              code -= range & (((code - range) >> 31) - 1);
+              /* if (code >= range) code -= range; */
+            }
+            while (--numDirectBits != 0);
+            prob = probs + Align;
+            numDirectBits = kNumAlignBits;
+          }
+          {
+            unsigned i = 1;
+            do
+            {
+              GET_BIT_CHECK(prob + i, i);
+            }
+            while (--numDirectBits != 0);
+          }
+        }
+      }
+    }
+  }
+  NORMALIZE_CHECK;
+  return res;
+}
+
+
+static void LzmaDec_InitRc(CLzmaDec *p, const Byte *data)
+{
+  p->code = ((UInt32)data[1] << 24) | ((UInt32)data[2] << 16) | ((UInt32)data[3] << 8) | ((UInt32)data[4]);
+  p->range = 0xFFFFFFFF;
+  p->needFlush = 0;
+}
+
+static void LzmaDec_InitDicAndState(CLzmaDec *p, Bool initDic, Bool initState)
+{
+  p->needFlush = 1;
+  p->remainLen = 0;
+  p->tempBufSize = 0;
+
+  if (initDic)
+  {
+    p->processedPos = 0;
+    p->checkDicSize = 0;
+    p->needInitState = 1;
+  }
+  if (initState)
+    p->needInitState = 1;
+}
+
+static void LzmaDec_Init(CLzmaDec *p)
+{
+  p->dicPos = 0;
+  LzmaDec_InitDicAndState(p, True, True);
+}
+
+static void LzmaDec_InitStateReal(CLzmaDec *p)
+{
+  UInt32 numProbs = Literal + ((UInt32)LZMA_LIT_SIZE << (p->prop.lc + p->prop.lp));
+  UInt32 i;
+  CLzmaProb *probs = p->probs;
+  for (i = 0; i < numProbs; i++)
+    probs[i] = kBitModelTotal >> 1;
+  p->reps[0] = p->reps[1] = p->reps[2] = p->reps[3] = 1;
+  p->state = 0;
+  p->needInitState = 0;
+}
+
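+/* LzmaDec_DecodeToDic runs LzmaDec_TryDummy to verify that a complete symbol
+   is buffered whenever the remaining input is shorter than
+   LZMA_REQUIRED_INPUT_MAX (or the end marker must be checked); leftover
+   partial input is carried over in tempBuf between calls. */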
+static SRes LzmaDec_DecodeToDic(CLzmaDec *p, SizeT dicLimit, const Byte *src, SizeT *srcLen,
+    ELzmaFinishMode finishMode, ELzmaStatus *status)
+{
+  SizeT inSize = *srcLen;
+  (*srcLen) = 0;
+  LzmaDec_WriteRem(p, dicLimit);
+  
+  *status = LZMA_STATUS_NOT_SPECIFIED;
+
+  while (p->remainLen != kMatchSpecLenStart)
+  {
+      int checkEndMarkNow;
+
+      if (p->needFlush != 0)
+      {
+        for (; inSize > 0 && p->tempBufSize < RC_INIT_SIZE; (*srcLen)++, inSize--)
+          p->tempBuf[p->tempBufSize++] = *src++;
+        if (p->tempBufSize < RC_INIT_SIZE)
+        {
+          *status = LZMA_STATUS_NEEDS_MORE_INPUT;
+          return SZ_OK;
+        }
+        if (p->tempBuf[0] != 0)
+          return SZ_ERROR_DATA;
+
+        LzmaDec_InitRc(p, p->tempBuf);
+        p->tempBufSize = 0;
+      }
+
+      checkEndMarkNow = 0;
+      if (p->dicPos >= dicLimit)
+      {
+        if (p->remainLen == 0 && p->code == 0)
+        {
+          *status = LZMA_STATUS_MAYBE_FINISHED_WITHOUT_MARK;
+          return SZ_OK;
+        }
+        if (finishMode == LZMA_FINISH_ANY)
+        {
+          *status = LZMA_STATUS_NOT_FINISHED;
+          return SZ_OK;
+        }
+        if (p->remainLen != 0)
+        {
+          *status = LZMA_STATUS_NOT_FINISHED;
+          return SZ_ERROR_DATA;
+        }
+        checkEndMarkNow = 1;
+      }
+
+      if (p->needInitState)
+        LzmaDec_InitStateReal(p);
+  
+      if (p->tempBufSize == 0)
+      {
+        SizeT processed;
+        const Byte *bufLimit;
+        if (inSize < LZMA_REQUIRED_INPUT_MAX || checkEndMarkNow)
+        {
+          int dummyRes = LzmaDec_TryDummy(p, src, inSize);
+          if (dummyRes == DUMMY_ERROR)
+          {
+            memcpy(p->tempBuf, src, inSize);
+            p->tempBufSize = (unsigned)inSize;
+            (*srcLen) += inSize;
+            *status = LZMA_STATUS_NEEDS_MORE_INPUT;
+            return SZ_OK;
+          }
+          if (checkEndMarkNow && dummyRes != DUMMY_MATCH)
+          {
+            *status = LZMA_STATUS_NOT_FINISHED;
+            return SZ_ERROR_DATA;
+          }
+          bufLimit = src;
+        }
+        else
+          bufLimit = src + inSize - LZMA_REQUIRED_INPUT_MAX;
+        p->buf = src;
+        if (LzmaDec_DecodeReal2(p, dicLimit, bufLimit) != 0)
+          return SZ_ERROR_DATA;
+        processed = (SizeT)(p->buf - src);
+        (*srcLen) += processed;
+        src += processed;
+        inSize -= processed;
+      }
+      else
+      {
+        unsigned rem = p->tempBufSize, lookAhead = 0;
+        while (rem < LZMA_REQUIRED_INPUT_MAX && lookAhead < inSize)
+          p->tempBuf[rem++] = src[lookAhead++];
+        p->tempBufSize = rem;
+        if (rem < LZMA_REQUIRED_INPUT_MAX || checkEndMarkNow)
+        {
+          int dummyRes = LzmaDec_TryDummy(p, p->tempBuf, rem);
+          if (dummyRes == DUMMY_ERROR)
+          {
+            (*srcLen) += lookAhead;
+            *status = LZMA_STATUS_NEEDS_MORE_INPUT;
+            return SZ_OK;
+          }
+          if (checkEndMarkNow && dummyRes != DUMMY_MATCH)
+          {
+            *status = LZMA_STATUS_NOT_FINISHED;
+            return SZ_ERROR_DATA;
+          }
+        }
+        p->buf = p->tempBuf;
+        if (LzmaDec_DecodeReal2(p, dicLimit, p->buf) != 0)
+          return SZ_ERROR_DATA;
+        lookAhead -= (rem - (unsigned)(p->buf - p->tempBuf));
+        (*srcLen) += lookAhead;
+        src += lookAhead;
+        inSize -= lookAhead;
+        p->tempBufSize = 0;
+      }
+  }
+  if (p->code == 0)
+    *status = LZMA_STATUS_FINISHED_WITH_MARK;
+  return (p->code == 0) ? SZ_OK : SZ_ERROR_DATA;
+}
+
+static void LzmaDec_FreeProbs(CLzmaDec *p, ISzAlloc *alloc)
+{
+  alloc->Free(alloc, p->probs);
+  p->probs = 0;
+}
+
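+/* LZMA properties layout (5 bytes): byte 0 encodes lc/lp/pb as
+   d = (pb * 5 + lp) * 9 + lc (so d must be < 9*5*5), and bytes 1..4 hold the
+   dictionary size as a little-endian 32-bit value, clamped to LZMA_DIC_MIN. */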
+static SRes LzmaProps_Decode(CLzmaProps *p, const Byte *data, unsigned int size)
+{
+  UInt32 dicSize;
+  Byte d;
+  
+  if (size < LZMA_PROPS_SIZE)
+    return SZ_ERROR_UNSUPPORTED;
+  else
+    dicSize = data[1] | ((UInt32)data[2] << 8) | ((UInt32)data[3] << 16) | ((UInt32)data[4] << 24);
+ 
+  if (dicSize < LZMA_DIC_MIN)
+    dicSize = LZMA_DIC_MIN;
+  p->dicSize = dicSize;
+
+  d = data[0];
+  if (d >= (9 * 5 * 5))
+    return SZ_ERROR_UNSUPPORTED;
+
+  p->lc = d % 9;
+  d /= 9;
+  p->pb = d / 5;
+  p->lp = d % 5;
+
+  return SZ_OK;
+}
+
+static SRes LzmaDec_AllocateProbs2(CLzmaDec *p, const CLzmaProps *propNew, ISzAlloc *alloc)
+{
+  UInt32 numProbs = LzmaProps_GetNumProbs(propNew);
+  if (p->probs == 0 || numProbs != p->numProbs)
+  {
+    LzmaDec_FreeProbs(p, alloc);
+    p->probs = (CLzmaProb *)alloc->Alloc(alloc, numProbs * sizeof(CLzmaProb));
+    p->numProbs = numProbs;
+    if (p->probs == 0)
+      return SZ_ERROR_MEM;
+  }
+  return SZ_OK;
+}
+
+static SRes LzmaDec_AllocateProbs(CLzmaDec *p, const Byte *props, unsigned int propsSize, ISzAlloc *alloc)
+{
+  CLzmaProps propNew;
+  RINOK(LzmaProps_Decode(&propNew, props, propsSize));
+  RINOK(LzmaDec_AllocateProbs2(p, &propNew, alloc));
+  p->prop = propNew;
+  return SZ_OK;
+}
+
+SRes LzmaDecode(Byte *dest, SizeT *destLen, const Byte *src, SizeT *srcLen,
+    const Byte *propData, unsigned propSize, ELzmaFinishMode finishMode,
+    ELzmaStatus *status, ISzAlloc *alloc)
+{
+  CLzmaDec p;
+  SRes res;
+  SizeT inSize = *srcLen;
+  SizeT outSize = *destLen;
+  *srcLen = *destLen = 0;
+  if (inSize < RC_INIT_SIZE)
+    return SZ_ERROR_INPUT_EOF;
+
+  LzmaDec_Construct(&p);
+  res = LzmaDec_AllocateProbs(&p, propData, propSize, alloc);
+  if (res != 0)
+    return res;
+  p.dic = dest;
+  p.dicBufSize = outSize;
+
+  LzmaDec_Init(&p);
+  
+  *srcLen = inSize;
+  res = LzmaDec_DecodeToDic(&p, outSize, src, srcLen, finishMode, status);
+
+  if (res == SZ_OK && *status == LZMA_STATUS_NEEDS_MORE_INPUT)
+    res = SZ_ERROR_INPUT_EOF;
+
+  (*destLen) = p.dicPos;
+  LzmaDec_FreeProbs(&p, alloc);
+  return res;
+}
diff --git a/lib/lzma/LzmaEnc.c b/lib/lzma/LzmaEnc.c
new file mode 100644
index 000000000..943965647
--- /dev/null
+++ b/lib/lzma/LzmaEnc.c
@@ -0,0 +1,2123 @@
+/* LzmaEnc.c -- LZMA Encoder
+2009-11-24 : Igor Pavlov : Public domain */
+
+#include <string.h>
+
+/* #define SHOW_STAT */
+/* #define SHOW_STAT2 */
+
+#if defined(SHOW_STAT) || defined(SHOW_STAT2)
+#include <stdio.h>
+#endif
+
+#include "LzmaEnc.h"
+
+/* disable MT */
+#define _7ZIP_ST
+
+#include "LzFind.h"
+#ifndef _7ZIP_ST
+#include "LzFindMt.h"
+#endif
+
+#ifdef SHOW_STAT
+static int ttt = 0;
+#endif
+
+#define kBlockSizeMax ((1 << LZMA_NUM_BLOCK_SIZE_BITS) - 1)
+
+#define kBlockSize (9 << 10)
+#define kUnpackBlockSize (1 << 18)
+#define kMatchArraySize (1 << 21)
+#define kMatchRecordMaxSize ((LZMA_MATCH_LEN_MAX * 2 + 3) * LZMA_MATCH_LEN_MAX)
+
+#define kNumMaxDirectBits (31)
+
+#define kNumTopBits 24
+#define kTopValue ((UInt32)1 << kNumTopBits)
+
+#define kNumBitModelTotalBits 11
+#define kBitModelTotal (1 << kNumBitModelTotalBits)
+#define kNumMoveBits 5
+#define kProbInitValue (kBitModelTotal >> 1)
+
+#define kNumMoveReducingBits 4
+#define kNumBitPriceShiftBits 4
+#define kBitPrice (1 << kNumBitPriceShiftBits)
+
+void LzmaEncProps_Init(CLzmaEncProps *p)
+{
+  p->level = 5;
+  p->dictSize = p->mc = 0;
+  p->lc = p->lp = p->pb = p->algo = p->fb = p->btMode = p->numHashBytes = p->numThreads = -1;
+  p->writeEndMark = 0;
+}
+
+static void LzmaEncProps_Normalize(CLzmaEncProps *p)
+{
+  int level = p->level;
+  if (level < 0) level = 5;
+  p->level = level;
+  if (p->dictSize == 0) p->dictSize = (level <= 5 ? (1 << (level * 2 + 14)) : (level == 6 ? (1 << 25) : (1 << 26)));
+  if (p->lc < 0) p->lc = 3;
+  if (p->lp < 0) p->lp = 0;
+  if (p->pb < 0) p->pb = 2;
+  if (p->algo < 0) p->algo = (level < 5 ? 0 : 1);
+  if (p->fb < 0) p->fb = (level < 7 ? 32 : 64);
+  if (p->btMode < 0) p->btMode = (p->algo == 0 ? 0 : 1);
+  if (p->numHashBytes < 0) p->numHashBytes = 4;
+  if (p->mc == 0)  p->mc = (16 + (p->fb >> 1)) >> (p->btMode ? 0 : 1);
+  if (p->numThreads < 0)
+    p->numThreads =
+      #ifndef _7ZIP_ST
+      ((p->btMode && p->algo) ? 2 : 1);
+      #else
+      1;
+      #endif
+}
+
+static UInt32 __maybe_unused LzmaEncProps_GetDictSize(const CLzmaEncProps *props2)
+{
+  CLzmaEncProps props = *props2;
+  LzmaEncProps_Normalize(&props);
+  return props.dictSize;
+}
+
+/* #define LZMA_LOG_BSR */
+/* Define it on x86 CPUs: LZMA_LOG_BSR uses the _BitScanReverse intrinsic instead of the g_FastPos table */
+
+
+#ifdef LZMA_LOG_BSR
+
+#define kDicLogSizeMaxCompress 30
+
+#define BSR2_RET(pos, res) { unsigned long i; _BitScanReverse(&i, (pos)); res = (i + i) + ((pos >> (i - 1)) & 1); }
+
+static UInt32 GetPosSlot1(UInt32 pos)
+{
+  UInt32 res;
+  BSR2_RET(pos, res);
+  return res;
+}
+#define GetPosSlot2(pos, res) { BSR2_RET(pos, res); }
+#define GetPosSlot(pos, res) { if (pos < 2) res = pos; else BSR2_RET(pos, res); }
+
+#else
+
+#define kNumLogBits (9 + (int)sizeof(size_t) / 2)
+#define kDicLogSizeMaxCompress ((kNumLogBits - 1) * 2 + 7)
+
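+/* g_FastPos[] is a lookup table from small values to their position slot
+   (roughly 2*floor(log2) plus the following bit); GetPosSlot/BSR2_RET use it
+   to avoid a hardware bit-scan when LZMA_LOG_BSR is not defined. */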
+static void LzmaEnc_FastPosInit(Byte *g_FastPos)
+{
+  int c = 2, slotFast;
+  g_FastPos[0] = 0;
+  g_FastPos[1] = 1;
+  
+  for (slotFast = 2; slotFast < kNumLogBits * 2; slotFast++)
+  {
+    UInt32 k = (1 << ((slotFast >> 1) - 1));
+    UInt32 j;
+    for (j = 0; j < k; j++, c++)
+      g_FastPos[c] = (Byte)slotFast;
+  }
+}
+
+#define BSR2_RET(pos, res) { UInt32 i = 6 + ((kNumLogBits - 1) & \
+  (0 - (((((UInt32)1 << (kNumLogBits + 6)) - 1) - pos) >> 31))); \
+  res = p->g_FastPos[pos >> i] + (i * 2); }
+/*
+#define BSR2_RET(pos, res) { res = (pos < (1 << (kNumLogBits + 6))) ? \
+  p->g_FastPos[pos >> 6] + 12 : \
+  p->g_FastPos[pos >> (6 + kNumLogBits - 1)] + (6 + (kNumLogBits - 1)) * 2; }
+*/
+
+#define GetPosSlot1(pos) p->g_FastPos[pos]
+#define GetPosSlot2(pos, res) { BSR2_RET(pos, res); }
+#define GetPosSlot(pos, res) { if (pos < kNumFullDistances) res = p->g_FastPos[pos]; else BSR2_RET(pos, res); }
+
+#endif
+
+
+#define LZMA_NUM_REPS 4
+
+typedef unsigned CState;
+
+typedef struct
+{
+  UInt32 price;
+
+  CState state;
+  int prev1IsChar;
+  int prev2;
+
+  UInt32 posPrev2;
+  UInt32 backPrev2;
+
+  UInt32 posPrev;
+  UInt32 backPrev;
+  UInt32 backs[LZMA_NUM_REPS];
+} COptimal;
+
+#define kNumOpts (1 << 12)
+
+#define kNumLenToPosStates 4
+#define kNumPosSlotBits 6
+#define kDicLogSizeMin 0
+#define kDicLogSizeMax 32
+#define kDistTableSizeMax (kDicLogSizeMax * 2)
+
+
+#define kNumAlignBits 4
+#define kAlignTableSize (1 << kNumAlignBits)
+#define kAlignMask (kAlignTableSize - 1)
+
+#define kStartPosModelIndex 4
+#define kEndPosModelIndex 14
+#define kNumPosModels (kEndPosModelIndex - kStartPosModelIndex)
+
+#define kNumFullDistances (1 << (kEndPosModelIndex >> 1))
+
+#ifdef _LZMA_PROB32
+#define CLzmaProb UInt32
+#else
+#define CLzmaProb UInt16
+#endif
+
+#define LZMA_PB_MAX 4
+#define LZMA_LC_MAX 8
+#define LZMA_LP_MAX 4
+
+#define LZMA_NUM_PB_STATES_MAX (1 << LZMA_PB_MAX)
+
+
+#define kLenNumLowBits 3
+#define kLenNumLowSymbols (1 << kLenNumLowBits)
+#define kLenNumMidBits 3
+#define kLenNumMidSymbols (1 << kLenNumMidBits)
+#define kLenNumHighBits 8
+#define kLenNumHighSymbols (1 << kLenNumHighBits)
+
+#define kLenNumSymbolsTotal (kLenNumLowSymbols + kLenNumMidSymbols + kLenNumHighSymbols)
+
+#define LZMA_MATCH_LEN_MIN 2
+#define LZMA_MATCH_LEN_MAX (LZMA_MATCH_LEN_MIN + kLenNumSymbolsTotal - 1)
+
+#define kNumStates 12
+
+typedef struct
+{
+  CLzmaProb choice;
+  CLzmaProb choice2;
+  CLzmaProb low[LZMA_NUM_PB_STATES_MAX << kLenNumLowBits];
+  CLzmaProb mid[LZMA_NUM_PB_STATES_MAX << kLenNumMidBits];
+  CLzmaProb high[kLenNumHighSymbols];
+} CLenEnc;
+
+typedef struct
+{
+  CLenEnc p;
+  UInt32 prices[LZMA_NUM_PB_STATES_MAX][kLenNumSymbolsTotal];
+  UInt32 tableSize;
+  UInt32 counters[LZMA_NUM_PB_STATES_MAX];
+} CLenPriceEnc;
+
+typedef struct
+{
+  UInt32 range;
+  Byte cache;
+  UInt64 low;
+  UInt64 cacheSize;
+  Byte *buf;
+  Byte *bufLim;
+  Byte *bufBase;
+  ISeqOutStream *outStream;
+  UInt64 processed;
+  SRes res;
+} CRangeEnc;
+
+typedef struct
+{
+  CLzmaProb *litProbs;
+
+  CLzmaProb isMatch[kNumStates][LZMA_NUM_PB_STATES_MAX];
+  CLzmaProb isRep[kNumStates];
+  CLzmaProb isRepG0[kNumStates];
+  CLzmaProb isRepG1[kNumStates];
+  CLzmaProb isRepG2[kNumStates];
+  CLzmaProb isRep0Long[kNumStates][LZMA_NUM_PB_STATES_MAX];
+
+  CLzmaProb posSlotEncoder[kNumLenToPosStates][1 << kNumPosSlotBits];
+  CLzmaProb posEncoders[kNumFullDistances - kEndPosModelIndex];
+  CLzmaProb posAlignEncoder[1 << kNumAlignBits];
+  
+  CLenPriceEnc lenEnc;
+  CLenPriceEnc repLenEnc;
+
+  UInt32 reps[LZMA_NUM_REPS];
+  UInt32 state;
+} CSaveState;
+
+typedef struct
+{
+  IMatchFinder matchFinder;
+  void *matchFinderObj;
+
+  #ifndef _7ZIP_ST
+  Bool mtMode;
+  CMatchFinderMt matchFinderMt;
+  #endif
+
+  CMatchFinder matchFinderBase;
+
+  #ifndef _7ZIP_ST
+  Byte pad[128];
+  #endif
+  
+  UInt32 optimumEndIndex;
+  UInt32 optimumCurrentIndex;
+
+  UInt32 longestMatchLength;
+  UInt32 numPairs;
+  UInt32 numAvail;
+  COptimal opt[kNumOpts];
+  
+  #ifndef LZMA_LOG_BSR
+  Byte g_FastPos[1 << kNumLogBits];
+  #endif
+
+  UInt32 ProbPrices[kBitModelTotal >> kNumMoveReducingBits];
+  UInt32 matches[LZMA_MATCH_LEN_MAX * 2 + 2 + 1];
+  UInt32 numFastBytes;
+  UInt32 additionalOffset;
+  UInt32 reps[LZMA_NUM_REPS];
+  UInt32 state;
+
+  UInt32 posSlotPrices[kNumLenToPosStates][kDistTableSizeMax];
+  UInt32 distancesPrices[kNumLenToPosStates][kNumFullDistances];
+  UInt32 alignPrices[kAlignTableSize];
+  UInt32 alignPriceCount;
+
+  UInt32 distTableSize;
+
+  unsigned lc, lp, pb;
+  unsigned lpMask, pbMask;
+
+  CLzmaProb *litProbs;
+
+  CLzmaProb isMatch[kNumStates][LZMA_NUM_PB_STATES_MAX];
+  CLzmaProb isRep[kNumStates];
+  CLzmaProb isRepG0[kNumStates];
+  CLzmaProb isRepG1[kNumStates];
+  CLzmaProb isRepG2[kNumStates];
+  CLzmaProb isRep0Long[kNumStates][LZMA_NUM_PB_STATES_MAX];
+
+  CLzmaProb posSlotEncoder[kNumLenToPosStates][1 << kNumPosSlotBits];
+  CLzmaProb posEncoders[kNumFullDistances - kEndPosModelIndex];
+  CLzmaProb posAlignEncoder[1 << kNumAlignBits];
+  
+  CLenPriceEnc lenEnc;
+  CLenPriceEnc repLenEnc;
+
+  unsigned lclp;
+
+  Bool fastMode;
+  
+  CRangeEnc rc;
+
+  Bool writeEndMark;
+  UInt64 nowPos64;
+  UInt32 matchPriceCount;
+  Bool finished;
+  Bool multiThread;
+
+  SRes result;
+  UInt32 dictSize;
+  UInt32 matchFinderCycles;
+
+  int needInit;
+
+  CSaveState saveState;
+} CLzmaEnc;
+
+SRes LzmaEnc_SetProps(CLzmaEncHandle pp, const CLzmaEncProps *props2)
+{
+  CLzmaEnc *p = (CLzmaEnc *)pp;
+  CLzmaEncProps props = *props2;
+  LzmaEncProps_Normalize(&props);
+
+  if (props.lc > LZMA_LC_MAX || props.lp > LZMA_LP_MAX || props.pb > LZMA_PB_MAX ||
+      props.dictSize > (1 << kDicLogSizeMaxCompress) || props.dictSize > (1 << 30))
+    return SZ_ERROR_PARAM;
+  p->dictSize = props.dictSize;
+  p->matchFinderCycles = props.mc;
+  {
+    unsigned fb = props.fb;
+    if (fb < 5)
+      fb = 5;
+    if (fb > LZMA_MATCH_LEN_MAX)
+      fb = LZMA_MATCH_LEN_MAX;
+    p->numFastBytes = fb;
+  }
+  p->lc = props.lc;
+  p->lp = props.lp;
+  p->pb = props.pb;
+  p->fastMode = (props.algo == 0);
+  p->matchFinderBase.btMode = props.btMode;
+  {
+    UInt32 numHashBytes = 4;
+    if (props.btMode)
+    {
+      if (props.numHashBytes < 2)
+        numHashBytes = 2;
+      else if (props.numHashBytes < 4)
+        numHashBytes = props.numHashBytes;
+    }
+    p->matchFinderBase.numHashBytes = numHashBytes;
+  }
+
+  p->matchFinderBase.cutValue = props.mc;
+
+  p->writeEndMark = props.writeEndMark;
+
+  #ifndef _7ZIP_ST
+  /*
+  if (newMultiThread != _multiThread)
+  {
+    ReleaseMatchFinder();
+    _multiThread = newMultiThread;
+  }
+  */
+  p->multiThread = (props.numThreads > 1);
+  #endif
+
+  return SZ_OK;
+}
+
+static const int kLiteralNextStates[kNumStates] = {0, 0, 0, 0, 1, 2, 3, 4,  5,  6,   4, 5};
+static const int kMatchNextStates[kNumStates]   = {7, 7, 7, 7, 7, 7, 7, 10, 10, 10, 10, 10};
+static const int kRepNextStates[kNumStates]     = {8, 8, 8, 8, 8, 8, 8, 11, 11, 11, 11, 11};
+static const int kShortRepNextStates[kNumStates]= {9, 9, 9, 9, 9, 9, 9, 11, 11, 11, 11, 11};
+
+#define IsCharState(s) ((s) < 7)
+
+#define GetLenToPosState(len) (((len) < kNumLenToPosStates + 1) ? (len) - 2 : kNumLenToPosStates - 1)
+
+#define kInfinityPrice (1 << 30)
+
+static void RangeEnc_Construct(CRangeEnc *p)
+{
+  p->outStream = 0;
+  p->bufBase = 0;
+}
+
+#define RangeEnc_GetProcessed(p) ((p)->processed + ((p)->buf - (p)->bufBase) + (p)->cacheSize)
+
+#define RC_BUF_SIZE (1 << 16)
+static int RangeEnc_Alloc(CRangeEnc *p, ISzAlloc *alloc)
+{
+  if (p->bufBase == 0)
+  {
+    p->bufBase = (Byte *)alloc->Alloc(alloc, RC_BUF_SIZE);
+    if (p->bufBase == 0)
+      return 0;
+    p->bufLim = p->bufBase + RC_BUF_SIZE;
+  }
+  return 1;
+}
+
+static void RangeEnc_Free(CRangeEnc *p, ISzAlloc *alloc)
+{
+  alloc->Free(alloc, p->bufBase);
+  p->bufBase = 0;
+}
+
+static void RangeEnc_Init(CRangeEnc *p)
+{
+  /* Stream.Init(); */
+  p->low = 0;
+  p->range = 0xFFFFFFFF;
+  p->cacheSize = 1;
+  p->cache = 0;
+
+  p->buf = p->bufBase;
+
+  p->processed = 0;
+  p->res = SZ_OK;
+}
+
+static void RangeEnc_FlushStream(CRangeEnc *p)
+{
+  size_t num;
+  if (p->res != SZ_OK)
+    return;
+  num = p->buf - p->bufBase;
+  if (num != p->outStream->Write(p->outStream, p->bufBase, num))
+    p->res = SZ_ERROR_WRITE;
+  p->processed += num;
+  p->buf = p->bufBase;
+}
+
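+/* RangeEnc_ShiftLow emits the top byte of "low" once a carry can no longer
+   change it; cache holds the pending byte and cacheSize counts the run of
+   0xFF bytes a carry would have to propagate through before they are
+   flushed. */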
+static void MY_FAST_CALL RangeEnc_ShiftLow(CRangeEnc *p)
+{
+  if ((UInt32)p->low < (UInt32)0xFF000000 || (int)(p->low >> 32) != 0)
+  {
+    Byte temp = p->cache;
+    do
+    {
+      Byte *buf = p->buf;
+      *buf++ = (Byte)(temp + (Byte)(p->low >> 32));
+      p->buf = buf;
+      if (buf == p->bufLim)
+        RangeEnc_FlushStream(p);
+      temp = 0xFF;
+    }
+    while (--p->cacheSize != 0);
+    p->cache = (Byte)((UInt32)p->low >> 24);
+  }
+  p->cacheSize++;
+  p->low = (UInt32)p->low << 8;
+}
+
+static void RangeEnc_FlushData(CRangeEnc *p)
+{
+  int i;
+  for (i = 0; i < 5; i++)
+    RangeEnc_ShiftLow(p);
+}
+
+static void RangeEnc_EncodeDirectBits(CRangeEnc *p, UInt32 value, int numBits)
+{
+  do
+  {
+    p->range >>= 1;
+    p->low += p->range & (0 - ((value >> --numBits) & 1));
+    if (p->range < kTopValue)
+    {
+      p->range <<= 8;
+      RangeEnc_ShiftLow(p);
+    }
+  }
+  while (numBits != 0);
+}
+
+static void RangeEnc_EncodeBit(CRangeEnc *p, CLzmaProb *prob, UInt32 symbol)
+{
+  UInt32 ttt = *prob;
+  UInt32 newBound = (p->range >> kNumBitModelTotalBits) * ttt;
+  if (symbol == 0)
+  {
+    p->range = newBound;
+    ttt += (kBitModelTotal - ttt) >> kNumMoveBits;
+  }
+  else
+  {
+    p->low += newBound;
+    p->range -= newBound;
+    ttt -= ttt >> kNumMoveBits;
+  }
+  *prob = (CLzmaProb)ttt;
+  if (p->range < kTopValue)
+  {
+    p->range <<= 8;
+    RangeEnc_ShiftLow(p);
+  }
+}
+
+static void LitEnc_Encode(CRangeEnc *p, CLzmaProb *probs, UInt32 symbol)
+{
+  symbol |= 0x100;
+  do
+  {
+    RangeEnc_EncodeBit(p, probs + (symbol >> 8), (symbol >> 7) & 1);
+    symbol <<= 1;
+  }
+  while (symbol < 0x10000);
+}
+
+static void LitEnc_EncodeMatched(CRangeEnc *p, CLzmaProb *probs, UInt32 symbol, UInt32 matchByte)
+{
+  UInt32 offs = 0x100;
+  symbol |= 0x100;
+  do
+  {
+    matchByte <<= 1;
+    RangeEnc_EncodeBit(p, probs + (offs + (matchByte & offs) + (symbol >> 8)), (symbol >> 7) & 1);
+    symbol <<= 1;
+    offs &= ~(matchByte ^ symbol);
+  }
+  while (symbol < 0x10000);
+}
+
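+/* ProbPrices[] approximates -log2(probability) scaled by kBitPrice
+   (1 << kNumBitPriceShiftBits), indexed by the probability quantized to
+   kNumMoveReducingBits, so bit costs can be summed with integer arithmetic
+   when comparing candidate encodings. */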
+static void LzmaEnc_InitPriceTables(UInt32 *ProbPrices)
+{
+  UInt32 i;
+  for (i = (1 << kNumMoveReducingBits) / 2; i < kBitModelTotal; i += (1 << kNumMoveReducingBits))
+  {
+    const int kCyclesBits = kNumBitPriceShiftBits;
+    UInt32 w = i;
+    UInt32 bitCount = 0;
+    int j;
+    for (j = 0; j < kCyclesBits; j++)
+    {
+      w = w * w;
+      bitCount <<= 1;
+      while (w >= ((UInt32)1 << 16))
+      {
+        w >>= 1;
+        bitCount++;
+      }
+    }
+    ProbPrices[i >> kNumMoveReducingBits] = ((kNumBitModelTotalBits << kCyclesBits) - 15 - bitCount);
+  }
+}
+
+
+#define GET_PRICE(prob, symbol) \
+  p->ProbPrices[((prob) ^ (((-(int)(symbol))) & (kBitModelTotal - 1))) >> kNumMoveReducingBits];
+
+#define GET_PRICEa(prob, symbol) \
+  ProbPrices[((prob) ^ ((-((int)(symbol))) & (kBitModelTotal - 1))) >> kNumMoveReducingBits];
+
+#define GET_PRICE_0(prob) p->ProbPrices[(prob) >> kNumMoveReducingBits]
+#define GET_PRICE_1(prob) p->ProbPrices[((prob) ^ (kBitModelTotal - 1)) >> kNumMoveReducingBits]
+
+#define GET_PRICE_0a(prob) ProbPrices[(prob) >> kNumMoveReducingBits]
+#define GET_PRICE_1a(prob) ProbPrices[((prob) ^ (kBitModelTotal - 1)) >> kNumMoveReducingBits]
+
+static UInt32 LitEnc_GetPrice(const CLzmaProb *probs, UInt32 symbol, UInt32 *ProbPrices)
+{
+  UInt32 price = 0;
+  symbol |= 0x100;
+  do
+  {
+    price += GET_PRICEa(probs[symbol >> 8], (symbol >> 7) & 1);
+    symbol <<= 1;
+  }
+  while (symbol < 0x10000);
+  return price;
+}
+
+static UInt32 LitEnc_GetPriceMatched(const CLzmaProb *probs, UInt32 symbol, UInt32 matchByte, UInt32 *ProbPrices)
+{
+  UInt32 price = 0;
+  UInt32 offs = 0x100;
+  symbol |= 0x100;
+  do
+  {
+    matchByte <<= 1;
+    price += GET_PRICEa(probs[offs + (matchByte & offs) + (symbol >> 8)], (symbol >> 7) & 1);
+    symbol <<= 1;
+    offs &= ~(matchByte ^ symbol);
+  }
+  while (symbol < 0x10000);
+  return price;
+}
+
+
+static void RcTree_Encode(CRangeEnc *rc, CLzmaProb *probs, int numBitLevels, UInt32 symbol)
+{
+  UInt32 m = 1;
+  int i;
+  for (i = numBitLevels; i != 0;)
+  {
+    UInt32 bit;
+    i--;
+    bit = (symbol >> i) & 1;
+    RangeEnc_EncodeBit(rc, probs + m, bit);
+    m = (m << 1) | bit;
+  }
+}
+
+static void RcTree_ReverseEncode(CRangeEnc *rc, CLzmaProb *probs, int numBitLevels, UInt32 symbol)
+{
+  UInt32 m = 1;
+  int i;
+  for (i = 0; i < numBitLevels; i++)
+  {
+    UInt32 bit = symbol & 1;
+    RangeEnc_EncodeBit(rc, probs + m, bit);
+    m = (m << 1) | bit;
+    symbol >>= 1;
+  }
+}
+
+static UInt32 RcTree_GetPrice(const CLzmaProb *probs, int numBitLevels, UInt32 symbol, UInt32 *ProbPrices)
+{
+  UInt32 price = 0;
+  symbol |= (1 << numBitLevels);
+  while (symbol != 1)
+  {
+    price += GET_PRICEa(probs[symbol >> 1], symbol & 1);
+    symbol >>= 1;
+  }
+  return price;
+}
+
+static UInt32 RcTree_ReverseGetPrice(const CLzmaProb *probs, int numBitLevels, UInt32 symbol, UInt32 *ProbPrices)
+{
+  UInt32 price = 0;
+  UInt32 m = 1;
+  int i;
+  for (i = numBitLevels; i != 0; i--)
+  {
+    UInt32 bit = symbol & 1;
+    symbol >>= 1;
+    price += GET_PRICEa(probs[m], bit);
+    m = (m << 1) | bit;
+  }
+  return price;
+}
+
+
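+/* Length coder: lengths 2..9 use the "low" tree, 10..17 the "mid" tree and
+   the remainder the 8-bit "high" tree, with choice/choice2 selecting between
+   them. CLenPriceEnc caches the resulting prices per posState and refreshes
+   them after tableSize encoded symbols. */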
+static void LenEnc_Init(CLenEnc *p)
+{
+  unsigned i;
+  p->choice = p->choice2 = kProbInitValue;
+  for (i = 0; i < (LZMA_NUM_PB_STATES_MAX << kLenNumLowBits); i++)
+    p->low[i] = kProbInitValue;
+  for (i = 0; i < (LZMA_NUM_PB_STATES_MAX << kLenNumMidBits); i++)
+    p->mid[i] = kProbInitValue;
+  for (i = 0; i < kLenNumHighSymbols; i++)
+    p->high[i] = kProbInitValue;
+}
+
+static void LenEnc_Encode(CLenEnc *p, CRangeEnc *rc, UInt32 symbol, UInt32 posState)
+{
+  if (symbol < kLenNumLowSymbols)
+  {
+    RangeEnc_EncodeBit(rc, &p->choice, 0);
+    RcTree_Encode(rc, p->low + (posState << kLenNumLowBits), kLenNumLowBits, symbol);
+  }
+  else
+  {
+    RangeEnc_EncodeBit(rc, &p->choice, 1);
+    if (symbol < kLenNumLowSymbols + kLenNumMidSymbols)
+    {
+      RangeEnc_EncodeBit(rc, &p->choice2, 0);
+      RcTree_Encode(rc, p->mid + (posState << kLenNumMidBits), kLenNumMidBits, symbol - kLenNumLowSymbols);
+    }
+    else
+    {
+      RangeEnc_EncodeBit(rc, &p->choice2, 1);
+      RcTree_Encode(rc, p->high, kLenNumHighBits, symbol - kLenNumLowSymbols - kLenNumMidSymbols);
+    }
+  }
+}
+
+static void LenEnc_SetPrices(CLenEnc *p, UInt32 posState, UInt32 numSymbols, UInt32 *prices, UInt32 *ProbPrices)
+{
+  UInt32 a0 = GET_PRICE_0a(p->choice);
+  UInt32 a1 = GET_PRICE_1a(p->choice);
+  UInt32 b0 = a1 + GET_PRICE_0a(p->choice2);
+  UInt32 b1 = a1 + GET_PRICE_1a(p->choice2);
+  UInt32 i = 0;
+  for (i = 0; i < kLenNumLowSymbols; i++)
+  {
+    if (i >= numSymbols)
+      return;
+    prices[i] = a0 + RcTree_GetPrice(p->low + (posState << kLenNumLowBits), kLenNumLowBits, i, ProbPrices);
+  }
+  for (; i < kLenNumLowSymbols + kLenNumMidSymbols; i++)
+  {
+    if (i >= numSymbols)
+      return;
+    prices[i] = b0 + RcTree_GetPrice(p->mid + (posState << kLenNumMidBits), kLenNumMidBits, i - kLenNumLowSymbols, ProbPrices);
+  }
+  for (; i < numSymbols; i++)
+    prices[i] = b1 + RcTree_GetPrice(p->high, kLenNumHighBits, i - kLenNumLowSymbols - kLenNumMidSymbols, ProbPrices);
+}
+
+static void MY_FAST_CALL LenPriceEnc_UpdateTable(CLenPriceEnc *p, UInt32 posState, UInt32 *ProbPrices)
+{
+  LenEnc_SetPrices(&p->p, posState, p->tableSize, p->prices[posState], ProbPrices);
+  p->counters[posState] = p->tableSize;
+}
+
+static void LenPriceEnc_UpdateTables(CLenPriceEnc *p, UInt32 numPosStates, UInt32 *ProbPrices)
+{
+  UInt32 posState;
+  for (posState = 0; posState < numPosStates; posState++)
+    LenPriceEnc_UpdateTable(p, posState, ProbPrices);
+}
+
+static void LenEnc_Encode2(CLenPriceEnc *p, CRangeEnc *rc, UInt32 symbol, UInt32 posState, Bool updatePrice, UInt32 *ProbPrices)
+{
+  LenEnc_Encode(&p->p, rc, symbol, posState);
+  if (updatePrice)
+    if (--p->counters[posState] == 0)
+      LenPriceEnc_UpdateTable(p, posState, ProbPrices);
+}
+
+
+
+
+static void MovePos(CLzmaEnc *p, UInt32 num)
+{
+  #ifdef SHOW_STAT
+  ttt += num;
+  printf("\n MovePos %d", num);
+  #endif
+  if (num != 0)
+  {
+    p->additionalOffset += num;
+    p->matchFinder.Skip(p->matchFinderObj, num);
+  }
+}
+
+static UInt32 ReadMatchDistances(CLzmaEnc *p, UInt32 *numDistancePairsRes)
+{
+  UInt32 lenRes = 0, numPairs;
+  p->numAvail = p->matchFinder.GetNumAvailableBytes(p->matchFinderObj);
+  numPairs = p->matchFinder.GetMatches(p->matchFinderObj, p->matches);
+  #ifdef SHOW_STAT
+  printf("\n i = %d numPairs = %d    ", ttt, numPairs / 2);
+  ttt++;
+  {
+    UInt32 i;
+    for (i = 0; i < numPairs; i += 2)
+      printf("%2d %6d   | ", p->matches[i], p->matches[i + 1]);
+  }
+  #endif
+  if (numPairs > 0)
+  {
+    lenRes = p->matches[numPairs - 2];
+    if (lenRes == p->numFastBytes)
+    {
+      const Byte *pby = p->matchFinder.GetPointerToCurrentPos(p->matchFinderObj) - 1;
+      UInt32 distance = p->matches[numPairs - 1] + 1;
+      UInt32 numAvail = p->numAvail;
+      if (numAvail > LZMA_MATCH_LEN_MAX)
+        numAvail = LZMA_MATCH_LEN_MAX;
+      {
+        const Byte *pby2 = pby - distance;
+        for (; lenRes < numAvail && pby[lenRes] == pby2[lenRes]; lenRes++);
+      }
+    }
+  }
+  p->additionalOffset++;
+  *numDistancePairsRes = numPairs;
+  return lenRes;
+}
+
+
+#define MakeAsChar(p) (p)->backPrev = (UInt32)(-1); (p)->prev1IsChar = False;
+#define MakeAsShortRep(p) (p)->backPrev = 0; (p)->prev1IsChar = False;
+#define IsShortRep(p) ((p)->backPrev == 0)
+
+static UInt32 GetRepLen1Price(CLzmaEnc *p, UInt32 state, UInt32 posState)
+{
+  return
+    GET_PRICE_0(p->isRepG0[state]) +
+    GET_PRICE_0(p->isRep0Long[state][posState]);
+}
+
+static UInt32 GetPureRepPrice(CLzmaEnc *p, UInt32 repIndex, UInt32 state, UInt32 posState)
+{
+  UInt32 price;
+  if (repIndex == 0)
+  {
+    price = GET_PRICE_0(p->isRepG0[state]);
+    price += GET_PRICE_1(p->isRep0Long[state][posState]);
+  }
+  else
+  {
+    price = GET_PRICE_1(p->isRepG0[state]);
+    if (repIndex == 1)
+      price += GET_PRICE_0(p->isRepG1[state]);
+    else
+    {
+      price += GET_PRICE_1(p->isRepG1[state]);
+      price += GET_PRICE(p->isRepG2[state], repIndex - 2);
+    }
+  }
+  return price;
+}
+
+static UInt32 GetRepPrice(CLzmaEnc *p, UInt32 repIndex, UInt32 len, UInt32 state, UInt32 posState)
+{
+  return p->repLenEnc.prices[posState][len - LZMA_MATCH_LEN_MIN] +
+    GetPureRepPrice(p, repIndex, state, posState);
+}
+
+static UInt32 Backward(CLzmaEnc *p, UInt32 *backRes, UInt32 cur)
+{
+  UInt32 posMem = p->opt[cur].posPrev;
+  UInt32 backMem = p->opt[cur].backPrev;
+  p->optimumEndIndex = cur;
+  do
+  {
+    if (p->opt[cur].prev1IsChar)
+    {
+      MakeAsChar(&p->opt[posMem])
+      p->opt[posMem].posPrev = posMem - 1;
+      if (p->opt[cur].prev2)
+      {
+        p->opt[posMem - 1].prev1IsChar = False;
+        p->opt[posMem - 1].posPrev = p->opt[cur].posPrev2;
+        p->opt[posMem - 1].backPrev = p->opt[cur].backPrev2;
+      }
+    }
+    {
+      UInt32 posPrev = posMem;
+      UInt32 backCur = backMem;
+      
+      backMem = p->opt[posPrev].backPrev;
+      posMem = p->opt[posPrev].posPrev;
+      
+      p->opt[posPrev].backPrev = backCur;
+      p->opt[posPrev].posPrev = cur;
+      cur = posPrev;
+    }
+  }
+  while (cur != 0);
+  *backRes = p->opt[0].backPrev;
+  p->optimumCurrentIndex  = p->opt[0].posPrev;
+  return p->optimumCurrentIndex;
+}
+
+#define LIT_PROBS(pos, prevByte) (p->litProbs + ((((pos) & p->lpMask) << p->lc) + ((prevByte) >> (8 - p->lc))) * 0x300)
+
+static UInt32 GetOptimum(CLzmaEnc *p, UInt32 position, UInt32 *backRes)
+{
+  UInt32 numAvail, mainLen, numPairs, repMaxIndex, i, posState, lenEnd, len, cur;
+  UInt32 matchPrice, repMatchPrice, normalMatchPrice;
+  UInt32 reps[LZMA_NUM_REPS], repLens[LZMA_NUM_REPS];
+  UInt32 *matches;
+  const Byte *data;
+  Byte curByte, matchByte;
+  if (p->optimumEndIndex != p->optimumCurrentIndex)
+  {
+    const COptimal *opt = &p->opt[p->optimumCurrentIndex];
+    UInt32 lenRes = opt->posPrev - p->optimumCurrentIndex;
+    *backRes = opt->backPrev;
+    p->optimumCurrentIndex = opt->posPrev;
+    return lenRes;
+  }
+  p->optimumCurrentIndex = p->optimumEndIndex = 0;
+  
+  if (p->additionalOffset == 0)
+    mainLen = ReadMatchDistances(p, &numPairs);
+  else
+  {
+    mainLen = p->longestMatchLength;
+    numPairs = p->numPairs;
+  }
+
+  numAvail = p->numAvail;
+  if (numAvail < 2)
+  {
+    *backRes = (UInt32)(-1);
+    return 1;
+  }
+  if (numAvail > LZMA_MATCH_LEN_MAX)
+    numAvail = LZMA_MATCH_LEN_MAX;
+
+  data = p->matchFinder.GetPointerToCurrentPos(p->matchFinderObj) - 1;
+  repMaxIndex = 0;
+  for (i = 0; i < LZMA_NUM_REPS; i++)
+  {
+    UInt32 lenTest;
+    const Byte *data2;
+    reps[i] = p->reps[i];
+    data2 = data - (reps[i] + 1);
+    if (data[0] != data2[0] || data[1] != data2[1])
+    {
+      repLens[i] = 0;
+      continue;
+    }
+    for (lenTest = 2; lenTest < numAvail && data[lenTest] == data2[lenTest]; lenTest++);
+    repLens[i] = lenTest;
+    if (lenTest > repLens[repMaxIndex])
+      repMaxIndex = i;
+  }
+  if (repLens[repMaxIndex] >= p->numFastBytes)
+  {
+    UInt32 lenRes;
+    *backRes = repMaxIndex;
+    lenRes = repLens[repMaxIndex];
+    MovePos(p, lenRes - 1);
+    return lenRes;
+  }
+
+  matches = p->matches;
+  if (mainLen >= p->numFastBytes)
+  {
+    *backRes = matches[numPairs - 1] + LZMA_NUM_REPS;
+    MovePos(p, mainLen - 1);
+    return mainLen;
+  }
+  curByte = *data;
+  matchByte = *(data - (reps[0] + 1));
+
+  if (mainLen < 2 && curByte != matchByte && repLens[repMaxIndex] < 2)
+  {
+    *backRes = (UInt32)-1;
+    return 1;
+  }
+
+  p->opt[0].state = (CState)p->state;
+
+  posState = (position & p->pbMask);
+
+  {
+    const CLzmaProb *probs = LIT_PROBS(position, *(data - 1));
+    p->opt[1].price = GET_PRICE_0(p->isMatch[p->state][posState]) +
+        (!IsCharState(p->state) ?
+          LitEnc_GetPriceMatched(probs, curByte, matchByte, p->ProbPrices) :
+          LitEnc_GetPrice(probs, curByte, p->ProbPrices));
+  }
+
+  MakeAsChar(&p->opt[1]);
+
+  matchPrice = GET_PRICE_1(p->isMatch[p->state][posState]);
+  repMatchPrice = matchPrice + GET_PRICE_1(p->isRep[p->state]);
+
+  if (matchByte == curByte)
+  {
+    UInt32 shortRepPrice = repMatchPrice + GetRepLen1Price(p, p->state, posState);
+    if (shortRepPrice < p->opt[1].price)
+    {
+      p->opt[1].price = shortRepPrice;
+      MakeAsShortRep(&p->opt[1]);
+    }
+  }
+  lenEnd = ((mainLen >= repLens[repMaxIndex]) ? mainLen : repLens[repMaxIndex]);
+
+  if (lenEnd < 2)
+  {
+    *backRes = p->opt[1].backPrev;
+    return 1;
+  }
+
+  p->opt[1].posPrev = 0;
+  for (i = 0; i < LZMA_NUM_REPS; i++)
+    p->opt[0].backs[i] = reps[i];
+
+  len = lenEnd;
+  do
+    p->opt[len--].price = kInfinityPrice;
+  while (len >= 2);
+
+  for (i = 0; i < LZMA_NUM_REPS; i++)
+  {
+    UInt32 repLen = repLens[i];
+    UInt32 price;
+    if (repLen < 2)
+      continue;
+    price = repMatchPrice + GetPureRepPrice(p, i, p->state, posState);
+    do
+    {
+      UInt32 curAndLenPrice = price + p->repLenEnc.prices[posState][repLen - 2];
+      COptimal *opt = &p->opt[repLen];
+      if (curAndLenPrice < opt->price)
+      {
+        opt->price = curAndLenPrice;
+        opt->posPrev = 0;
+        opt->backPrev = i;
+        opt->prev1IsChar = False;
+      }
+    }
+    while (--repLen >= 2);
+  }
+
+  normalMatchPrice = matchPrice + GET_PRICE_0(p->isRep[p->state]);
+
+  len = ((repLens[0] >= 2) ? repLens[0] + 1 : 2);
+  if (len <= mainLen)
+  {
+    UInt32 offs = 0;
+    while (len > matches[offs])
+      offs += 2;
+    for (; ; len++)
+    {
+      COptimal *opt;
+      UInt32 distance = matches[offs + 1];
+
+      UInt32 curAndLenPrice = normalMatchPrice + p->lenEnc.prices[posState][len - LZMA_MATCH_LEN_MIN];
+      UInt32 lenToPosState = GetLenToPosState(len);
+      if (distance < kNumFullDistances)
+        curAndLenPrice += p->distancesPrices[lenToPosState][distance];
+      else
+      {
+        UInt32 slot;
+        GetPosSlot2(distance, slot);
+        curAndLenPrice += p->alignPrices[distance & kAlignMask] + p->posSlotPrices[lenToPosState][slot];
+      }
+      opt = &p->opt[len];
+      if (curAndLenPrice < opt->price)
+      {
+        opt->price = curAndLenPrice;
+        opt->posPrev = 0;
+        opt->backPrev = distance + LZMA_NUM_REPS;
+        opt->prev1IsChar = False;
+      }
+      if (len == matches[offs])
+      {
+        offs += 2;
+        if (offs == numPairs)
+          break;
+      }
+    }
+  }
+
+  cur = 0;
+
+    #ifdef SHOW_STAT2
+    if (position >= 0)
+    {
+      unsigned i;
+      printf("\n pos = %4X", position);
+      for (i = cur; i <= lenEnd; i++)
+      printf("\nprice[%4X] = %d", position - cur + i, p->opt[i].price);
+    }
+    #endif
+
+  for (;;)
+  {
+    UInt32 numAvailFull, newLen, numPairs, posPrev, state, posState, startLen;
+    UInt32 curPrice, curAnd1Price, matchPrice, repMatchPrice;
+    Bool nextIsChar;
+    Byte curByte, matchByte;
+    const Byte *data;
+    COptimal *curOpt;
+    COptimal *nextOpt;
+
+    cur++;
+    if (cur == lenEnd)
+      return Backward(p, backRes, cur);
+
+    newLen = ReadMatchDistances(p, &numPairs);
+    if (newLen >= p->numFastBytes)
+    {
+      p->numPairs = numPairs;
+      p->longestMatchLength = newLen;
+      return Backward(p, backRes, cur);
+    }
+    position++;
+    curOpt = &p->opt[cur];
+    posPrev = curOpt->posPrev;
+    if (curOpt->prev1IsChar)
+    {
+      posPrev--;
+      if (curOpt->prev2)
+      {
+        state = p->opt[curOpt->posPrev2].state;
+        if (curOpt->backPrev2 < LZMA_NUM_REPS)
+          state = kRepNextStates[state];
+        else
+          state = kMatchNextStates[state];
+      }
+      else
+        state = p->opt[posPrev].state;
+      state = kLiteralNextStates[state];
+    }
+    else
+      state = p->opt[posPrev].state;
+    if (posPrev == cur - 1)
+    {
+      if (IsShortRep(curOpt))
+        state = kShortRepNextStates[state];
+      else
+        state = kLiteralNextStates[state];
+    }
+    else
+    {
+      UInt32 pos;
+      const COptimal *prevOpt;
+      if (curOpt->prev1IsChar && curOpt->prev2)
+      {
+        posPrev = curOpt->posPrev2;
+        pos = curOpt->backPrev2;
+        state = kRepNextStates[state];
+      }
+      else
+      {
+        pos = curOpt->backPrev;
+        if (pos < LZMA_NUM_REPS)
+          state = kRepNextStates[state];
+        else
+          state = kMatchNextStates[state];
+      }
+      prevOpt = &p->opt[posPrev];
+      if (pos < LZMA_NUM_REPS)
+      {
+        UInt32 i;
+        reps[0] = prevOpt->backs[pos];
+        for (i = 1; i <= pos; i++)
+          reps[i] = prevOpt->backs[i - 1];
+        for (; i < LZMA_NUM_REPS; i++)
+          reps[i] = prevOpt->backs[i];
+      }
+      else
+      {
+        UInt32 i;
+        reps[0] = (pos - LZMA_NUM_REPS);
+        for (i = 1; i < LZMA_NUM_REPS; i++)
+          reps[i] = prevOpt->backs[i - 1];
+      }
+    }
+    curOpt->state = (CState)state;
+
+    curOpt->backs[0] = reps[0];
+    curOpt->backs[1] = reps[1];
+    curOpt->backs[2] = reps[2];
+    curOpt->backs[3] = reps[3];
+
+    curPrice = curOpt->price;
+    nextIsChar = False;
+    data = p->matchFinder.GetPointerToCurrentPos(p->matchFinderObj) - 1;
+    curByte = *data;
+    matchByte = *(data - (reps[0] + 1));
+
+    posState = (position & p->pbMask);
+
+    curAnd1Price = curPrice + GET_PRICE_0(p->isMatch[state][posState]);
+    {
+      const CLzmaProb *probs = LIT_PROBS(position, *(data - 1));
+      curAnd1Price +=
+        (!IsCharState(state) ?
+          LitEnc_GetPriceMatched(probs, curByte, matchByte, p->ProbPrices) :
+          LitEnc_GetPrice(probs, curByte, p->ProbPrices));
+    }
+
+    nextOpt = &p->opt[cur + 1];
+
+    if (curAnd1Price < nextOpt->price)
+    {
+      nextOpt->price = curAnd1Price;
+      nextOpt->posPrev = cur;
+      MakeAsChar(nextOpt);
+      nextIsChar = True;
+    }
+
+    matchPrice = curPrice + GET_PRICE_1(p->isMatch[state][posState]);
+    repMatchPrice = matchPrice + GET_PRICE_1(p->isRep[state]);
+    
+    if (matchByte == curByte && !(nextOpt->posPrev < cur && nextOpt->backPrev == 0))
+    {
+      UInt32 shortRepPrice = repMatchPrice + GetRepLen1Price(p, state, posState);
+      if (shortRepPrice <= nextOpt->price)
+      {
+        nextOpt->price = shortRepPrice;
+        nextOpt->posPrev = cur;
+        MakeAsShortRep(nextOpt);
+        nextIsChar = True;
+      }
+    }
+    numAvailFull = p->numAvail;
+    {
+      UInt32 temp = kNumOpts - 1 - cur;
+      if (temp < numAvailFull)
+        numAvailFull = temp;
+    }
+
+    if (numAvailFull < 2)
+      continue;
+    numAvail = (numAvailFull <= p->numFastBytes ? numAvailFull : p->numFastBytes);
+
+    if (!nextIsChar && matchByte != curByte) /* speed optimization */
+    {
+      /* try Literal + rep0 */
+      UInt32 temp;
+      UInt32 lenTest2;
+      const Byte *data2 = data - (reps[0] + 1);
+      UInt32 limit = p->numFastBytes + 1;
+      if (limit > numAvailFull)
+        limit = numAvailFull;
+
+      for (temp = 1; temp < limit && data[temp] == data2[temp]; temp++);
+      lenTest2 = temp - 1;
+      if (lenTest2 >= 2)
+      {
+        UInt32 state2 = kLiteralNextStates[state];
+        UInt32 posStateNext = (position + 1) & p->pbMask;
+        UInt32 nextRepMatchPrice = curAnd1Price +
+            GET_PRICE_1(p->isMatch[state2][posStateNext]) +
+            GET_PRICE_1(p->isRep[state2]);
+        /* for (; lenTest2 >= 2; lenTest2--) */
+        {
+          UInt32 curAndLenPrice;
+          COptimal *opt;
+          UInt32 offset = cur + 1 + lenTest2;
+          while (lenEnd < offset)
+            p->opt[++lenEnd].price = kInfinityPrice;
+          curAndLenPrice = nextRepMatchPrice + GetRepPrice(p, 0, lenTest2, state2, posStateNext);
+          opt = &p->opt[offset];
+          if (curAndLenPrice < opt->price)
+          {
+            opt->price = curAndLenPrice;
+            opt->posPrev = cur + 1;
+            opt->backPrev = 0;
+            opt->prev1IsChar = True;
+            opt->prev2 = False;
+          }
+        }
+      }
+    }
+    
+    startLen = 2; /* speed optimization */
+    {
+    UInt32 repIndex;
+    for (repIndex = 0; repIndex < LZMA_NUM_REPS; repIndex++)
+    {
+      UInt32 lenTest;
+      UInt32 lenTestTemp;
+      UInt32 price;
+      const Byte *data2 = data - (reps[repIndex] + 1);
+      if (data[0] != data2[0] || data[1] != data2[1])
+        continue;
+      for (lenTest = 2; lenTest < numAvail && data[lenTest] == data2[lenTest]; lenTest++);
+      while (lenEnd < cur + lenTest)
+        p->opt[++lenEnd].price = kInfinityPrice;
+      lenTestTemp = lenTest;
+      price = repMatchPrice + GetPureRepPrice(p, repIndex, state, posState);
+      do
+      {
+        UInt32 curAndLenPrice = price + p->repLenEnc.prices[posState][lenTest - 2];
+        COptimal *opt = &p->opt[cur + lenTest];
+        if (curAndLenPrice < opt->price)
+        {
+          opt->price = curAndLenPrice;
+          opt->posPrev = cur;
+          opt->backPrev = repIndex;
+          opt->prev1IsChar = False;
+        }
+      }
+      while (--lenTest >= 2);
+      lenTest = lenTestTemp;
+      
+      if (repIndex == 0)
+        startLen = lenTest + 1;
+        
+      /* if (_maxMode) */
+        {
+          UInt32 lenTest2 = lenTest + 1;
+          UInt32 limit = lenTest2 + p->numFastBytes;
+          UInt32 nextRepMatchPrice;
+          if (limit > numAvailFull)
+            limit = numAvailFull;
+          for (; lenTest2 < limit && data[lenTest2] == data2[lenTest2]; lenTest2++);
+          lenTest2 -= lenTest + 1;
+          if (lenTest2 >= 2)
+          {
+            UInt32 state2 = kRepNextStates[state];
+            UInt32 posStateNext = (position + lenTest) & p->pbMask;
+            UInt32 curAndLenCharPrice =
+                price + p->repLenEnc.prices[posState][lenTest - 2] +
+                GET_PRICE_0(p->isMatch[state2][posStateNext]) +
+                LitEnc_GetPriceMatched(LIT_PROBS(position + lenTest, data[lenTest - 1]),
+                    data[lenTest], data2[lenTest], p->ProbPrices);
+            state2 = kLiteralNextStates[state2];
+            posStateNext = (position + lenTest + 1) & p->pbMask;
+            nextRepMatchPrice = curAndLenCharPrice +
+                GET_PRICE_1(p->isMatch[state2][posStateNext]) +
+                GET_PRICE_1(p->isRep[state2]);
+            
+            /* for (; lenTest2 >= 2; lenTest2--) */
+            {
+              UInt32 curAndLenPrice;
+              COptimal *opt;
+              UInt32 offset = cur + lenTest + 1 + lenTest2;
+              while (lenEnd < offset)
+                p->opt[++lenEnd].price = kInfinityPrice;
+              curAndLenPrice = nextRepMatchPrice + GetRepPrice(p, 0, lenTest2, state2, posStateNext);
+              opt = &p->opt[offset];
+              if (curAndLenPrice < opt->price)
+              {
+                opt->price = curAndLenPrice;
+                opt->posPrev = cur + lenTest + 1;
+                opt->backPrev = 0;
+                opt->prev1IsChar = True;
+                opt->prev2 = True;
+                opt->posPrev2 = cur;
+                opt->backPrev2 = repIndex;
+              }
+            }
+          }
+        }
+    }
+    }
+    /* for (UInt32 lenTest = 2; lenTest <= newLen; lenTest++) */
+    if (newLen > numAvail)
+    {
+      newLen = numAvail;
+      for (numPairs = 0; newLen > matches[numPairs]; numPairs += 2);
+      matches[numPairs] = newLen;
+      numPairs += 2;
+    }
+    if (newLen >= startLen)
+    {
+      UInt32 normalMatchPrice = matchPrice + GET_PRICE_0(p->isRep[state]);
+      UInt32 offs, curBack, posSlot;
+      UInt32 lenTest;
+      while (lenEnd < cur + newLen)
+        p->opt[++lenEnd].price = kInfinityPrice;
+
+      offs = 0;
+      while (startLen > matches[offs])
+        offs += 2;
+      curBack = matches[offs + 1];
+      GetPosSlot2(curBack, posSlot);
+      for (lenTest = /*2*/ startLen; ; lenTest++)
+      {
+        UInt32 curAndLenPrice = normalMatchPrice + p->lenEnc.prices[posState][lenTest - LZMA_MATCH_LEN_MIN];
+        UInt32 lenToPosState = GetLenToPosState(lenTest);
+        COptimal *opt;
+        if (curBack < kNumFullDistances)
+          curAndLenPrice += p->distancesPrices[lenToPosState][curBack];
+        else
+          curAndLenPrice += p->posSlotPrices[lenToPosState][posSlot] + p->alignPrices[curBack & kAlignMask];
+        
+        opt = &p->opt[cur + lenTest];
+        if (curAndLenPrice < opt->price)
+        {
+          opt->price = curAndLenPrice;
+          opt->posPrev = cur;
+          opt->backPrev = curBack + LZMA_NUM_REPS;
+          opt->prev1IsChar = False;
+        }
+
+        if (/*_maxMode && */lenTest == matches[offs])
+        {
+          /* Try Match + Literal + Rep0 */
+          const Byte *data2 = data - (curBack + 1);
+          UInt32 lenTest2 = lenTest + 1;
+          UInt32 limit = lenTest2 + p->numFastBytes;
+          UInt32 nextRepMatchPrice;
+          if (limit > numAvailFull)
+            limit = numAvailFull;
+          for (; lenTest2 < limit && data[lenTest2] == data2[lenTest2]; lenTest2++);
+          lenTest2 -= lenTest + 1;
+          if (lenTest2 >= 2)
+          {
+            UInt32 state2 = kMatchNextStates[state];
+            UInt32 posStateNext = (position + lenTest) & p->pbMask;
+            UInt32 curAndLenCharPrice = curAndLenPrice +
+                GET_PRICE_0(p->isMatch[state2][posStateNext]) +
+                LitEnc_GetPriceMatched(LIT_PROBS(position + lenTest, data[lenTest - 1]),
+                    data[lenTest], data2[lenTest], p->ProbPrices);
+            state2 = kLiteralNextStates[state2];
+            posStateNext = (posStateNext + 1) & p->pbMask;
+            nextRepMatchPrice = curAndLenCharPrice +
+                GET_PRICE_1(p->isMatch[state2][posStateNext]) +
+                GET_PRICE_1(p->isRep[state2]);
+            
+            /* for (; lenTest2 >= 2; lenTest2--) */
+            {
+              UInt32 offset = cur + lenTest + 1 + lenTest2;
+              UInt32 curAndLenPrice;
+              COptimal *opt;
+              while (lenEnd < offset)
+                p->opt[++lenEnd].price = kInfinityPrice;
+              curAndLenPrice = nextRepMatchPrice + GetRepPrice(p, 0, lenTest2, state2, posStateNext);
+              opt = &p->opt[offset];
+              if (curAndLenPrice < opt->price)
+              {
+                opt->price = curAndLenPrice;
+                opt->posPrev = cur + lenTest + 1;
+                opt->backPrev = 0;
+                opt->prev1IsChar = True;
+                opt->prev2 = True;
+                opt->posPrev2 = cur;
+                opt->backPrev2 = curBack + LZMA_NUM_REPS;
+              }
+            }
+          }
+          offs += 2;
+          if (offs == numPairs)
+            break;
+          curBack = matches[offs + 1];
+          if (curBack >= kNumFullDistances)
+            GetPosSlot2(curBack, posSlot);
+        }
+      }
+    }
+  }
+}
+
+#define ChangePair(smallDist, bigDist) (((bigDist) >> 7) > (smallDist))
+
+static UInt32 GetOptimumFast(CLzmaEnc *p, UInt32 *backRes)
+{
+  UInt32 numAvail, mainLen, mainDist, numPairs, repIndex, repLen, i;
+  const Byte *data;
+  const UInt32 *matches;
+
+  if (p->additionalOffset == 0)
+    mainLen = ReadMatchDistances(p, &numPairs);
+  else
+  {
+    mainLen = p->longestMatchLength;
+    numPairs = p->numPairs;
+  }
+
+  numAvail = p->numAvail;
+  *backRes = (UInt32)-1;
+  if (numAvail < 2)
+    return 1;
+  if (numAvail > LZMA_MATCH_LEN_MAX)
+    numAvail = LZMA_MATCH_LEN_MAX;
+  data = p->matchFinder.GetPointerToCurrentPos(p->matchFinderObj) - 1;
+
+  repLen = repIndex = 0;
+  for (i = 0; i < LZMA_NUM_REPS; i++)
+  {
+    UInt32 len;
+    const Byte *data2 = data - (p->reps[i] + 1);
+    if (data[0] != data2[0] || data[1] != data2[1])
+      continue;
+    for (len = 2; len < numAvail && data[len] == data2[len]; len++);
+    if (len >= p->numFastBytes)
+    {
+      *backRes = i;
+      MovePos(p, len - 1);
+      return len;
+    }
+    if (len > repLen)
+    {
+      repIndex = i;
+      repLen = len;
+    }
+  }
+
+  matches = p->matches;
+  if (mainLen >= p->numFastBytes)
+  {
+    *backRes = matches[numPairs - 1] + LZMA_NUM_REPS;
+    MovePos(p, mainLen - 1);
+    return mainLen;
+  }
+
+  mainDist = 0; /* for GCC */
+  if (mainLen >= 2)
+  {
+    mainDist = matches[numPairs - 1];
+    while (numPairs > 2 && mainLen == matches[numPairs - 4] + 1)
+    {
+      if (!ChangePair(matches[numPairs - 3], mainDist))
+        break;
+      numPairs -= 2;
+      mainLen = matches[numPairs - 2];
+      mainDist = matches[numPairs - 1];
+    }
+    if (mainLen == 2 && mainDist >= 0x80)
+      mainLen = 1;
+  }
+
+  if (repLen >= 2 && (
+        (repLen + 1 >= mainLen) ||
+        (repLen + 2 >= mainLen && mainDist >= (1 << 9)) ||
+        (repLen + 3 >= mainLen && mainDist >= (1 << 15))))
+  {
+    *backRes = repIndex;
+    MovePos(p, repLen - 1);
+    return repLen;
+  }
+  
+  if (mainLen < 2 || numAvail <= 2)
+    return 1;
+
+  p->longestMatchLength = ReadMatchDistances(p, &p->numPairs);
+  if (p->longestMatchLength >= 2)
+  {
+    UInt32 newDistance = matches[p->numPairs - 1];
+    if ((p->longestMatchLength >= mainLen && newDistance < mainDist) ||
+        (p->longestMatchLength == mainLen + 1 && !ChangePair(mainDist, newDistance)) ||
+        (p->longestMatchLength > mainLen + 1) ||
+        (p->longestMatchLength + 1 >= mainLen && mainLen >= 3 && ChangePair(newDistance, mainDist)))
+      return 1;
+  }
+  
+  data = p->matchFinder.GetPointerToCurrentPos(p->matchFinderObj) - 1;
+  for (i = 0; i < LZMA_NUM_REPS; i++)
+  {
+    UInt32 len, limit;
+    const Byte *data2 = data - (p->reps[i] + 1);
+    if (data[0] != data2[0] || data[1] != data2[1])
+      continue;
+    limit = mainLen - 1;
+    for (len = 2; len < limit && data[len] == data2[len]; len++);
+    if (len >= limit)
+      return 1;
+  }
+  *backRes = mainDist + LZMA_NUM_REPS;
+  MovePos(p, mainLen - 2);
+  return mainLen;
+}
+
+static void WriteEndMarker(CLzmaEnc *p, UInt32 posState)
+{
+  UInt32 len;
+  RangeEnc_EncodeBit(&p->rc, &p->isMatch[p->state][posState], 1);
+  RangeEnc_EncodeBit(&p->rc, &p->isRep[p->state], 0);
+  p->state = kMatchNextStates[p->state];
+  len = LZMA_MATCH_LEN_MIN;
+  LenEnc_Encode2(&p->lenEnc, &p->rc, len - LZMA_MATCH_LEN_MIN, posState, !p->fastMode, p->ProbPrices);
+  RcTree_Encode(&p->rc, p->posSlotEncoder[GetLenToPosState(len)], kNumPosSlotBits, (1 << kNumPosSlotBits) - 1);
+  RangeEnc_EncodeDirectBits(&p->rc, (((UInt32)1 << 30) - 1) >> kNumAlignBits, 30 - kNumAlignBits);
+  RcTree_ReverseEncode(&p->rc, p->posAlignEncoder, kNumAlignBits, kAlignMask);
+}
+
+static SRes CheckErrors(CLzmaEnc *p)
+{
+  if (p->result != SZ_OK)
+    return p->result;
+  if (p->rc.res != SZ_OK)
+    p->result = SZ_ERROR_WRITE;
+  if (p->matchFinderBase.result != SZ_OK)
+    p->result = SZ_ERROR_READ;
+  if (p->result != SZ_OK)
+    p->finished = True;
+  return p->result;
+}
+
+static SRes Flush(CLzmaEnc *p, UInt32 nowPos)
+{
+  /* ReleaseMFStream(); */
+  p->finished = True;
+  if (p->writeEndMark)
+    WriteEndMarker(p, nowPos & p->pbMask);
+  RangeEnc_FlushData(&p->rc);
+  RangeEnc_FlushStream(&p->rc);
+  return CheckErrors(p);
+}
+
+static void FillAlignPrices(CLzmaEnc *p)
+{
+  UInt32 i;
+  for (i = 0; i < kAlignTableSize; i++)
+    p->alignPrices[i] = RcTree_ReverseGetPrice(p->posAlignEncoder, kNumAlignBits, i, p->ProbPrices);
+  p->alignPriceCount = 0;
+}
+
+static void FillDistancesPrices(CLzmaEnc *p)
+{
+  UInt32 tempPrices[kNumFullDistances];
+  UInt32 i, lenToPosState;
+  for (i = kStartPosModelIndex; i < kNumFullDistances; i++)
+  {
+    UInt32 posSlot = GetPosSlot1(i);
+    UInt32 footerBits = ((posSlot >> 1) - 1);
+    UInt32 base = ((2 | (posSlot & 1)) << footerBits);
+    tempPrices[i] = RcTree_ReverseGetPrice(p->posEncoders + base - posSlot - 1, footerBits, i - base, p->ProbPrices);
+  }
+
+  for (lenToPosState = 0; lenToPosState < kNumLenToPosStates; lenToPosState++)
+  {
+    UInt32 posSlot;
+    const CLzmaProb *encoder = p->posSlotEncoder[lenToPosState];
+    UInt32 *posSlotPrices = p->posSlotPrices[lenToPosState];
+    for (posSlot = 0; posSlot < p->distTableSize; posSlot++)
+      posSlotPrices[posSlot] = RcTree_GetPrice(encoder, kNumPosSlotBits, posSlot, p->ProbPrices);
+    for (posSlot = kEndPosModelIndex; posSlot < p->distTableSize; posSlot++)
+      posSlotPrices[posSlot] += ((((posSlot >> 1) - 1) - kNumAlignBits) << kNumBitPriceShiftBits);
+
+    {
+      UInt32 *distancesPrices = p->distancesPrices[lenToPosState];
+      UInt32 i;
+      for (i = 0; i < kStartPosModelIndex; i++)
+        distancesPrices[i] = posSlotPrices[i];
+      for (; i < kNumFullDistances; i++)
+        distancesPrices[i] = posSlotPrices[GetPosSlot1(i)] + tempPrices[i];
+    }
+  }
+  p->matchPriceCount = 0;
+}
+
+static void LzmaEnc_Construct(CLzmaEnc *p)
+{
+  RangeEnc_Construct(&p->rc);
+  MatchFinder_Construct(&p->matchFinderBase);
+  #ifndef _7ZIP_ST
+  MatchFinderMt_Construct(&p->matchFinderMt);
+  p->matchFinderMt.MatchFinder = &p->matchFinderBase;
+  #endif
+
+  {
+    CLzmaEncProps props;
+    LzmaEncProps_Init(&props);
+    LzmaEnc_SetProps(p, &props);
+  }
+
+  #ifndef LZMA_LOG_BSR
+  LzmaEnc_FastPosInit(p->g_FastPos);
+  #endif
+
+  LzmaEnc_InitPriceTables(p->ProbPrices);
+  p->litProbs = 0;
+  p->saveState.litProbs = 0;
+}
+
+CLzmaEncHandle LzmaEnc_Create(ISzAlloc *alloc)
+{
+  void *p;
+  p = alloc->Alloc(alloc, sizeof(CLzmaEnc));
+  if (p != 0)
+    LzmaEnc_Construct((CLzmaEnc *)p);
+  return p;
+}
+
+static void LzmaEnc_FreeLits(CLzmaEnc *p, ISzAlloc *alloc)
+{
+  alloc->Free(alloc, p->litProbs);
+  alloc->Free(alloc, p->saveState.litProbs);
+  p->litProbs = 0;
+  p->saveState.litProbs = 0;
+}
+
+static void LzmaEnc_Destruct(CLzmaEnc *p, ISzAlloc *alloc, ISzAlloc *allocBig)
+{
+  #ifndef _7ZIP_ST
+  MatchFinderMt_Destruct(&p->matchFinderMt, allocBig);
+  #endif
+  MatchFinder_Free(&p->matchFinderBase, allocBig);
+  LzmaEnc_FreeLits(p, alloc);
+  RangeEnc_Free(&p->rc, alloc);
+}
+
+void LzmaEnc_Destroy(CLzmaEncHandle p, ISzAlloc *alloc, ISzAlloc *allocBig)
+{
+  LzmaEnc_Destruct((CLzmaEnc *)p, alloc, allocBig);
+  alloc->Free(alloc, p);
+}
+
+static SRes LzmaEnc_CodeOneBlock(CLzmaEnc *p, Bool useLimits, UInt32 maxPackSize, UInt32 maxUnpackSize)
+{
+  UInt32 nowPos32, startPos32;
+  if (p->needInit)
+  {
+    p->matchFinder.Init(p->matchFinderObj);
+    p->needInit = 0;
+  }
+
+  if (p->finished)
+    return p->result;
+  RINOK(CheckErrors(p));
+
+  nowPos32 = (UInt32)p->nowPos64;
+  startPos32 = nowPos32;
+
+  if (p->nowPos64 == 0)
+  {
+    UInt32 numPairs;
+    Byte curByte;
+    if (p->matchFinder.GetNumAvailableBytes(p->matchFinderObj) == 0)
+      return Flush(p, nowPos32);
+    ReadMatchDistances(p, &numPairs);
+    RangeEnc_EncodeBit(&p->rc, &p->isMatch[p->state][0], 0);
+    p->state = kLiteralNextStates[p->state];
+    curByte = p->matchFinder.GetIndexByte(p->matchFinderObj, 0 - p->additionalOffset);
+    LitEnc_Encode(&p->rc, p->litProbs, curByte);
+    p->additionalOffset--;
+    nowPos32++;
+  }
+
+  if (p->matchFinder.GetNumAvailableBytes(p->matchFinderObj) != 0)
+  for (;;)
+  {
+    UInt32 pos, len, posState;
+
+    if (p->fastMode)
+      len = GetOptimumFast(p, &pos);
+    else
+      len = GetOptimum(p, nowPos32, &pos);
+
+    #ifdef SHOW_STAT2
+    printf("\n pos = %4X,   len = %d   pos = %d", nowPos32, len, pos);
+    #endif
+
+    posState = nowPos32 & p->pbMask;
+    if (len == 1 && pos == (UInt32)-1)
+    {
+      Byte curByte;
+      CLzmaProb *probs;
+      const Byte *data;
+
+      RangeEnc_EncodeBit(&p->rc, &p->isMatch[p->state][posState], 0);
+      data = p->matchFinder.GetPointerToCurrentPos(p->matchFinderObj) - p->additionalOffset;
+      curByte = *data;
+      probs = LIT_PROBS(nowPos32, *(data - 1));
+      if (IsCharState(p->state))
+        LitEnc_Encode(&p->rc, probs, curByte);
+      else
+        LitEnc_EncodeMatched(&p->rc, probs, curByte, *(data - p->reps[0] - 1));
+      p->state = kLiteralNextStates[p->state];
+    }
+    else
+    {
+      RangeEnc_EncodeBit(&p->rc, &p->isMatch[p->state][posState], 1);
+      if (pos < LZMA_NUM_REPS)
+      {
+        RangeEnc_EncodeBit(&p->rc, &p->isRep[p->state], 1);
+        if (pos == 0)
+        {
+          RangeEnc_EncodeBit(&p->rc, &p->isRepG0[p->state], 0);
+          RangeEnc_EncodeBit(&p->rc, &p->isRep0Long[p->state][posState], ((len == 1) ? 0 : 1));
+        }
+        else
+        {
+          UInt32 distance = p->reps[pos];
+          RangeEnc_EncodeBit(&p->rc, &p->isRepG0[p->state], 1);
+          if (pos == 1)
+            RangeEnc_EncodeBit(&p->rc, &p->isRepG1[p->state], 0);
+          else
+          {
+            RangeEnc_EncodeBit(&p->rc, &p->isRepG1[p->state], 1);
+            RangeEnc_EncodeBit(&p->rc, &p->isRepG2[p->state], pos - 2);
+            if (pos == 3)
+              p->reps[3] = p->reps[2];
+            p->reps[2] = p->reps[1];
+          }
+          p->reps[1] = p->reps[0];
+          p->reps[0] = distance;
+        }
+        if (len == 1)
+          p->state = kShortRepNextStates[p->state];
+        else
+        {
+          LenEnc_Encode2(&p->repLenEnc, &p->rc, len - LZMA_MATCH_LEN_MIN, posState, !p->fastMode, p->ProbPrices);
+          p->state = kRepNextStates[p->state];
+        }
+      }
+      else
+      {
+        UInt32 posSlot;
+        RangeEnc_EncodeBit(&p->rc, &p->isRep[p->state], 0);
+        p->state = kMatchNextStates[p->state];
+        LenEnc_Encode2(&p->lenEnc, &p->rc, len - LZMA_MATCH_LEN_MIN, posState, !p->fastMode, p->ProbPrices);
+        pos -= LZMA_NUM_REPS;
+        GetPosSlot(pos, posSlot);
+        RcTree_Encode(&p->rc, p->posSlotEncoder[GetLenToPosState(len)], kNumPosSlotBits, posSlot);
+        
+        if (posSlot >= kStartPosModelIndex)
+        {
+          UInt32 footerBits = ((posSlot >> 1) - 1);
+          UInt32 base = ((2 | (posSlot & 1)) << footerBits);
+          UInt32 posReduced = pos - base;
+
+          if (posSlot < kEndPosModelIndex)
+            RcTree_ReverseEncode(&p->rc, p->posEncoders + base - posSlot - 1, footerBits, posReduced);
+          else
+          {
+            RangeEnc_EncodeDirectBits(&p->rc, posReduced >> kNumAlignBits, footerBits - kNumAlignBits);
+            RcTree_ReverseEncode(&p->rc, p->posAlignEncoder, kNumAlignBits, posReduced & kAlignMask);
+            p->alignPriceCount++;
+          }
+        }
+        p->reps[3] = p->reps[2];
+        p->reps[2] = p->reps[1];
+        p->reps[1] = p->reps[0];
+        p->reps[0] = pos;
+        p->matchPriceCount++;
+      }
+    }
+    p->additionalOffset -= len;
+    nowPos32 += len;
+    if (p->additionalOffset == 0)
+    {
+      UInt32 processed;
+      if (!p->fastMode)
+      {
+        if (p->matchPriceCount >= (1 << 7))
+          FillDistancesPrices(p);
+        if (p->alignPriceCount >= kAlignTableSize)
+          FillAlignPrices(p);
+      }
+      if (p->matchFinder.GetNumAvailableBytes(p->matchFinderObj) == 0)
+        break;
+      processed = nowPos32 - startPos32;
+      if (useLimits)
+      {
+        if (processed + kNumOpts + 300 >= maxUnpackSize ||
+            RangeEnc_GetProcessed(&p->rc) + kNumOpts * 2 >= maxPackSize)
+          break;
+      }
+      else if (processed >= (1 << 15))
+      {
+        p->nowPos64 += nowPos32 - startPos32;
+        return CheckErrors(p);
+      }
+    }
+  }
+  p->nowPos64 += nowPos32 - startPos32;
+  return Flush(p, nowPos32);
+}
+
+#define kBigHashDicLimit ((UInt32)1 << 24)
+
+static SRes LzmaEnc_Alloc(CLzmaEnc *p, UInt32 keepWindowSize, ISzAlloc *alloc, ISzAlloc *allocBig)
+{
+  UInt32 beforeSize = kNumOpts;
+  Bool btMode;
+  if (!RangeEnc_Alloc(&p->rc, alloc))
+    return SZ_ERROR_MEM;
+  btMode = (p->matchFinderBase.btMode != 0);
+  #ifndef _7ZIP_ST
+  p->mtMode = (p->multiThread && !p->fastMode && btMode);
+  #endif
+
+  {
+    unsigned lclp = p->lc + p->lp;
+    if (p->litProbs == 0 || p->saveState.litProbs == 0 || p->lclp != lclp)
+    {
+      LzmaEnc_FreeLits(p, alloc);
+      p->litProbs = (CLzmaProb *)alloc->Alloc(alloc, (0x300 << lclp) * sizeof(CLzmaProb));
+      p->saveState.litProbs = (CLzmaProb *)alloc->Alloc(alloc, (0x300 << lclp) * sizeof(CLzmaProb));
+      if (p->litProbs == 0 || p->saveState.litProbs == 0)
+      {
+        LzmaEnc_FreeLits(p, alloc);
+        return SZ_ERROR_MEM;
+      }
+      p->lclp = lclp;
+    }
+  }
+
+  p->matchFinderBase.bigHash = (p->dictSize > kBigHashDicLimit);
+
+  if (beforeSize + p->dictSize < keepWindowSize)
+    beforeSize = keepWindowSize - p->dictSize;
+
+  #ifndef _7ZIP_ST
+  if (p->mtMode)
+  {
+    RINOK(MatchFinderMt_Create(&p->matchFinderMt, p->dictSize, beforeSize, p->numFastBytes, LZMA_MATCH_LEN_MAX, allocBig));
+    p->matchFinderObj = &p->matchFinderMt;
+    MatchFinderMt_CreateVTable(&p->matchFinderMt, &p->matchFinder);
+  }
+  else
+  #endif
+  {
+    if (!MatchFinder_Create(&p->matchFinderBase, p->dictSize, beforeSize, p->numFastBytes, LZMA_MATCH_LEN_MAX, allocBig))
+      return SZ_ERROR_MEM;
+    p->matchFinderObj = &p->matchFinderBase;
+    MatchFinder_CreateVTable(&p->matchFinderBase, &p->matchFinder);
+  }
+  return SZ_OK;
+}
+
+static void LzmaEnc_Init(CLzmaEnc *p)
+{
+  UInt32 i;
+  p->state = 0;
+  for (i = 0 ; i < LZMA_NUM_REPS; i++)
+    p->reps[i] = 0;
+
+  RangeEnc_Init(&p->rc);
+
+
+  for (i = 0; i < kNumStates; i++)
+  {
+    UInt32 j;
+    for (j = 0; j < LZMA_NUM_PB_STATES_MAX; j++)
+    {
+      p->isMatch[i][j] = kProbInitValue;
+      p->isRep0Long[i][j] = kProbInitValue;
+    }
+    p->isRep[i] = kProbInitValue;
+    p->isRepG0[i] = kProbInitValue;
+    p->isRepG1[i] = kProbInitValue;
+    p->isRepG2[i] = kProbInitValue;
+  }
+
+  {
+    UInt32 num = 0x300 << (p->lp + p->lc);
+    for (i = 0; i < num; i++)
+      p->litProbs[i] = kProbInitValue;
+  }
+
+  {
+    for (i = 0; i < kNumLenToPosStates; i++)
+    {
+      CLzmaProb *probs = p->posSlotEncoder[i];
+      UInt32 j;
+      for (j = 0; j < (1 << kNumPosSlotBits); j++)
+        probs[j] = kProbInitValue;
+    }
+  }
+  {
+    for (i = 0; i < kNumFullDistances - kEndPosModelIndex; i++)
+      p->posEncoders[i] = kProbInitValue;
+  }
+
+  LenEnc_Init(&p->lenEnc.p);
+  LenEnc_Init(&p->repLenEnc.p);
+
+  for (i = 0; i < (1 << kNumAlignBits); i++)
+    p->posAlignEncoder[i] = kProbInitValue;
+
+  p->optimumEndIndex = 0;
+  p->optimumCurrentIndex = 0;
+  p->additionalOffset = 0;
+
+  p->pbMask = (1 << p->pb) - 1;
+  p->lpMask = (1 << p->lp) - 1;
+}
+
+static void LzmaEnc_InitPrices(CLzmaEnc *p)
+{
+  if (!p->fastMode)
+  {
+    FillDistancesPrices(p);
+    FillAlignPrices(p);
+  }
+
+  p->lenEnc.tableSize =
+  p->repLenEnc.tableSize =
+      p->numFastBytes + 1 - LZMA_MATCH_LEN_MIN;
+  LenPriceEnc_UpdateTables(&p->lenEnc, 1 << p->pb, p->ProbPrices);
+  LenPriceEnc_UpdateTables(&p->repLenEnc, 1 << p->pb, p->ProbPrices);
+}
+
+static SRes LzmaEnc_AllocAndInit(CLzmaEnc *p, UInt32 keepWindowSize, ISzAlloc *alloc, ISzAlloc *allocBig)
+{
+  UInt32 i;
+  for (i = 0; i < (UInt32)kDicLogSizeMaxCompress; i++)
+    if (p->dictSize <= ((UInt32)1 << i))
+      break;
+  p->distTableSize = i * 2;
+
+  p->finished = False;
+  p->result = SZ_OK;
+  RINOK(LzmaEnc_Alloc(p, keepWindowSize, alloc, allocBig));
+  LzmaEnc_Init(p);
+  LzmaEnc_InitPrices(p);
+  p->nowPos64 = 0;
+  return SZ_OK;
+}
+
+static void LzmaEnc_SetInputBuf(CLzmaEnc *p, const Byte *src, SizeT srcLen)
+{
+  p->matchFinderBase.directInput = 1;
+  p->matchFinderBase.bufferBase = (Byte *)src;
+  p->matchFinderBase.directInputRem = srcLen;
+}
+
+static SRes LzmaEnc_MemPrepare(CLzmaEncHandle pp, const Byte *src, SizeT srcLen,
+    UInt32 keepWindowSize, ISzAlloc *alloc, ISzAlloc *allocBig)
+{
+  CLzmaEnc *p = (CLzmaEnc *)pp;
+  LzmaEnc_SetInputBuf(p, src, srcLen);
+  p->needInit = 1;
+
+  return LzmaEnc_AllocAndInit(p, keepWindowSize, alloc, allocBig);
+}
+
+static void LzmaEnc_Finish(CLzmaEncHandle pp)
+{
+  #ifndef _7ZIP_ST
+  CLzmaEnc *p = (CLzmaEnc *)pp;
+  if (p->mtMode)
+    MatchFinderMt_ReleaseStream(&p->matchFinderMt);
+  #else
+  pp = pp;
+  #endif
+}
+
+typedef struct
+{
+  ISeqOutStream funcTable;
+  Byte *data;
+  SizeT rem;
+  Bool overflow;
+} CSeqOutStreamBuf;
+
+static size_t MyWrite(void *pp, const void *data, size_t size)
+{
+  CSeqOutStreamBuf *p = (CSeqOutStreamBuf *)pp;
+  if (p->rem < size)
+  {
+    size = p->rem;
+    p->overflow = True;
+  }
+  memcpy(p->data, data, size);
+  p->rem -= size;
+  p->data += size;
+  return size;
+}
+
+static SRes LzmaEnc_Encode2(CLzmaEnc *p, ICompressProgress *progress)
+{
+  SRes res = SZ_OK;
+
+  #ifndef _7ZIP_ST
+  Byte allocaDummy[0x300];
+  int i = 0;
+  for (i = 0; i < 16; i++)
+    allocaDummy[i] = (Byte)i;
+  #endif
+
+  for (;;)
+  {
+    res = LzmaEnc_CodeOneBlock(p, False, 0, 0);
+    if (res != SZ_OK || p->finished != 0)
+      break;
+    if (progress != 0)
+    {
+      res = progress->Progress(progress, p->nowPos64, RangeEnc_GetProcessed(&p->rc));
+      if (res != SZ_OK)
+      {
+        res = SZ_ERROR_PROGRESS;
+        break;
+      }
+    }
+  }
+  LzmaEnc_Finish(p);
+  return res;
+}
+
+SRes LzmaEnc_WriteProperties(CLzmaEncHandle pp, Byte *props, SizeT *size)
+{
+  CLzmaEnc *p = (CLzmaEnc *)pp;
+  int i;
+  UInt32 dictSize = p->dictSize;
+  if (*size < LZMA_PROPS_SIZE)
+    return SZ_ERROR_PARAM;
+  *size = LZMA_PROPS_SIZE;
+  props[0] = (Byte)((p->pb * 5 + p->lp) * 9 + p->lc);
+
+  for (i = 11; i <= 30; i++)
+  {
+    if (dictSize <= ((UInt32)2 << i))
+    {
+      dictSize = (2 << i);
+      break;
+    }
+    if (dictSize <= ((UInt32)3 << i))
+    {
+      dictSize = (3 << i);
+      break;
+    }
+  }
+
+  for (i = 0; i < 4; i++)
+    props[1 + i] = (Byte)(dictSize >> (8 * i));
+  return SZ_OK;
+}
+
+SRes LzmaEnc_MemEncode(CLzmaEncHandle pp, Byte *dest, SizeT *destLen, const Byte *src, SizeT srcLen,
+    int writeEndMark, ICompressProgress *progress, ISzAlloc *alloc, ISzAlloc *allocBig)
+{
+  SRes res;
+  CLzmaEnc *p = (CLzmaEnc *)pp;
+
+  CSeqOutStreamBuf outStream;
+
+  LzmaEnc_SetInputBuf(p, src, srcLen);
+
+  outStream.funcTable.Write = MyWrite;
+  outStream.data = dest;
+  outStream.rem = *destLen;
+  outStream.overflow = False;
+
+  p->writeEndMark = writeEndMark;
+
+  p->rc.outStream = &outStream.funcTable;
+  res = LzmaEnc_MemPrepare(pp, src, srcLen, 0, alloc, allocBig);
+  if (res == SZ_OK)
+    res = LzmaEnc_Encode2(p, progress);
+
+  *destLen -= outStream.rem;
+  if (outStream.overflow)
+    return SZ_ERROR_OUTPUT_EOF;
+  return res;
+}
diff --git a/lib/lzma/Makefile b/lib/lzma/Makefile
new file mode 100644
index 000000000..02e799c99
--- /dev/null
+++ b/lib/lzma/Makefile
@@ -0,0 +1,7 @@
+lzma_compress-objs := LzFind.o LzmaEnc.o
+lzma_decompress-objs := LzmaDec.o
+
+obj-$(CONFIG_LZMA_COMPRESS) += lzma_compress.o
+obj-$(CONFIG_LZMA_DECOMPRESS) += lzma_decompress.o
+
+EXTRA_CFLAGS += -Iinclude/linux -Iinclude/linux/lzma -include types.h
diff --git a/lib/vsprintf.c b/lib/vsprintf.c
index 0967771d8..5d27eae49 100644
--- a/lib/vsprintf.c
+++ b/lib/vsprintf.c
@@ -669,8 +669,10 @@ char *symbol_string(char *buf, char *end, void *ptr,
 		    struct printf_spec spec, const char *fmt)
 {
 	unsigned long value;
-#ifdef CONFIG_KALLSYMS
 	char sym[KSYM_SYMBOL_LEN];
+#ifndef CONFIG_KALLSYMS
+	struct module *mod;
+	int len;
 #endif
 
 	if (fmt[1] == 'R')
@@ -684,11 +686,16 @@ char *symbol_string(char *buf, char *end, void *ptr,
 		sprint_symbol(sym, value);
 	else
 		sprint_symbol_no_offset(sym, value);
-
-	return string(buf, end, sym, spec);
 #else
-	return special_hex_number(buf, end, value, sizeof(void *));
+	len = snprintf(sym, sizeof(sym), "0x%lx", value);
+
+	mod = __module_address(value);
+	if (mod)
+		snprintf(sym + len, sizeof(sym) - len, " [%s@%p+0x%x]",
+			 mod->name, mod->core_layout.base,
+			 mod->core_layout.size);
 #endif
+	return string(buf, end, sym, spec);
 }
 
 static noinline_for_stack
diff --git a/net/Makefile b/net/Makefile
index 4cafaa2b4..6566175e6 100644
--- a/net/Makefile
+++ b/net/Makefile
@@ -51,7 +51,7 @@ obj-$(CONFIG_MAC80211)		+= mac80211/
 obj-$(CONFIG_TIPC)		+= tipc/
 obj-$(CONFIG_NETLABEL)		+= netlabel/
 obj-$(CONFIG_IUCV)		+= iucv/
-obj-$(CONFIG_RFKILL)		+= rfkill/
+obj-$(CONFIG_RFKILL_FULL)	+= rfkill/
 obj-$(CONFIG_NET_9P)		+= 9p/
 obj-$(CONFIG_CAIF)		+= caif/
 ifneq ($(CONFIG_DCB),)
diff --git a/net/bridge/br_forward.c b/net/bridge/br_forward.c
index 8498e3503..e671a4d44 100644
--- a/net/bridge/br_forward.c
+++ b/net/bridge/br_forward.c
@@ -141,6 +141,9 @@ static int deliver_clone(const struct net_bridge_port *prev,
 void br_forward(const struct net_bridge_port *to,
 		struct sk_buff *skb, bool local_rcv, bool local_orig)
 {
+	if (to->flags & BR_ISOLATE_MODE)
+		to = NULL;
+
 	if (to && should_deliver(to, skb)) {
 		if (local_rcv)
 			deliver_clone(to, skb, local_orig);
@@ -174,6 +177,29 @@ out:
 	return p;
 }
 
+static void maybe_deliver_addr(struct net_bridge_port *p, struct sk_buff *skb,
+			       const unsigned char *addr, bool local_orig)
+{
+	struct net_device *dev = BR_INPUT_SKB_CB(skb)->brdev;
+	const unsigned char *src = eth_hdr(skb)->h_source;
+
+	if (!should_deliver(p, skb))
+		return;
+
+	/* Even with hairpin, no soliloquies - prevent breaking IPv6 DAD */
+	if (skb->dev == p->dev && ether_addr_equal(src, addr))
+		return;
+
+	skb = skb_copy(skb, GFP_ATOMIC);
+	if (!skb) {
+		dev->stats.tx_dropped++;
+		return;
+	}
+
+	memcpy(eth_hdr(skb)->h_dest, addr, ETH_ALEN);
+	__br_forward(p, skb, local_orig);
+}
+
 /* called under rcu_read_lock */
 void br_flood(struct net_bridge *br, struct sk_buff *skb,
 	      enum br_pkt_type pkt_type, bool local_rcv, bool local_orig)
@@ -183,6 +209,8 @@ void br_flood(struct net_bridge *br, struct sk_buff *skb,
 	struct net_bridge_port *p;
 
 	list_for_each_entry_rcu(p, &br->port_list, list) {
+		if (!local_orig && (p->flags & BR_ISOLATE_MODE))
+			continue;
 		/* Do not flood unicast traffic to ports that turn it off */
 		if (pkt_type == BR_PKT_UNICAST && !(p->flags & BR_FLOOD))
 			continue;
@@ -242,10 +270,20 @@ void br_multicast_flood(struct net_bridge_mdb_entry *mdst,
 		rport = rp ? hlist_entry(rp, struct net_bridge_port, rlist) :
 			     NULL;
 
-		port = (unsigned long)lport > (unsigned long)rport ?
-		       lport : rport;
+		if ((unsigned long)lport > (unsigned long)rport) {
+			port = lport;
+
+			if (p->flags & MDB_PG_FLAGS_MCAST_TO_UCAST) {
+				maybe_deliver_addr(lport, skb, p->eth_addr,
+						   local_orig);
+				goto delivered;
+			}
+		} else {
+			port = rport;
+		}
 
 		prev = maybe_deliver(prev, port, skb, local_orig);
+delivered:
 		if (IS_ERR(prev))
 			goto out;
 		if (prev == port)
diff --git a/net/bridge/br_input.c b/net/bridge/br_input.c
index 267b46af4..d1d189b64 100644
--- a/net/bridge/br_input.c
+++ b/net/bridge/br_input.c
@@ -164,14 +164,20 @@ int br_handle_frame_finish(struct net *net, struct sock *sk, struct sk_buff *skb
 		}
 	}
 
+	BR_INPUT_SKB_CB(skb)->brdev = br->dev;
+
+	if (skb->protocol == htons(ETH_P_PAE))
+		return br_pass_frame_up(skb);
+
 	if (p->state == BR_STATE_LEARNING)
 		goto drop;
 
-	BR_INPUT_SKB_CB(skb)->brdev = br->dev;
-
 	if (IS_ENABLED(CONFIG_INET) && skb->protocol == htons(ETH_P_ARP))
 		br_do_proxy_arp(skb, br, vid, p);
 
+	if (p->flags & BR_ISOLATE_MODE)
+		return br_pass_frame_up(skb);
+
 	switch (pkt_type) {
 	case BR_PKT_MULTICAST:
 		mdst = br_mdb_get(br, skb, vid);
@@ -233,7 +239,8 @@ static int br_handle_local_finish(struct net *net, struct sock *sk, struct sk_bu
 {
 	struct net_bridge_port *p = br_port_get_rcu(skb->dev);
 
-	__br_handle_local_finish(skb);
+	if (p->state != BR_STATE_DISABLED)
+		__br_handle_local_finish(skb);
 
 	BR_INPUT_SKB_CB(skb)->brdev = p->br->dev;
 	br_pass_frame_up(skb);
@@ -316,6 +323,15 @@ rx_handler_result_t br_handle_frame(struct sk_buff **pskb)
 
 forward:
 	switch (p->state) {
+	case BR_STATE_DISABLED:
+		if (ether_addr_equal(p->br->dev->dev_addr, dest))
+			skb->pkt_type = PACKET_HOST;
+
+		NF_HOOK(NFPROTO_BRIDGE, NF_BR_PRE_ROUTING,
+			dev_net(skb->dev), NULL, skb, skb->dev, NULL,
+			br_handle_local_finish);
+		break;
+
 	case BR_STATE_FORWARDING:
 		rhook = rcu_dereference(br_should_route_hook);
 		if (rhook) {
diff --git a/net/bridge/br_mdb.c b/net/bridge/br_mdb.c
index 6406010e1..57e94a1b5 100644
--- a/net/bridge/br_mdb.c
+++ b/net/bridge/br_mdb.c
@@ -532,7 +532,7 @@ static int br_mdb_add_group(struct net_bridge *br, struct net_bridge_port *port,
 			break;
 	}
 
-	p = br_multicast_new_port_group(port, group, *pp, state);
+	p = br_multicast_new_port_group(port, group, *pp, state, NULL);
 	if (unlikely(!p))
 		return -ENOMEM;
 	rcu_assign_pointer(*pp, p);
diff --git a/net/bridge/br_multicast.c b/net/bridge/br_multicast.c
index 2136e45f5..8cc0c252f 100644
--- a/net/bridge/br_multicast.c
+++ b/net/bridge/br_multicast.c
@@ -42,12 +42,14 @@ static void br_multicast_add_router(struct net_bridge *br,
 static void br_ip4_multicast_leave_group(struct net_bridge *br,
 					 struct net_bridge_port *port,
 					 __be32 group,
-					 __u16 vid);
+					 __u16 vid,
+					 const unsigned char *src);
+
 #if IS_ENABLED(CONFIG_IPV6)
 static void br_ip6_multicast_leave_group(struct net_bridge *br,
 					 struct net_bridge_port *port,
 					 const struct in6_addr *group,
-					 __u16 vid);
+					 __u16 vid, const unsigned char *src);
 #endif
 unsigned int br_mdb_rehash_seq;
 
@@ -658,7 +660,8 @@ struct net_bridge_port_group *br_multicast_new_port_group(
 			struct net_bridge_port *port,
 			struct br_ip *group,
 			struct net_bridge_port_group __rcu *next,
-			unsigned char flags)
+			unsigned char flags,
+			const unsigned char *src)
 {
 	struct net_bridge_port_group *p;
 
@@ -673,12 +676,39 @@ struct net_bridge_port_group *br_multicast_new_port_group(
 	hlist_add_head(&p->mglist, &port->mglist);
 	setup_timer(&p->timer, br_multicast_port_group_expired,
 		    (unsigned long)p);
+
+	if ((port->flags & BR_MULTICAST_TO_UNICAST) && src) {
+		memcpy(p->eth_addr, src, ETH_ALEN);
+		p->flags |= MDB_PG_FLAGS_MCAST_TO_UCAST;
+	}
+
 	return p;
 }
 
+static bool br_port_group_equal(struct net_bridge_port_group *p,
+				struct net_bridge_port *port,
+				const unsigned char *src)
+{
+	if (p->port != port)
+		return false;
+
+	if (!(p->flags & MDB_PG_FLAGS_MCAST_TO_UCAST) !=
+	    !(port->flags & BR_MULTICAST_TO_UNICAST))
+		return false;
+
+	if (!(p->flags & MDB_PG_FLAGS_MCAST_TO_UCAST))
+		return true;
+
+	if (!src)
+		return false;
+
+	return ether_addr_equal(src, p->eth_addr);
+}
+
 static int br_multicast_add_group(struct net_bridge *br,
 				  struct net_bridge_port *port,
-				  struct br_ip *group)
+				  struct br_ip *group,
+				  const unsigned char *src)
 {
 	struct net_bridge_mdb_entry *mp;
 	struct net_bridge_port_group *p;
@@ -705,13 +735,13 @@ static int br_multicast_add_group(struct net_bridge *br,
 	for (pp = &mp->ports;
 	     (p = mlock_dereference(*pp, br)) != NULL;
 	     pp = &p->next) {
-		if (p->port == port)
+		if (br_port_group_equal(p, port, src))
 			goto found;
 		if ((unsigned long)p->port < (unsigned long)port)
 			break;
 	}
 
-	p = br_multicast_new_port_group(port, group, *pp, 0);
+	p = br_multicast_new_port_group(port, group, *pp, 0, src);
 	if (unlikely(!p))
 		goto err;
 	rcu_assign_pointer(*pp, p);
@@ -730,7 +760,8 @@ err:
 static int br_ip4_multicast_add_group(struct net_bridge *br,
 				      struct net_bridge_port *port,
 				      __be32 group,
-				      __u16 vid)
+				      __u16 vid,
+				      const unsigned char *src)
 {
 	struct br_ip br_group;
 
@@ -741,14 +772,15 @@ static int br_ip4_multicast_add_group(struct net_bridge *br,
 	br_group.proto = htons(ETH_P_IP);
 	br_group.vid = vid;
 
-	return br_multicast_add_group(br, port, &br_group);
+	return br_multicast_add_group(br, port, &br_group, src);
 }
 
 #if IS_ENABLED(CONFIG_IPV6)
 static int br_ip6_multicast_add_group(struct net_bridge *br,
 				      struct net_bridge_port *port,
 				      const struct in6_addr *group,
-				      __u16 vid)
+				      __u16 vid,
+				      const unsigned char *src)
 {
 	struct br_ip br_group;
 
@@ -759,7 +791,7 @@ static int br_ip6_multicast_add_group(struct net_bridge *br,
 	br_group.proto = htons(ETH_P_IPV6);
 	br_group.vid = vid;
 
-	return br_multicast_add_group(br, port, &br_group);
+	return br_multicast_add_group(br, port, &br_group, src);
 }
 #endif
 
@@ -1028,6 +1060,7 @@ static int br_ip4_multicast_igmp3_report(struct net_bridge *br,
 					 struct sk_buff *skb,
 					 u16 vid)
 {
+	const unsigned char *src;
 	struct igmpv3_report *ih;
 	struct igmpv3_grec *grec;
 	int i;
@@ -1068,12 +1101,14 @@ static int br_ip4_multicast_igmp3_report(struct net_bridge *br,
 			continue;
 		}
 
+		src = eth_hdr(skb)->h_source;
 		if ((type == IGMPV3_CHANGE_TO_INCLUDE ||
 		     type == IGMPV3_MODE_IS_INCLUDE) &&
 		    ntohs(grec->grec_nsrcs) == 0) {
-			br_ip4_multicast_leave_group(br, port, group, vid);
+			br_ip4_multicast_leave_group(br, port, group, vid, src);
 		} else {
-			err = br_ip4_multicast_add_group(br, port, group, vid);
+			err = br_ip4_multicast_add_group(br, port, group, vid,
+							 src);
 			if (err)
 				break;
 		}
@@ -1088,6 +1123,7 @@ static int br_ip6_multicast_mld2_report(struct net_bridge *br,
 					struct sk_buff *skb,
 					u16 vid)
 {
+	const unsigned char *src = eth_hdr(skb)->h_source;
 	struct icmp6hdr *icmp6h;
 	struct mld2_grec *grec;
 	int i;
@@ -1139,10 +1175,11 @@ static int br_ip6_multicast_mld2_report(struct net_bridge *br,
 		     grec->grec_type == MLD2_MODE_IS_INCLUDE) &&
 		    ntohs(*nsrcs) == 0) {
 			br_ip6_multicast_leave_group(br, port, &grec->grec_mca,
-						     vid);
+						     vid, src);
 		} else {
 			err = br_ip6_multicast_add_group(br, port,
-							 &grec->grec_mca, vid);
+							 &grec->grec_mca, vid,
+							 src);
 			if (err)
 				break;
 		}
@@ -1458,7 +1495,8 @@ br_multicast_leave_group(struct net_bridge *br,
 			 struct net_bridge_port *port,
 			 struct br_ip *group,
 			 struct bridge_mcast_other_query *other_query,
-			 struct bridge_mcast_own_query *own_query)
+			 struct bridge_mcast_own_query *own_query,
+			 const unsigned char *src)
 {
 	struct net_bridge_mdb_htable *mdb;
 	struct net_bridge_mdb_entry *mp;
@@ -1482,7 +1520,7 @@ br_multicast_leave_group(struct net_bridge *br,
 		for (pp = &mp->ports;
 		     (p = mlock_dereference(*pp, br)) != NULL;
 		     pp = &p->next) {
-			if (p->port != port)
+			if (!br_port_group_equal(p, port, src))
 				continue;
 
 			rcu_assign_pointer(*pp, p->next);
@@ -1513,7 +1551,7 @@ br_multicast_leave_group(struct net_bridge *br,
 		for (p = mlock_dereference(mp->ports, br);
 		     p != NULL;
 		     p = mlock_dereference(p->next, br)) {
-			if (p->port != port)
+			if (!br_port_group_equal(p, port, src))
 				continue;
 
 			if (!hlist_unhashed(&p->mglist) &&
@@ -1564,7 +1602,8 @@ out:
 static void br_ip4_multicast_leave_group(struct net_bridge *br,
 					 struct net_bridge_port *port,
 					 __be32 group,
-					 __u16 vid)
+					 __u16 vid,
+					 const unsigned char *src)
 {
 	struct br_ip br_group;
 	struct bridge_mcast_own_query *own_query;
@@ -1579,14 +1618,15 @@ static void br_ip4_multicast_leave_group(struct net_bridge *br,
 	br_group.vid = vid;
 
 	br_multicast_leave_group(br, port, &br_group, &br->ip4_other_query,
-				 own_query);
+				 own_query, src);
 }
 
 #if IS_ENABLED(CONFIG_IPV6)
 static void br_ip6_multicast_leave_group(struct net_bridge *br,
 					 struct net_bridge_port *port,
 					 const struct in6_addr *group,
-					 __u16 vid)
+					 __u16 vid,
+					 const unsigned char *src)
 {
 	struct br_ip br_group;
 	struct bridge_mcast_own_query *own_query;
@@ -1601,7 +1641,7 @@ static void br_ip6_multicast_leave_group(struct net_bridge *br,
 	br_group.vid = vid;
 
 	br_multicast_leave_group(br, port, &br_group, &br->ip6_other_query,
-				 own_query);
+				 own_query, src);
 }
 #endif
 
@@ -1644,6 +1684,7 @@ static int br_multicast_ipv4_rcv(struct net_bridge *br,
 				 u16 vid)
 {
 	struct sk_buff *skb_trimmed = NULL;
+	const unsigned char *src;
 	struct igmphdr *ih;
 	int err;
 
@@ -1659,13 +1700,14 @@ static int br_multicast_ipv4_rcv(struct net_bridge *br,
 	}
 
 	ih = igmp_hdr(skb);
+	src = eth_hdr(skb)->h_source;
 	BR_INPUT_SKB_CB(skb)->igmp = ih->type;
 
 	switch (ih->type) {
 	case IGMP_HOST_MEMBERSHIP_REPORT:
 	case IGMPV2_HOST_MEMBERSHIP_REPORT:
 		BR_INPUT_SKB_CB(skb)->mrouters_only = 1;
-		err = br_ip4_multicast_add_group(br, port, ih->group, vid);
+		err = br_ip4_multicast_add_group(br, port, ih->group, vid, src);
 		break;
 	case IGMPV3_HOST_MEMBERSHIP_REPORT:
 		err = br_ip4_multicast_igmp3_report(br, port, skb_trimmed, vid);
@@ -1674,7 +1716,7 @@ static int br_multicast_ipv4_rcv(struct net_bridge *br,
 		err = br_ip4_multicast_query(br, port, skb_trimmed, vid);
 		break;
 	case IGMP_HOST_LEAVE_MESSAGE:
-		br_ip4_multicast_leave_group(br, port, ih->group, vid);
+		br_ip4_multicast_leave_group(br, port, ih->group, vid, src);
 		break;
 	}
 
@@ -1694,6 +1736,7 @@ static int br_multicast_ipv6_rcv(struct net_bridge *br,
 				 u16 vid)
 {
 	struct sk_buff *skb_trimmed = NULL;
+	const unsigned char *src;
 	struct mld_msg *mld;
 	int err;
 
@@ -1713,8 +1756,10 @@ static int br_multicast_ipv6_rcv(struct net_bridge *br,
 
 	switch (mld->mld_type) {
 	case ICMPV6_MGM_REPORT:
+		src = eth_hdr(skb)->h_source;
 		BR_INPUT_SKB_CB(skb)->mrouters_only = 1;
-		err = br_ip6_multicast_add_group(br, port, &mld->mld_mca, vid);
+		err = br_ip6_multicast_add_group(br, port, &mld->mld_mca, vid,
+						 src);
 		break;
 	case ICMPV6_MLD2_REPORT:
 		err = br_ip6_multicast_mld2_report(br, port, skb_trimmed, vid);
@@ -1723,7 +1768,8 @@ static int br_multicast_ipv6_rcv(struct net_bridge *br,
 		err = br_ip6_multicast_query(br, port, skb_trimmed, vid);
 		break;
 	case ICMPV6_MGM_REDUCTION:
-		br_ip6_multicast_leave_group(br, port, &mld->mld_mca, vid);
+		src = eth_hdr(skb)->h_source;
+		br_ip6_multicast_leave_group(br, port, &mld->mld_mca, vid, src);
 		break;
 	}
 
diff --git a/net/bridge/br_netlink.c b/net/bridge/br_netlink.c
index 5d4006e58..b8ff1479a 100644
--- a/net/bridge/br_netlink.c
+++ b/net/bridge/br_netlink.c
@@ -123,6 +123,7 @@ static inline size_t br_port_info_size(void)
 		+ nla_total_size(1)	/* IFLA_BRPORT_GUARD */
 		+ nla_total_size(1)	/* IFLA_BRPORT_PROTECT */
 		+ nla_total_size(1)	/* IFLA_BRPORT_FAST_LEAVE */
+		+ nla_total_size(1)	/* IFLA_BRPORT_MCAST_TO_UCAST */
 		+ nla_total_size(1)	/* IFLA_BRPORT_LEARNING */
 		+ nla_total_size(1)	/* IFLA_BRPORT_UNICAST_FLOOD */
 		+ nla_total_size(1)	/* IFLA_BRPORT_PROXYARP */
@@ -173,6 +174,8 @@ static int br_port_fill_attrs(struct sk_buff *skb,
 		       !!(p->flags & BR_ROOT_BLOCK)) ||
 	    nla_put_u8(skb, IFLA_BRPORT_FAST_LEAVE,
 		       !!(p->flags & BR_MULTICAST_FAST_LEAVE)) ||
+	    nla_put_u8(skb, IFLA_BRPORT_MCAST_TO_UCAST,
+		       !!(p->flags & BR_MULTICAST_TO_UNICAST)) ||
 	    nla_put_u8(skb, IFLA_BRPORT_LEARNING, !!(p->flags & BR_LEARNING)) ||
 	    nla_put_u8(skb, IFLA_BRPORT_UNICAST_FLOOD,
 		       !!(p->flags & BR_FLOOD)) ||
@@ -586,6 +589,7 @@ static const struct nla_policy br_port_policy[IFLA_BRPORT_MAX + 1] = {
 	[IFLA_BRPORT_PROXYARP]	= { .type = NLA_U8 },
 	[IFLA_BRPORT_PROXYARP_WIFI] = { .type = NLA_U8 },
 	[IFLA_BRPORT_MULTICAST_ROUTER] = { .type = NLA_U8 },
+	[IFLA_BRPORT_MCAST_TO_UCAST] = { .type = NLA_U8 },
 };
 
 /* Change the state of the port and notify spanning tree */
@@ -636,6 +640,7 @@ static int br_setport(struct net_bridge_port *p, struct nlattr *tb[])
 	br_set_port_flag(p, tb, IFLA_BRPORT_LEARNING, BR_LEARNING);
 	br_set_port_flag(p, tb, IFLA_BRPORT_UNICAST_FLOOD, BR_FLOOD);
 	br_set_port_flag(p, tb, IFLA_BRPORT_MCAST_FLOOD, BR_MCAST_FLOOD);
+	br_set_port_flag(p, tb, IFLA_BRPORT_MCAST_TO_UCAST, BR_MULTICAST_TO_UNICAST);
 	br_set_port_flag(p, tb, IFLA_BRPORT_PROXYARP, BR_PROXYARP);
 	br_set_port_flag(p, tb, IFLA_BRPORT_PROXYARP_WIFI, BR_PROXYARP_WIFI);
 
diff --git a/net/bridge/br_private.h b/net/bridge/br_private.h
index 1b63177e0..2be5687c6 100644
--- a/net/bridge/br_private.h
+++ b/net/bridge/br_private.h
@@ -166,8 +166,9 @@ struct net_bridge_fdb_entry
 	struct rcu_head			rcu;
 };
 
-#define MDB_PG_FLAGS_PERMANENT	BIT(0)
-#define MDB_PG_FLAGS_OFFLOAD	BIT(1)
+#define MDB_PG_FLAGS_PERMANENT		BIT(0)
+#define MDB_PG_FLAGS_OFFLOAD		BIT(1)
+#define MDB_PG_FLAGS_MCAST_TO_UCAST	BIT(2)
 
 struct net_bridge_port_group {
 	struct net_bridge_port		*port;
@@ -177,6 +178,7 @@ struct net_bridge_port_group {
 	struct timer_list		timer;
 	struct br_ip			addr;
 	unsigned char			flags;
+	unsigned char			eth_addr[ETH_ALEN];
 };
 
 struct net_bridge_mdb_entry
@@ -591,7 +593,7 @@ void br_multicast_free_pg(struct rcu_head *head);
 struct net_bridge_port_group *
 br_multicast_new_port_group(struct net_bridge_port *port, struct br_ip *group,
 			    struct net_bridge_port_group __rcu *next,
-			    unsigned char flags);
+			    unsigned char flags, const unsigned char *src);
 void br_mdb_init(void);
 void br_mdb_uninit(void);
 void br_mdb_notify(struct net_device *dev, struct net_bridge_port *port,
diff --git a/net/bridge/br_sysfs_if.c b/net/bridge/br_sysfs_if.c
index 8bd569695..8b5d08f1a 100644
--- a/net/bridge/br_sysfs_if.c
+++ b/net/bridge/br_sysfs_if.c
@@ -172,6 +172,7 @@ BRPORT_ATTR_FLAG(unicast_flood, BR_FLOOD);
 BRPORT_ATTR_FLAG(proxyarp, BR_PROXYARP);
 BRPORT_ATTR_FLAG(proxyarp_wifi, BR_PROXYARP_WIFI);
 BRPORT_ATTR_FLAG(multicast_flood, BR_MCAST_FLOOD);
+BRPORT_ATTR_FLAG(isolate_mode, BR_ISOLATE_MODE);
 
 #ifdef CONFIG_BRIDGE_IGMP_SNOOPING
 static ssize_t show_multicast_router(struct net_bridge_port *p, char *buf)
@@ -188,6 +189,7 @@ static BRPORT_ATTR(multicast_router, S_IRUGO | S_IWUSR, show_multicast_router,
 		   store_multicast_router);
 
 BRPORT_ATTR_FLAG(multicast_fast_leave, BR_MULTICAST_FAST_LEAVE);
+BRPORT_ATTR_FLAG(multicast_to_unicast, BR_MULTICAST_TO_UNICAST);
 #endif
 
 static const struct brport_attribute *brport_attrs[] = {
@@ -214,10 +216,12 @@ static const struct brport_attribute *brport_attrs[] = {
 #ifdef CONFIG_BRIDGE_IGMP_SNOOPING
 	&brport_attr_multicast_router,
 	&brport_attr_multicast_fast_leave,
+	&brport_attr_multicast_to_unicast,
 #endif
 	&brport_attr_proxyarp,
 	&brport_attr_proxyarp_wifi,
 	&brport_attr_multicast_flood,
+	&brport_attr_isolate_mode,
 	NULL
 };
 
diff --git a/net/core/dev.c b/net/core/dev.c
index a4562c0ac..cd01c4697 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -4532,6 +4532,9 @@ static enum gro_result dev_gro_receive(struct napi_struct *napi, struct sk_buff
 	enum gro_result ret;
 	int grow;
 
+	if (skb->gro_skip)
+		goto normal;
+
 	if (!(skb->dev->features & NETIF_F_GRO))
 		goto normal;
 
@@ -5820,6 +5823,48 @@ static void __netdev_adjacent_dev_unlink_neighbour(struct net_device *dev,
 					   &upper_dev->adj_list.lower);
 }
 
+static void __netdev_addr_mask(unsigned char *mask, const unsigned char *addr,
+			       struct net_device *dev)
+{
+	int i;
+
+	for (i = 0; i < dev->addr_len; i++)
+		mask[i] |= addr[i] ^ dev->dev_addr[i];
+}
+
+static void __netdev_upper_mask(unsigned char *mask, struct net_device *dev,
+				struct net_device *lower)
+{
+	struct net_device *cur;
+	struct list_head *iter;
+
+	netdev_for_each_upper_dev_rcu(dev, cur, iter) {
+		__netdev_addr_mask(mask, cur->dev_addr, lower);
+		__netdev_upper_mask(mask, cur, lower);
+	}
+}
+
+static void __netdev_update_addr_mask(struct net_device *dev)
+{
+	unsigned char mask[MAX_ADDR_LEN];
+	struct net_device *cur;
+	struct list_head *iter;
+
+	memset(mask, 0, sizeof(mask));
+	__netdev_upper_mask(mask, dev, dev);
+	memcpy(dev->local_addr_mask, mask, dev->addr_len);
+
+	netdev_for_each_lower_dev(dev, cur, iter)
+		__netdev_update_addr_mask(cur);
+}
+
+static void netdev_update_addr_mask(struct net_device *dev)
+{
+	rcu_read_lock();
+	__netdev_update_addr_mask(dev);
+	rcu_read_unlock();
+}
+
 static int __netdev_upper_dev_link(struct net_device *dev,
 				   struct net_device *upper_dev, bool master,
 				   void *upper_priv, void *upper_info)
@@ -6018,6 +6063,7 @@ void netdev_upper_dev_unlink(struct net_device *dev,
 	list_for_each_entry(i, &upper_dev->all_adj_list.upper, list)
 		__netdev_adjacent_dev_unlink(dev, i->dev, i->ref_nr);
 
+	netdev_update_addr_mask(dev);
 	call_netdevice_notifiers_info(NETDEV_CHANGEUPPER, dev,
 				      &changeupper_info.info);
 }
@@ -6618,6 +6665,7 @@ int dev_set_mac_address(struct net_device *dev, struct sockaddr *sa)
 	if (err)
 		return err;
 	dev->addr_assign_type = NET_ADDR_SET;
+	netdev_update_addr_mask(dev);
 	call_netdevice_notifiers(NETDEV_CHANGEADDR, dev);
 	add_device_randomness(dev->dev_addr, dev->addr_len);
 	return 0;
diff --git a/net/ethernet/eth.c b/net/ethernet/eth.c
index 24d7aff8d..6a6d90b9a 100644
--- a/net/ethernet/eth.c
+++ b/net/ethernet/eth.c
@@ -143,6 +143,18 @@ u32 eth_get_headlen(void *data, unsigned int len)
 }
 EXPORT_SYMBOL(eth_get_headlen);
 
+static inline bool
+eth_check_local_mask(const void *addr1, const void *addr2, const void *mask)
+{
+	const u16 *a1 = addr1;
+	const u16 *a2 = addr2;
+	const u16 *m = mask;
+
+	return (((a1[0] ^ a2[0]) & ~m[0]) |
+		((a1[1] ^ a2[1]) & ~m[1]) |
+		((a1[2] ^ a2[2]) & ~m[2]));
+}
+
 /**
  * eth_type_trans - determine the packet's protocol ID.
  * @skb: received socket data
@@ -171,8 +183,12 @@ __be16 eth_type_trans(struct sk_buff *skb, struct net_device *dev)
 			skb->pkt_type = PACKET_MULTICAST;
 	}
 	else if (unlikely(!ether_addr_equal_64bits(eth->h_dest,
-						   dev->dev_addr)))
+						   dev->dev_addr))) {
 		skb->pkt_type = PACKET_OTHERHOST;
+		if (eth_check_local_mask(eth->h_dest, dev->dev_addr,
+					 dev->local_addr_mask))
+			skb->gro_skip = 1;
+	}
 
 	/*
 	 * Some variants of DSA tagging don't have an ethertype field
@@ -391,6 +407,34 @@ struct net_device *alloc_etherdev_mqs(int sizeof_priv, unsigned int txqs,
 }
 EXPORT_SYMBOL(alloc_etherdev_mqs);
 
+static void devm_free_netdev(struct device *dev, void *res)
+{
+	free_netdev(*(struct net_device **)res);
+}
+
+struct net_device *devm_alloc_etherdev_mqs(struct device *dev, int sizeof_priv,
+					   unsigned int txqs, unsigned int rxqs)
+{
+	struct net_device **dr;
+	struct net_device *netdev;
+
+	dr = devres_alloc(devm_free_netdev, sizeof(*dr), GFP_KERNEL);
+	if (!dr)
+		return NULL;
+
+	netdev = alloc_etherdev_mqs(sizeof_priv, txqs, rxqs);
+	if (!netdev) {
+		devres_free(dr);
+		return NULL;
+	}
+
+	*dr = netdev;
+	devres_add(dev, dr);
+
+	return netdev;
+}
+EXPORT_SYMBOL(devm_alloc_etherdev_mqs);
+
 ssize_t sysfs_format_mac(char *buf, const unsigned char *addr, int len)
 {
 	return scnprintf(buf, PAGE_SIZE, "%*phC\n", len, addr);
diff --git a/net/ipv4/fib_semantics.c b/net/ipv4/fib_semantics.c
index 38c1c979e..0e4b8f608 100644
--- a/net/ipv4/fib_semantics.c
+++ b/net/ipv4/fib_semantics.c
@@ -138,6 +138,10 @@ const struct fib_prop fib_props[RTN_MAX + 1] = {
 		.error	= -EINVAL,
 		.scope	= RT_SCOPE_NOWHERE,
 	},
+	[RTN_POLICY_FAILED] = {
+		.error	= -EACCES,
+		.scope	= RT_SCOPE_UNIVERSE,
+	},
 };
 
 static void rt_fibinfo_free(struct rtable __rcu **rtp)
diff --git a/net/ipv4/fib_trie.c b/net/ipv4/fib_trie.c
index ef40bb659..e3dd1d9c1 100644
--- a/net/ipv4/fib_trie.c
+++ b/net/ipv4/fib_trie.c
@@ -2396,6 +2396,7 @@ static const char *const rtn_type_names[__RTN_MAX] = {
 	[RTN_THROW] = "THROW",
 	[RTN_NAT] = "NAT",
 	[RTN_XRESOLVE] = "XRESOLVE",
+	[RTN_POLICY_FAILED] = "POLICY_FAILED",
 };
 
 static inline const char *rtn_type(char *buf, size_t len, unsigned int t)
diff --git a/net/ipv4/ipmr.c b/net/ipv4/ipmr.c
index 27089f5eb..b51d1f226 100644
--- a/net/ipv4/ipmr.c
+++ b/net/ipv4/ipmr.c
@@ -157,6 +157,7 @@ static int ipmr_rule_action(struct fib_rule *rule, struct flowi *flp,
 	case FR_ACT_UNREACHABLE:
 		return -ENETUNREACH;
 	case FR_ACT_PROHIBIT:
+	case FR_ACT_POLICY_FAILED:
 		return -EACCES;
 	case FR_ACT_BLACKHOLE:
 	default:
diff --git a/net/ipv4/netfilter/ip_tables.c b/net/ipv4/netfilter/ip_tables.c
index 7c00ce90a..ceead096a 100644
--- a/net/ipv4/netfilter/ip_tables.c
+++ b/net/ipv4/netfilter/ip_tables.c
@@ -58,9 +58,12 @@ ip_packet_match(const struct iphdr *ip,
 {
 	unsigned long ret;
 
-	if (NF_INVF(ipinfo, IPT_INV_SRCIP,
+	if (ipinfo->flags & IPT_F_NO_DEF_MATCH)
+		return true;
+
+	if (NF_INVF(ipinfo, IPT_INV_SRCIP, ipinfo->smsk.s_addr &&
 		    (ip->saddr & ipinfo->smsk.s_addr) != ipinfo->src.s_addr) ||
-	    NF_INVF(ipinfo, IPT_INV_DSTIP,
+	    NF_INVF(ipinfo, IPT_INV_DSTIP, ipinfo->dmsk.s_addr &&
 		    (ip->daddr & ipinfo->dmsk.s_addr) != ipinfo->dst.s_addr))
 		return false;
 
@@ -88,6 +91,29 @@ ip_packet_match(const struct iphdr *ip,
 	return true;
 }
 
+static void
+ip_checkdefault(struct ipt_ip *ip)
+{
+	static const char iface_mask[IFNAMSIZ] = {};
+
+	if (ip->invflags || ip->flags & IPT_F_FRAG)
+		return;
+
+	if (memcmp(ip->iniface_mask, iface_mask, IFNAMSIZ) != 0)
+		return;
+
+	if (memcmp(ip->outiface_mask, iface_mask, IFNAMSIZ) != 0)
+		return;
+
+	if (ip->smsk.s_addr || ip->dmsk.s_addr)
+		return;
+
+	if (ip->proto)
+		return;
+
+	ip->flags |= IPT_F_NO_DEF_MATCH;
+}
+
 static bool
 ip_checkentry(const struct ipt_ip *ip)
 {
@@ -228,6 +254,33 @@ struct ipt_entry *ipt_next_entry(const struct ipt_entry *entry)
 	return (void *)entry + entry->next_offset;
 }
 
+static bool
+ipt_handle_default_rule(struct ipt_entry *e, unsigned int *verdict)
+{
+	struct xt_entry_target *t;
+	struct xt_standard_target *st;
+
+	if (e->target_offset != sizeof(struct ipt_entry))
+		return false;
+
+	if (!(e->ip.flags & IPT_F_NO_DEF_MATCH))
+		return false;
+
+	t = ipt_get_target(e);
+	if (t->u.kernel.target->target)
+		return false;
+
+	st = (struct xt_standard_target *)t;
+	if (st->verdict == XT_RETURN)
+		return false;
+
+	if (st->verdict >= 0)
+		return false;
+
+	*verdict = (unsigned int)(-st->verdict) - 1;
+	return true;
+}
+
 /* Returns one of the generic firewall policies, like NF_ACCEPT. */
 unsigned int
 ipt_do_table(struct sk_buff *skb,
@@ -248,28 +301,8 @@ ipt_do_table(struct sk_buff *skb,
 	unsigned int addend;
 
 	/* Initialization */
-	stackidx = 0;
-	ip = ip_hdr(skb);
-	indev = state->in ? state->in->name : nulldevname;
-	outdev = state->out ? state->out->name : nulldevname;
-	/* We handle fragments by dealing with the first fragment as
-	 * if it was a normal packet.  All other fragments are treated
-	 * normally, except that they will NEVER match rules that ask
-	 * things we don't know, ie. tcp syn flag or ports).  If the
-	 * rule is also a fragment-specific rule, non-fragments won't
-	 * match it. */
-	acpar.fragoff = ntohs(ip->frag_off) & IP_OFFSET;
-	acpar.thoff   = ip_hdrlen(skb);
-	acpar.hotdrop = false;
-	acpar.net     = state->net;
-	acpar.in      = state->in;
-	acpar.out     = state->out;
-	acpar.family  = NFPROTO_IPV4;
-	acpar.hooknum = hook;
-
 	IP_NF_ASSERT(table->valid_hooks & (1 << hook));
 	local_bh_disable();
-	addend = xt_write_recseq_begin();
 	private = table->private;
 	cpu        = smp_processor_id();
 	/*
@@ -278,6 +311,23 @@ ipt_do_table(struct sk_buff *skb,
 	 */
 	smp_read_barrier_depends();
 	table_base = private->entries;
+
+	e = get_entry(table_base, private->hook_entry[hook]);
+	if (ipt_handle_default_rule(e, &verdict)) {
+		struct xt_counters *counter;
+
+		counter = xt_get_this_cpu_counter(&e->counters);
+		ADD_COUNTER(*counter, skb->len, 1);
+		local_bh_enable();
+		return verdict;
+	}
+
+	stackidx = 0;
+	ip = ip_hdr(skb);
+	indev = state->in ? state->in->name : nulldevname;
+	outdev = state->out ? state->out->name : nulldevname;
+
+	addend = xt_write_recseq_begin();
 	jumpstack  = (struct ipt_entry **)private->jumpstack[cpu];
 
 	/* Switch to alternate jumpstack if we're being invoked via TEE.
@@ -290,7 +340,20 @@ ipt_do_table(struct sk_buff *skb,
 	if (static_key_false(&xt_tee_enabled))
 		jumpstack += private->stacksize * __this_cpu_read(nf_skb_duplicated);
 
-	e = get_entry(table_base, private->hook_entry[hook]);
+	/* We handle fragments by dealing with the first fragment as
+	 * if it was a normal packet.  All other fragments are treated
+	 * normally, except that they will NEVER match rules that ask
+	 * things we don't know, ie. tcp syn flag or ports).  If the
+	 * rule is also a fragment-specific rule, non-fragments won't
+	 * match it. */
+	acpar.fragoff = ntohs(ip->frag_off) & IP_OFFSET;
+	acpar.thoff   = ip_hdrlen(skb);
+	acpar.hotdrop = false;
+	acpar.net     = state->net;
+	acpar.in      = state->in;
+	acpar.out     = state->out;
+	acpar.family  = NFPROTO_IPV4;
+	acpar.hooknum = hook;
 
 	do {
 		const struct xt_entry_target *t;
@@ -545,6 +608,8 @@ find_check_entry(struct ipt_entry *e, struct net *net, const char *name,
 	struct xt_entry_match *ematch;
 	unsigned long pcnt;
 
+	ip_checkdefault(&e->ip);
+
 	pcnt = xt_percpu_counter_alloc();
 	if (IS_ERR_VALUE(pcnt))
 		return -ENOMEM;
@@ -824,6 +889,7 @@ copy_entries_to_user(unsigned int total_size,
 	const struct xt_table_info *private = table->private;
 	int ret = 0;
 	const void *loc_cpu_entry;
+	u8 flags;
 
 	counters = alloc_counters(table);
 	if (IS_ERR(counters))
@@ -851,6 +917,14 @@ copy_entries_to_user(unsigned int total_size,
 			goto free_counters;
 		}
 
+		flags = e->ip.flags & IPT_F_MASK;
+		if (copy_to_user(userptr + off
+				 + offsetof(struct ipt_entry, ip.flags),
+				 &flags, sizeof(flags)) != 0) {
+			ret = -EFAULT;
+			goto free_counters;
+		}
+
 		for (i = sizeof(struct ipt_entry);
 		     i < e->target_offset;
 		     i += m->u.match_size) {
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index 6b3d27e50..012ba8d8e 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -665,9 +665,9 @@ static void tcp_push(struct sock *sk, int flags, int mss_now,
 	if (tcp_should_autocork(sk, skb, size_goal)) {
 
 		/* avoid atomic op if TSQ_THROTTLED bit is already set */
-		if (!test_bit(TSQ_THROTTLED, &tp->tsq_flags)) {
+		if (!test_bit(TSQ_THROTTLED, &sk->sk_tsq_flags)) {
 			NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPAUTOCORKING);
-			set_bit(TSQ_THROTTLED, &tp->tsq_flags);
+			set_bit(TSQ_THROTTLED, &sk->sk_tsq_flags);
 		}
 		/* It is possible TX completion already happened
 		 * before we set TSQ_THROTTLED.
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
index 6988566dc..4fa4a8314 100644
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -446,7 +446,7 @@ void tcp_v4_err(struct sk_buff *icmp_skb, u32 info)
 			if (!sock_owned_by_user(sk)) {
 				tcp_v4_mtu_reduced(sk);
 			} else {
-				if (!test_and_set_bit(TCP_MTU_REDUCED_DEFERRED, &tp->tsq_flags))
+				if (!test_and_set_bit(TCP_MTU_REDUCED_DEFERRED, &sk->sk_tsq_flags))
 					sock_hold(sk);
 			}
 			goto out;
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index f2b0facab..493876cfa 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -772,25 +772,27 @@ static void tcp_tasklet_func(unsigned long data)
 		list_del(&tp->tsq_node);
 
 		sk = (struct sock *)tp;
-		bh_lock_sock(sk);
-
-		if (!sock_owned_by_user(sk)) {
-			tcp_tsq_handler(sk);
-		} else {
-			/* defer the work to tcp_release_cb() */
-			set_bit(TCP_TSQ_DEFERRED, &tp->tsq_flags);
+		smp_mb__before_atomic();
+		clear_bit(TSQ_QUEUED, &sk->sk_tsq_flags);
+
+		if (!sk->sk_lock.owned &&
+		    test_bit(TCP_TSQ_DEFERRED, &sk->sk_tsq_flags)) {
+			bh_lock_sock(sk);
+			if (!sock_owned_by_user(sk)) {
+				clear_bit(TCP_TSQ_DEFERRED, &sk->sk_tsq_flags);
+				tcp_tsq_handler(sk);
+			}
+			bh_unlock_sock(sk);
 		}
-		bh_unlock_sock(sk);
 
-		clear_bit(TSQ_QUEUED, &tp->tsq_flags);
 		sk_free(sk);
 	}
 }
 
-#define TCP_DEFERRED_ALL ((1UL << TCP_TSQ_DEFERRED) |		\
-			  (1UL << TCP_WRITE_TIMER_DEFERRED) |	\
-			  (1UL << TCP_DELACK_TIMER_DEFERRED) |	\
-			  (1UL << TCP_MTU_REDUCED_DEFERRED))
+#define TCP_DEFERRED_ALL (TCPF_TSQ_DEFERRED |		\
+			  TCPF_WRITE_TIMER_DEFERRED |	\
+			  TCPF_DELACK_TIMER_DEFERRED |	\
+			  TCPF_MTU_REDUCED_DEFERRED)
 /**
  * tcp_release_cb - tcp release_sock() callback
  * @sk: socket
@@ -800,18 +802,17 @@ static void tcp_tasklet_func(unsigned long data)
  */
 void tcp_release_cb(struct sock *sk)
 {
-	struct tcp_sock *tp = tcp_sk(sk);
 	unsigned long flags, nflags;
 
 	/* perform an atomic operation only if at least one flag is set */
 	do {
-		flags = tp->tsq_flags;
+		flags = sk->sk_tsq_flags;
 		if (!(flags & TCP_DEFERRED_ALL))
 			return;
 		nflags = flags & ~TCP_DEFERRED_ALL;
-	} while (cmpxchg(&tp->tsq_flags, flags, nflags) != flags);
+	} while (cmpxchg(&sk->sk_tsq_flags, flags, nflags) != flags);
 
-	if (flags & (1UL << TCP_TSQ_DEFERRED))
+	if (flags & TCPF_TSQ_DEFERRED)
 		tcp_tsq_handler(sk);
 
 	/* Here begins the tricky part :
@@ -825,15 +826,15 @@ void tcp_release_cb(struct sock *sk)
 	 */
 	sock_release_ownership(sk);
 
-	if (flags & (1UL << TCP_WRITE_TIMER_DEFERRED)) {
+	if (flags & TCPF_WRITE_TIMER_DEFERRED) {
 		tcp_write_timer_handler(sk);
 		__sock_put(sk);
 	}
-	if (flags & (1UL << TCP_DELACK_TIMER_DEFERRED)) {
+	if (flags & TCPF_DELACK_TIMER_DEFERRED) {
 		tcp_delack_timer_handler(sk);
 		__sock_put(sk);
 	}
-	if (flags & (1UL << TCP_MTU_REDUCED_DEFERRED)) {
+	if (flags & TCPF_MTU_REDUCED_DEFERRED) {
 		inet_csk(sk)->icsk_af_ops->mtu_reduced(sk);
 		__sock_put(sk);
 	}
@@ -863,6 +864,7 @@ void tcp_wfree(struct sk_buff *skb)
 {
 	struct sock *sk = skb->sk;
 	struct tcp_sock *tp = tcp_sk(sk);
+	unsigned long flags, nval, oval;
 	int wmem;
 
 	/* Keep one reference on sk_wmem_alloc.
@@ -880,16 +882,25 @@ void tcp_wfree(struct sk_buff *skb)
 	if (wmem >= SKB_TRUESIZE(1) && this_cpu_ksoftirqd() == current)
 		goto out;
 
-	if (test_and_clear_bit(TSQ_THROTTLED, &tp->tsq_flags) &&
-	    !test_and_set_bit(TSQ_QUEUED, &tp->tsq_flags)) {
-		unsigned long flags;
+	for (oval = READ_ONCE(sk->sk_tsq_flags);; oval = nval) {
 		struct tsq_tasklet *tsq;
+		bool empty;
+
+		if (!(oval & TSQF_THROTTLED) || (oval & TSQF_QUEUED))
+			goto out;
+
+		nval = (oval & ~TSQF_THROTTLED) | TSQF_QUEUED | TCPF_TSQ_DEFERRED;
+		nval = cmpxchg(&sk->sk_tsq_flags, oval, nval);
+		if (nval != oval)
+			continue;
 
 		/* queue this socket to tasklet queue */
 		local_irq_save(flags);
 		tsq = this_cpu_ptr(&tsq_tasklet);
+		empty = list_empty(&tsq->head);
 		list_add(&tp->tsq_node, &tsq->head);
-		tasklet_schedule(&tsq->tasklet);
+		if (empty)
+			tasklet_schedule(&tsq->tasklet);
 		local_irq_restore(flags);
 		return;
 	}
@@ -1932,26 +1943,26 @@ static inline void tcp_mtu_check_reprobe(struct sock *sk)
  */
 static int tcp_mtu_probe(struct sock *sk)
 {
-	struct tcp_sock *tp = tcp_sk(sk);
 	struct inet_connection_sock *icsk = inet_csk(sk);
+	struct tcp_sock *tp = tcp_sk(sk);
 	struct sk_buff *skb, *nskb, *next;
 	struct net *net = sock_net(sk);
-	int len;
 	int probe_size;
 	int size_needed;
-	int copy;
+	int copy, len;
 	int mss_now;
 	int interval;
 
 	/* Not currently probing/verifying,
 	 * not in recovery,
 	 * have enough cwnd, and
-	 * not SACKing (the variable headers throw things off) */
-	if (!icsk->icsk_mtup.enabled ||
-	    icsk->icsk_mtup.probe_size ||
-	    inet_csk(sk)->icsk_ca_state != TCP_CA_Open ||
-	    tp->snd_cwnd < 11 ||
-	    tp->rx_opt.num_sacks || tp->rx_opt.dsack)
+	 * not SACKing (the variable headers throw things off)
+	 */
+	if (likely(!icsk->icsk_mtup.enabled ||
+		   icsk->icsk_mtup.probe_size ||
+		   inet_csk(sk)->icsk_ca_state != TCP_CA_Open ||
+		   tp->snd_cwnd < 11 ||
+		   tp->rx_opt.num_sacks || tp->rx_opt.dsack))
 		return -1;
 
 	/* Use binary search for probe_size between tcp_mss_base,
@@ -2091,7 +2102,16 @@ static bool tcp_small_queue_check(struct sock *sk, const struct sk_buff *skb,
 	limit <<= factor;
 
 	if (atomic_read(&sk->sk_wmem_alloc) > limit) {
-		set_bit(TSQ_THROTTLED, &tcp_sk(sk)->tsq_flags);
+		/* Always send the 1st or 2nd skb in write queue.
+		 * No need to wait for TX completion to call us back,
+		 * after softirq/tasklet schedule.
+		 * This helps when TX completions are delayed too much.
+		 */
+		if (skb == sk->sk_write_queue.next ||
+		    skb->prev == sk->sk_write_queue.next)
+			return false;
+
+		set_bit(TSQ_THROTTLED, &sk->sk_tsq_flags);
 		/* It is possible TX completion already happened
 		 * before we set TSQ_THROTTLED, so we must
 		 * test again the condition.
@@ -2189,6 +2209,8 @@ static bool tcp_write_xmit(struct sock *sk, unsigned int mss_now, int nonagle,
 		    unlikely(tso_fragment(sk, skb, limit, mss_now, gfp)))
 			break;
 
+		if (test_bit(TCP_TSQ_DEFERRED, &sk->sk_tsq_flags))
+			clear_bit(TCP_TSQ_DEFERRED, &sk->sk_tsq_flags);
 		if (tcp_small_queue_check(sk, skb, 0))
 			break;
 
@@ -3501,8 +3523,6 @@ void tcp_send_ack(struct sock *sk)
 	/* We do not want pure acks influencing TCP Small Queues or fq/pacing
 	 * too much.
 	 * SKB_TRUESIZE(max(1 .. 66, MAX_TCP_HEADER)) is unfortunately ~784
-	 * We also avoid tcp_wfree() overhead (cache line miss accessing
-	 * tp->tsq_flags) by using regular sock_wfree()
 	 */
 	skb_set_tcp_pure_ack(buff);
 
diff --git a/net/ipv4/tcp_timer.c b/net/ipv4/tcp_timer.c
index 74db43b47..81357c46d 100644
--- a/net/ipv4/tcp_timer.c
+++ b/net/ipv4/tcp_timer.c
@@ -311,7 +311,7 @@ static void tcp_delack_timer(unsigned long data)
 		inet_csk(sk)->icsk_ack.blocked = 1;
 		__NET_INC_STATS(sock_net(sk), LINUX_MIB_DELAYEDACKLOCKED);
 		/* deleguate our work to tcp_release_cb() */
-		if (!test_and_set_bit(TCP_DELACK_TIMER_DEFERRED, &tcp_sk(sk)->tsq_flags))
+		if (!test_and_set_bit(TCP_DELACK_TIMER_DEFERRED, &sk->sk_tsq_flags))
 			sock_hold(sk);
 	}
 	bh_unlock_sock(sk);
@@ -594,7 +594,7 @@ static void tcp_write_timer(unsigned long data)
 		tcp_write_timer_handler(sk);
 	} else {
 		/* delegate our work to tcp_release_cb() */
-		if (!test_and_set_bit(TCP_WRITE_TIMER_DEFERRED, &tcp_sk(sk)->tsq_flags))
+		if (!test_and_set_bit(TCP_WRITE_TIMER_DEFERRED, &sk->sk_tsq_flags))
 			sock_hold(sk);
 	}
 	bh_unlock_sock(sk);
diff --git a/net/ipv6/fib6_rules.c b/net/ipv6/fib6_rules.c
index ec849d88a..12d011da1 100644
--- a/net/ipv6/fib6_rules.c
+++ b/net/ipv6/fib6_rules.c
@@ -77,6 +77,10 @@ static int fib6_rule_action(struct fib_rule *rule, struct flowi *flp,
 		err = -EACCES;
 		rt = net->ipv6.ip6_prohibit_entry;
 		goto discard_pkt;
+	case FR_ACT_POLICY_FAILED:
+		err = -EACCES;
+		rt = net->ipv6.ip6_policy_failed_entry;
+		goto discard_pkt;
 	}
 
 	tb_id = fib_rule_get_table(rule, arg);
diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c
index 12b2fd512..68e205d27 100644
--- a/net/ipv6/ip6_tunnel.c
+++ b/net/ipv6/ip6_tunnel.c
@@ -16,6 +16,8 @@
  *      as published by the Free Software Foundation; either version
  *      2 of the License, or (at your option) any later version.
  *
+ *	Changes:
+ * Steven Barth <cyrus@openwrt.org>:		MAP-E FMR support
  */
 
 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
@@ -72,9 +74,9 @@ static bool log_ecn_error = true;
 module_param(log_ecn_error, bool, 0644);
 MODULE_PARM_DESC(log_ecn_error, "Log packets received with corrupted ECN");
 
-static u32 HASH(const struct in6_addr *addr1, const struct in6_addr *addr2)
+static u32 HASH(const struct in6_addr *addr)
 {
-	u32 hash = ipv6_addr_hash(addr1) ^ ipv6_addr_hash(addr2);
+	u32 hash = ipv6_addr_hash(addr);
 
 	return hash_32(hash, IP6_TUNNEL_HASH_SIZE_SHIFT);
 }
@@ -141,20 +143,29 @@ static struct net_device_stats *ip6_get_stats(struct net_device *dev)
 static struct ip6_tnl *
 ip6_tnl_lookup(struct net *net, const struct in6_addr *remote, const struct in6_addr *local)
 {
-	unsigned int hash = HASH(remote, local);
+	unsigned int hash = HASH(local);
 	struct ip6_tnl *t;
 	struct ip6_tnl_net *ip6n = net_generic(net, ip6_tnl_net_id);
 	struct in6_addr any;
+	struct __ip6_tnl_fmr *fmr;
 
 	for_each_ip6_tunnel_rcu(ip6n->tnls_r_l[hash]) {
-		if (ipv6_addr_equal(local, &t->parms.laddr) &&
-		    ipv6_addr_equal(remote, &t->parms.raddr) &&
-		    (t->dev->flags & IFF_UP))
+		if (!ipv6_addr_equal(local, &t->parms.laddr) ||
+				!(t->dev->flags & IFF_UP))
+			continue;
+
+		if (ipv6_addr_equal(remote, &t->parms.raddr))
 			return t;
+
+		for (fmr = t->parms.fmrs; fmr; fmr = fmr->next) {
+			if (ipv6_prefix_equal(remote, &fmr->ip6_prefix,
+					fmr->ip6_prefix_len))
+				return t;
+		}
 	}
 
 	memset(&any, 0, sizeof(any));
-	hash = HASH(&any, local);
+	hash = HASH(local);
 	for_each_ip6_tunnel_rcu(ip6n->tnls_r_l[hash]) {
 		if (ipv6_addr_equal(local, &t->parms.laddr) &&
 		    ipv6_addr_any(&t->parms.raddr) &&
@@ -162,7 +173,7 @@ ip6_tnl_lookup(struct net *net, const struct in6_addr *remote, const struct in6_
 			return t;
 	}
 
-	hash = HASH(remote, &any);
+	hash = HASH(&any);
 	for_each_ip6_tunnel_rcu(ip6n->tnls_r_l[hash]) {
 		if (ipv6_addr_equal(remote, &t->parms.raddr) &&
 		    ipv6_addr_any(&t->parms.laddr) &&
@@ -202,7 +213,7 @@ ip6_tnl_bucket(struct ip6_tnl_net *ip6n, const struct __ip6_tnl_parm *p)
 
 	if (!ipv6_addr_any(remote) || !ipv6_addr_any(local)) {
 		prio = 1;
-		h = HASH(remote, local);
+		h = HASH(local);
 	}
 	return &ip6n->tnls[prio][h];
 }
@@ -381,6 +392,12 @@ ip6_tnl_dev_uninit(struct net_device *dev)
 	struct net *net = t->net;
 	struct ip6_tnl_net *ip6n = net_generic(net, ip6_tnl_net_id);
 
+	while (t->parms.fmrs) {
+		struct __ip6_tnl_fmr *next = t->parms.fmrs->next;
+		kfree(t->parms.fmrs);
+		t->parms.fmrs = next;
+	}
+
 	if (dev == ip6n->fb_tnl_dev)
 		RCU_INIT_POINTER(ip6n->tnls_wc[0], NULL);
 	else
@@ -777,6 +794,107 @@ int ip6_tnl_rcv_ctl(struct ip6_tnl *t,
 }
 EXPORT_SYMBOL_GPL(ip6_tnl_rcv_ctl);
 
+/**
+ * ip4ip6_fmr_calc - calculate target / source IPv6-address based on FMR
+ *   @dest: destination IPv6 address buffer
+ *   @skb: received socket buffer
+ *   @iph: IPv4 header of the packet, bounded by @end
+ *   @xmit: Calculate for xmit or rcv
+ **/
+static void ip4ip6_fmr_calc(struct in6_addr *dest,
+		const struct iphdr *iph, const uint8_t *end,
+		const struct __ip6_tnl_fmr *fmr, bool xmit)
+{
+	int psidlen = fmr->ea_len - (32 - fmr->ip4_prefix_len);
+	u8 *portp = NULL;
+	bool use_dest_addr;
+	const struct iphdr *dsth = iph;
+
+	if ((u8*)dsth >= end)
+		return;
+
+	/* find significant IP header */
+	if (iph->protocol == IPPROTO_ICMP) {
+		struct icmphdr *ih = (struct icmphdr*)(((u8*)dsth) + dsth->ihl * 4);
+		if (ih && ((u8*)&ih[1]) <= end && (
+			ih->type == ICMP_DEST_UNREACH ||
+			ih->type == ICMP_SOURCE_QUENCH ||
+			ih->type == ICMP_TIME_EXCEEDED ||
+			ih->type == ICMP_PARAMETERPROB ||
+			ih->type == ICMP_REDIRECT))
+				dsth = (const struct iphdr*)&ih[1];
+	}
+
+	/* in xmit-path use dest port by default and source port only if
+		this is an ICMP reply to something else; vice versa in rcv-path */
+	use_dest_addr = (xmit && dsth == iph) || (!xmit && dsth != iph);
+
+	/* get dst port */
+	if (((u8*)&dsth[1]) <= end && (
+		dsth->protocol == IPPROTO_UDP ||
+		dsth->protocol == IPPROTO_TCP ||
+		dsth->protocol == IPPROTO_SCTP ||
+		dsth->protocol == IPPROTO_DCCP)) {
+			/* for UDP, TCP, SCTP and DCCP source and dest port
+			follow IPv4 header directly */
+			portp = ((u8*)dsth) + dsth->ihl * 4;
+
+			if (use_dest_addr)
+				portp += sizeof(u16);
+	} else if (iph->protocol == IPPROTO_ICMP) {
+		struct icmphdr *ih = (struct icmphdr*)(((u8*)dsth) + dsth->ihl * 4);
+
+		/* use icmp identifier as port */
+		if (((u8*)&ih) <= end && (
+		    (use_dest_addr && (
+		    ih->type == ICMP_ECHOREPLY ||
+			ih->type == ICMP_TIMESTAMPREPLY ||
+			ih->type == ICMP_INFO_REPLY ||
+			ih->type == ICMP_ADDRESSREPLY)) ||
+			(!use_dest_addr && (
+			ih->type == ICMP_ECHO ||
+			ih->type == ICMP_TIMESTAMP ||
+			ih->type == ICMP_INFO_REQUEST ||
+			ih->type == ICMP_ADDRESS)
+			)))
+				portp = (u8*)&ih->un.echo.id;
+	}
+
+	if ((portp && &portp[2] <= end) || psidlen == 0) {
+		int frombyte = fmr->ip6_prefix_len / 8;
+		int fromrem = fmr->ip6_prefix_len % 8;
+		int bytes = sizeof(struct in6_addr) - frombyte;
+		const u32 *addr = (use_dest_addr) ? &iph->daddr : &iph->saddr;
+		u64 eabits = ((u64)ntohl(*addr)) << (32 + fmr->ip4_prefix_len);
+		u64 t = 0;
+
+		/* extract PSID from port and add it to eabits */
+		u16 psidbits = 0;
+		if (psidlen > 0) {
+			psidbits = ((u16)portp[0]) << 8 | ((u16)portp[1]);
+			psidbits >>= 16 - psidlen - fmr->offset;
+			psidbits = (u16)(psidbits << (16 - psidlen));
+			eabits |= ((u64)psidbits) << (48 - (fmr->ea_len - psidlen));
+		}
+
+		/* rewrite destination address */
+		*dest = fmr->ip6_prefix;
+		memcpy(&dest->s6_addr[10], addr, sizeof(*addr));
+		dest->s6_addr16[7] = htons(psidbits >> (16 - psidlen));
+
+		if (bytes > sizeof(u64))
+			bytes = sizeof(u64);
+
+		/* insert eabits */
+		memcpy(&t, &dest->s6_addr[frombyte], bytes);
+		t = be64_to_cpu(t) & ~(((((u64)1) << fmr->ea_len) - 1)
+			<< (64 - fmr->ea_len - fromrem));
+		t = cpu_to_be64(t | (eabits >> fromrem));
+		memcpy(&dest->s6_addr[frombyte], &t, bytes);
+	}
+}
+
+
 static int __ip6_tnl_rcv(struct ip6_tnl *tunnel, struct sk_buff *skb,
 			 const struct tnl_ptk_info *tpi,
 			 struct metadata_dst *tun_dst,
@@ -829,6 +947,27 @@ static int __ip6_tnl_rcv(struct ip6_tnl *tunnel, struct sk_buff *skb,
 	skb_reset_network_header(skb);
 	memset(skb->cb, 0, sizeof(struct inet6_skb_parm));
 
+	if (tpi->proto == htons(ETH_P_IP) &&
+		!ipv6_addr_equal(&ipv6h->saddr, &tunnel->parms.raddr)) {
+			/* Packet didn't come from BR, so lookup FMR */
+			struct __ip6_tnl_fmr *fmr;
+			struct in6_addr expected = tunnel->parms.raddr;
+			for (fmr = tunnel->parms.fmrs; fmr; fmr = fmr->next)
+				if (ipv6_prefix_equal(&ipv6h->saddr,
+					&fmr->ip6_prefix, fmr->ip6_prefix_len))
+						break;
+
+			/* Check that IPv6 matches IPv4 source to prevent spoofing */
+			if (fmr)
+				ip4ip6_fmr_calc(&expected, ip_hdr(skb),
+						skb_tail_pointer(skb), fmr, false);
+
+			if (!ipv6_addr_equal(&ipv6h->saddr, &expected)) {
+				rcu_read_unlock();
+				goto drop;
+			}
+	}
+
 	__skb_tunnel_rx(skb, tunnel->dev, tunnel->net);
 
 	err = dscp_ecn_decapsulate(tunnel, ipv6h, skb);
@@ -958,6 +1097,7 @@ static void init_tel_txopt(struct ipv6_tel_txoption *opt, __u8 encap_limit)
 	opt->ops.opt_nflen = 8;
 }
 
+
 /**
  * ip6_tnl_addr_conflict - compare packet addresses to tunnel's own
  *   @t: the outgoing tunnel device
@@ -1286,6 +1426,7 @@ ip6ip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev)
 {
 	struct ip6_tnl *t = netdev_priv(dev);
 	struct ipv6hdr *ipv6h = ipv6_hdr(skb);
+	struct __ip6_tnl_fmr *fmr;
 	int encap_limit = -1;
 	__u16 offset;
 	struct flowi6 fl6;
@@ -1344,6 +1485,18 @@ ip6ip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev)
 			fl6.flowi6_mark = skb->mark;
 	}
 
+	/* try to find matching FMR */
+	for (fmr = t->parms.fmrs; fmr; fmr = fmr->next) {
+		unsigned mshift = 32 - fmr->ip4_prefix_len;
+		if (ntohl(fmr->ip4_prefix.s_addr) >> mshift ==
+				ntohl(ip_hdr(skb)->daddr) >> mshift)
+			break;
+	}
+
+	/* change dstaddr according to FMR */
+	if (fmr)
+		ip4ip6_fmr_calc(&fl6.daddr, ip_hdr(skb), skb_tail_pointer(skb), fmr, true);
+
 	if (iptunnel_handle_offloads(skb, SKB_GSO_IPXIP6))
 		return -1;
 
@@ -1471,6 +1624,14 @@ ip6_tnl_change(struct ip6_tnl *t, const struct __ip6_tnl_parm *p)
 	t->parms.flowinfo = p->flowinfo;
 	t->parms.link = p->link;
 	t->parms.proto = p->proto;
+
+	while (t->parms.fmrs) {
+		struct __ip6_tnl_fmr *next = t->parms.fmrs->next;
+		kfree(t->parms.fmrs);
+		t->parms.fmrs = next;
+	}
+	t->parms.fmrs = p->fmrs;
+
 	dst_cache_reset(&t->dst_cache);
 	ip6_tnl_link_config(t);
 	return 0;
@@ -1509,6 +1670,7 @@ ip6_tnl_parm_from_user(struct __ip6_tnl_parm *p, const struct ip6_tnl_parm *u)
 	p->flowinfo = u->flowinfo;
 	p->link = u->link;
 	p->proto = u->proto;
+	p->fmrs = NULL;
 	memcpy(p->name, u->name, sizeof(u->name));
 }
 
@@ -1886,6 +2048,15 @@ static int ip6_tnl_validate(struct nlattr *tb[], struct nlattr *data[])
 	return 0;
 }
 
+static const struct nla_policy ip6_tnl_fmr_policy[IFLA_IPTUN_FMR_MAX + 1] = {
+	[IFLA_IPTUN_FMR_IP6_PREFIX] = { .len = sizeof(struct in6_addr) },
+	[IFLA_IPTUN_FMR_IP4_PREFIX] = { .len = sizeof(struct in_addr) },
+	[IFLA_IPTUN_FMR_IP6_PREFIX_LEN] = { .type = NLA_U8 },
+	[IFLA_IPTUN_FMR_IP4_PREFIX_LEN] = { .type = NLA_U8 },
+	[IFLA_IPTUN_FMR_EA_LEN] = { .type = NLA_U8 },
+	[IFLA_IPTUN_FMR_OFFSET] = { .type = NLA_U8 }
+};
+
 static void ip6_tnl_netlink_parms(struct nlattr *data[],
 				  struct __ip6_tnl_parm *parms)
 {
@@ -1920,6 +2091,46 @@ static void ip6_tnl_netlink_parms(struct nlattr *data[],
 
 	if (data[IFLA_IPTUN_COLLECT_METADATA])
 		parms->collect_md = true;
+
+	if (data[IFLA_IPTUN_FMRS]) {
+		unsigned rem;
+		struct nlattr *fmr;
+		nla_for_each_nested(fmr, data[IFLA_IPTUN_FMRS], rem) {
+			struct nlattr *fmrd[IFLA_IPTUN_FMR_MAX + 1], *c;
+			struct __ip6_tnl_fmr *nfmr;
+
+			nla_parse_nested(fmrd, IFLA_IPTUN_FMR_MAX,
+				fmr, ip6_tnl_fmr_policy);
+
+			if (!(nfmr = kzalloc(sizeof(*nfmr), GFP_KERNEL)))
+				continue;
+
+			nfmr->offset = 6;
+
+			if ((c = fmrd[IFLA_IPTUN_FMR_IP6_PREFIX]))
+				nla_memcpy(&nfmr->ip6_prefix, fmrd[IFLA_IPTUN_FMR_IP6_PREFIX],
+					sizeof(nfmr->ip6_prefix));
+
+			if ((c = fmrd[IFLA_IPTUN_FMR_IP4_PREFIX]))
+				nla_memcpy(&nfmr->ip4_prefix, fmrd[IFLA_IPTUN_FMR_IP4_PREFIX],
+					sizeof(nfmr->ip4_prefix));
+
+			if ((c = fmrd[IFLA_IPTUN_FMR_IP6_PREFIX_LEN]))
+				nfmr->ip6_prefix_len = nla_get_u8(c);
+
+			if ((c = fmrd[IFLA_IPTUN_FMR_IP4_PREFIX_LEN]))
+				nfmr->ip4_prefix_len = nla_get_u8(c);
+
+			if ((c = fmrd[IFLA_IPTUN_FMR_EA_LEN]))
+				nfmr->ea_len = nla_get_u8(c);
+
+			if ((c = fmrd[IFLA_IPTUN_FMR_OFFSET]))
+				nfmr->offset = nla_get_u8(c);
+
+			nfmr->next = parms->fmrs;
+			parms->fmrs = nfmr;
+		}
+	}
 }
 
 static bool ip6_tnl_netlink_encap_parms(struct nlattr *data[],
@@ -2029,6 +2240,12 @@ static void ip6_tnl_dellink(struct net_device *dev, struct list_head *head)
 
 static size_t ip6_tnl_get_size(const struct net_device *dev)
 {
+	const struct ip6_tnl *t = netdev_priv(dev);
+	struct __ip6_tnl_fmr *c;
+	int fmrs = 0;
+	for (c = t->parms.fmrs; c; c = c->next)
+		++fmrs;
+
 	return
 		/* IFLA_IPTUN_LINK */
 		nla_total_size(4) +
@@ -2056,6 +2273,24 @@ static size_t ip6_tnl_get_size(const struct net_device *dev)
 		nla_total_size(2) +
 		/* IFLA_IPTUN_COLLECT_METADATA */
 		nla_total_size(0) +
+		/* IFLA_IPTUN_FMRS */
+		nla_total_size(0) +
+		(
+			/* nest */
+			nla_total_size(0) +
+			/* IFLA_IPTUN_FMR_IP6_PREFIX */
+			nla_total_size(sizeof(struct in6_addr)) +
+			/* IFLA_IPTUN_FMR_IP4_PREFIX */
+			nla_total_size(sizeof(struct in_addr)) +
+			/* IFLA_IPTUN_FMR_EA_LEN */
+			nla_total_size(1) +
+			/* IFLA_IPTUN_FMR_IP6_PREFIX_LEN */
+			nla_total_size(1) +
+			/* IFLA_IPTUN_FMR_IP4_PREFIX_LEN */
+			nla_total_size(1) +
+			/* IFLA_IPTUN_FMR_OFFSET */
+			nla_total_size(1)
+		) * fmrs +
 		0;
 }
 
@@ -2063,6 +2298,9 @@ static int ip6_tnl_fill_info(struct sk_buff *skb, const struct net_device *dev)
 {
 	struct ip6_tnl *tunnel = netdev_priv(dev);
 	struct __ip6_tnl_parm *parm = &tunnel->parms;
+	struct __ip6_tnl_fmr *c;
+	int fmrcnt = 0;
+	struct nlattr *fmrs;
 
 	if (nla_put_u32(skb, IFLA_IPTUN_LINK, parm->link) ||
 	    nla_put_in6_addr(skb, IFLA_IPTUN_LOCAL, &parm->laddr) ||
@@ -2071,9 +2309,27 @@ static int ip6_tnl_fill_info(struct sk_buff *skb, const struct net_device *dev)
 	    nla_put_u8(skb, IFLA_IPTUN_ENCAP_LIMIT, parm->encap_limit) ||
 	    nla_put_be32(skb, IFLA_IPTUN_FLOWINFO, parm->flowinfo) ||
 	    nla_put_u32(skb, IFLA_IPTUN_FLAGS, parm->flags) ||
-	    nla_put_u8(skb, IFLA_IPTUN_PROTO, parm->proto))
+	    nla_put_u8(skb, IFLA_IPTUN_PROTO, parm->proto) ||
+	    !(fmrs = nla_nest_start(skb, IFLA_IPTUN_FMRS)))
 		goto nla_put_failure;
 
+	for (c = parm->fmrs; c; c = c->next) {
+		struct nlattr *fmr = nla_nest_start(skb, ++fmrcnt);
+		if (!fmr ||
+			nla_put(skb, IFLA_IPTUN_FMR_IP6_PREFIX,
+				sizeof(c->ip6_prefix), &c->ip6_prefix) ||
+			nla_put(skb, IFLA_IPTUN_FMR_IP4_PREFIX,
+				sizeof(c->ip4_prefix), &c->ip4_prefix) ||
+			nla_put_u8(skb, IFLA_IPTUN_FMR_IP6_PREFIX_LEN, c->ip6_prefix_len) ||
+			nla_put_u8(skb, IFLA_IPTUN_FMR_IP4_PREFIX_LEN, c->ip4_prefix_len) ||
+			nla_put_u8(skb, IFLA_IPTUN_FMR_EA_LEN, c->ea_len) ||
+			nla_put_u8(skb, IFLA_IPTUN_FMR_OFFSET, c->offset))
+				goto nla_put_failure;
+
+		nla_nest_end(skb, fmr);
+	}
+	nla_nest_end(skb, fmrs);
+
 	if (nla_put_u16(skb, IFLA_IPTUN_ENCAP_TYPE, tunnel->encap.type) ||
 	    nla_put_be16(skb, IFLA_IPTUN_ENCAP_SPORT, tunnel->encap.sport) ||
 	    nla_put_be16(skb, IFLA_IPTUN_ENCAP_DPORT, tunnel->encap.dport) ||
@@ -2111,6 +2367,7 @@ static const struct nla_policy ip6_tnl_policy[IFLA_IPTUN_MAX + 1] = {
 	[IFLA_IPTUN_ENCAP_SPORT]	= { .type = NLA_U16 },
 	[IFLA_IPTUN_ENCAP_DPORT]	= { .type = NLA_U16 },
 	[IFLA_IPTUN_COLLECT_METADATA]	= { .type = NLA_FLAG },
+	[IFLA_IPTUN_FMRS]		= { .type = NLA_NESTED },
 };
 
 static struct rtnl_link_ops ip6_link_ops __read_mostly = {
diff --git a/net/ipv6/ip6mr.c b/net/ipv6/ip6mr.c
index 117405dd0..62d4cac16 100644
--- a/net/ipv6/ip6mr.c
+++ b/net/ipv6/ip6mr.c
@@ -167,6 +167,8 @@ static int ip6mr_rule_action(struct fib_rule *rule, struct flowi *flp,
 		return -ENETUNREACH;
 	case FR_ACT_PROHIBIT:
 		return -EACCES;
+	case FR_ACT_POLICY_FAILED:
+		return -EACCES;
 	case FR_ACT_BLACKHOLE:
 	default:
 		return -EINVAL;
diff --git a/net/ipv6/route.c b/net/ipv6/route.c
index 61729641e..ee254381c 100644
--- a/net/ipv6/route.c
+++ b/net/ipv6/route.c
@@ -91,6 +91,8 @@ static int		ip6_pkt_discard(struct sk_buff *skb);
 static int		ip6_pkt_discard_out(struct net *net, struct sock *sk, struct sk_buff *skb);
 static int		ip6_pkt_prohibit(struct sk_buff *skb);
 static int		ip6_pkt_prohibit_out(struct net *net, struct sock *sk, struct sk_buff *skb);
+static int		ip6_pkt_policy_failed(struct sk_buff *skb);
+static int		ip6_pkt_policy_failed_out(struct net *net, struct sock *sk, struct sk_buff *skb);
 static void		ip6_link_failure(struct sk_buff *skb);
 static void		ip6_rt_update_pmtu(struct dst_entry *dst, struct sock *sk,
 					   struct sk_buff *skb, u32 mtu);
@@ -300,6 +302,22 @@ static const struct rt6_info ip6_prohibit_entry_template = {
 	.rt6i_ref	= ATOMIC_INIT(1),
 };
 
+static const struct rt6_info ip6_policy_failed_entry_template = {
+	.dst = {
+		.__refcnt	= ATOMIC_INIT(1),
+		.__use		= 1,
+		.obsolete	= DST_OBSOLETE_FORCE_CHK,
+		.error		= -EACCES,
+		.input		= ip6_pkt_policy_failed,
+		.output		= ip6_pkt_policy_failed_out,
+	},
+	.rt6i_flags	= (RTF_REJECT | RTF_NONEXTHOP),
+	.rt6i_protocol	= RTPROT_KERNEL,
+	.rt6i_metric	= ~(u32) 0,
+	.rt6i_ref	= ATOMIC_INIT(1),
+};
+
+
 static const struct rt6_info ip6_blk_hole_entry_template = {
 	.dst = {
 		.__refcnt	= ATOMIC_INIT(1),
@@ -1966,6 +1984,11 @@ static struct rt6_info *ip6_route_info_create(struct fib6_config *cfg)
 			rt->dst.output = ip6_pkt_prohibit_out;
 			rt->dst.input = ip6_pkt_prohibit;
 			break;
+		case RTN_POLICY_FAILED:
+			rt->dst.error = -EACCES;
+			rt->dst.output = ip6_pkt_policy_failed_out;
+			rt->dst.input = ip6_pkt_policy_failed;
+			break;
 		case RTN_THROW:
 		case RTN_UNREACHABLE:
 		default:
@@ -2609,6 +2632,17 @@ static int ip6_pkt_prohibit_out(struct net *net, struct sock *sk, struct sk_buff
 	return ip6_pkt_drop(skb, ICMPV6_ADM_PROHIBITED, IPSTATS_MIB_OUTNOROUTES);
 }
 
+static int ip6_pkt_policy_failed(struct sk_buff *skb)
+{
+	return ip6_pkt_drop(skb, ICMPV6_POLICY_FAIL, IPSTATS_MIB_INNOROUTES);
+}
+
+static int ip6_pkt_policy_failed_out(struct net *net, struct sock *sk, struct sk_buff *skb)
+{
+	skb->dev = skb_dst(skb)->dev;
+	return ip6_pkt_drop(skb, ICMPV6_POLICY_FAIL, IPSTATS_MIB_OUTNOROUTES);
+}
+
 /*
  *	Allocate a dst for local (unicast / anycast) address.
  */
@@ -2844,7 +2878,8 @@ static int rtm_to_fib6_config(struct sk_buff *skb, struct nlmsghdr *nlh,
 	if (rtm->rtm_type == RTN_UNREACHABLE ||
 	    rtm->rtm_type == RTN_BLACKHOLE ||
 	    rtm->rtm_type == RTN_PROHIBIT ||
-	    rtm->rtm_type == RTN_THROW)
+	    rtm->rtm_type == RTN_THROW ||
+	    rtm->rtm_type == RTN_POLICY_FAILED)
 		cfg->fc_flags |= RTF_REJECT;
 
 	if (rtm->rtm_type == RTN_LOCAL)
@@ -3216,6 +3251,9 @@ static int rt6_fill_node(struct net *net,
 		case -EACCES:
 			rtm->rtm_type = RTN_PROHIBIT;
 			break;
+		case -EPERM:
+			rtm->rtm_type = RTN_POLICY_FAILED;
+			break;
 		case -EAGAIN:
 			rtm->rtm_type = RTN_THROW;
 			break;
@@ -3492,6 +3530,8 @@ static int ip6_route_dev_notify(struct notifier_block *this,
 #ifdef CONFIG_IPV6_MULTIPLE_TABLES
 		net->ipv6.ip6_prohibit_entry->dst.dev = dev;
 		net->ipv6.ip6_prohibit_entry->rt6i_idev = in6_dev_get(dev);
+		net->ipv6.ip6_policy_failed_entry->dst.dev = dev;
+		net->ipv6.ip6_policy_failed_entry->rt6i_idev = in6_dev_get(dev);
 		net->ipv6.ip6_blk_hole_entry->dst.dev = dev;
 		net->ipv6.ip6_blk_hole_entry->rt6i_idev = in6_dev_get(dev);
 #endif
@@ -3714,6 +3754,17 @@ static int __net_init ip6_route_net_init(struct net *net)
 	net->ipv6.ip6_blk_hole_entry->dst.ops = &net->ipv6.ip6_dst_ops;
 	dst_init_metrics(&net->ipv6.ip6_blk_hole_entry->dst,
 			 ip6_template_metrics, true);
+
+	net->ipv6.ip6_policy_failed_entry =
+		kmemdup(&ip6_policy_failed_entry_template,
+			sizeof(*net->ipv6.ip6_policy_failed_entry), GFP_KERNEL);
+	if (!net->ipv6.ip6_policy_failed_entry)
+		goto out_ip6_blk_hole_entry;
+	net->ipv6.ip6_policy_failed_entry->dst.path =
+		(struct dst_entry *)net->ipv6.ip6_policy_failed_entry;
+	net->ipv6.ip6_policy_failed_entry->dst.ops = &net->ipv6.ip6_dst_ops;
+	dst_init_metrics(&net->ipv6.ip6_policy_failed_entry->dst,
+			 ip6_template_metrics, true);
 #endif
 
 	net->ipv6.sysctl.flush_delay = 0;
@@ -3732,6 +3783,8 @@ out:
 	return ret;
 
 #ifdef CONFIG_IPV6_MULTIPLE_TABLES
+out_ip6_blk_hole_entry:
+	kfree(net->ipv6.ip6_blk_hole_entry);
 out_ip6_prohibit_entry:
 	kfree(net->ipv6.ip6_prohibit_entry);
 out_ip6_null_entry:
@@ -3749,6 +3802,7 @@ static void __net_exit ip6_route_net_exit(struct net *net)
 #ifdef CONFIG_IPV6_MULTIPLE_TABLES
 	kfree(net->ipv6.ip6_prohibit_entry);
 	kfree(net->ipv6.ip6_blk_hole_entry);
+	kfree(net->ipv6.ip6_policy_failed_entry);
 #endif
 	dst_entries_destroy(&net->ipv6.ip6_dst_ops);
 }
@@ -3851,6 +3905,20 @@ int __init ip6_route_init(void)
 
 	ip6_dst_blackhole_ops.kmem_cachep = ip6_dst_ops_template.kmem_cachep;
 
+	/* The loopback device is registered before this code runs, so the
+	 * loopback reference in rt6_info is not taken automatically; take it
+	 * manually for init_net */
+	init_net.ipv6.ip6_null_entry->dst.dev = init_net.loopback_dev;
+	init_net.ipv6.ip6_null_entry->rt6i_idev = in6_dev_get(init_net.loopback_dev);
+#ifdef CONFIG_IPV6_MULTIPLE_TABLES
+	init_net.ipv6.ip6_prohibit_entry->dst.dev = init_net.loopback_dev;
+	init_net.ipv6.ip6_prohibit_entry->rt6i_idev = in6_dev_get(init_net.loopback_dev);
+	init_net.ipv6.ip6_blk_hole_entry->dst.dev = init_net.loopback_dev;
+	init_net.ipv6.ip6_blk_hole_entry->rt6i_idev = in6_dev_get(init_net.loopback_dev);
+	init_net.ipv6.ip6_policy_failed_entry->dst.dev = init_net.loopback_dev;
+	init_net.ipv6.ip6_policy_failed_entry->rt6i_idev =
+		in6_dev_get(init_net.loopback_dev);
+#endif
 	ret = fib6_init();
 	if (ret)
 		goto out_register_subsys;
diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
index 7ac2365aa..cf0cf9f0a 100644
--- a/net/ipv6/tcp_ipv6.c
+++ b/net/ipv6/tcp_ipv6.c
@@ -404,7 +404,7 @@ static void tcp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
 		if (!sock_owned_by_user(sk))
 			tcp_v6_mtu_reduced(sk);
 		else if (!test_and_set_bit(TCP_MTU_REDUCED_DEFERRED,
-					   &tp->tsq_flags))
+					   &sk->sk_tsq_flags))
 			sock_hold(sk);
 		goto out;
 	}
diff --git a/net/netfilter/Kconfig b/net/netfilter/Kconfig
index e8d56d9a4..63073be30 100644
--- a/net/netfilter/Kconfig
+++ b/net/netfilter/Kconfig
@@ -10,7 +10,7 @@ config NETFILTER_INGRESS
 	  infrastructure.
 
 config NETFILTER_NETLINK
-	tristate
+	tristate "Netfilter NFNETLINK interface"
 
 config NETFILTER_NETLINK_ACCT
 	tristate "Netfilter NFACCT over NFNETLINK interface"
@@ -114,6 +114,18 @@ config NF_CONNTRACK_EVENTS
 
 	  If unsure, say `N'.
 
+config NF_CONNTRACK_RTCACHE
+	tristate "Cache route entries in conntrack objects"
+	depends on NETFILTER_ADVANCED
+	depends on NF_CONNTRACK
+	help
+	  If this option is enabled, the connection tracking code will
+	  cache routing information for each connection that is being
+	  forwarded, at a cost of 32 bytes per conntrack object.
+
+	  To compile it as a module, choose M here.  If unsure, say N.
+	  The module will be called nf_conntrack_rtcache.
+
 config NF_CONNTRACK_TIMEOUT
 	bool  'Connection tracking timeout'
 	depends on NETFILTER_ADVANCED
@@ -206,7 +218,6 @@ config NF_CONNTRACK_FTP
 
 config NF_CONNTRACK_H323
 	tristate "H.323 protocol support"
-	depends on IPV6 || IPV6=n
 	depends on NETFILTER_ADVANCED
 	help
 	  H.323 is a VoIP signalling protocol from ITU-T. As one of the most
@@ -956,7 +967,6 @@ config NETFILTER_XT_TARGET_SECMARK
 
 config NETFILTER_XT_TARGET_TCPMSS
 	tristate '"TCPMSS" target support'
-	depends on IPV6 || IPV6=n
 	default m if NETFILTER_ADVANCED=n
 	---help---
 	  This option adds a `TCPMSS' target, which allows you to alter the
diff --git a/net/netfilter/Makefile b/net/netfilter/Makefile
index c23c3c844..8adef5136 100644
--- a/net/netfilter/Makefile
+++ b/net/netfilter/Makefile
@@ -16,6 +16,9 @@ obj-$(CONFIG_NETFILTER_NETLINK_LOG) += nfnetlink_log.o
 # connection tracking
 obj-$(CONFIG_NF_CONNTRACK) += nf_conntrack.o
 
+# optional conntrack route cache extension
+obj-$(CONFIG_NF_CONNTRACK_RTCACHE) += nf_conntrack_rtcache.o
+
 # SCTP protocol connection tracking
 obj-$(CONFIG_NF_CT_PROTO_DCCP) += nf_conntrack_proto_dccp.o
 obj-$(CONFIG_NF_CT_PROTO_GRE) += nf_conntrack_proto_gre.o
diff --git a/net/netfilter/nf_conntrack_proto_tcp.c b/net/netfilter/nf_conntrack_proto_tcp.c
index 69f687740..f24b62668 100644
--- a/net/netfilter/nf_conntrack_proto_tcp.c
+++ b/net/netfilter/nf_conntrack_proto_tcp.c
@@ -33,6 +33,9 @@
 #include <net/netfilter/ipv4/nf_conntrack_ipv4.h>
 #include <net/netfilter/ipv6/nf_conntrack_ipv6.h>
 
+/* Do not check the TCP window for incoming packets  */
+static int nf_ct_tcp_no_window_check __read_mostly = 1;
+
 /* "Be conservative in what you do,
     be liberal in what you accept from others."
     If it's non-zero, we mark only out of window RST segments as INVALID. */
@@ -513,6 +516,9 @@ static bool tcp_in_window(const struct nf_conn *ct,
 	s32 receiver_offset;
 	bool res, in_recv_win;
 
+	if (nf_ct_tcp_no_window_check)
+		return true;
+
 	/*
 	 * Get the required data from the packet.
 	 */
@@ -1479,6 +1485,13 @@ static struct ctl_table tcp_sysctl_table[] = {
 		.mode		= 0644,
 		.proc_handler	= proc_dointvec,
 	},
+	{
+		.procname       = "nf_conntrack_tcp_no_window_check",
+		.data           = &nf_ct_tcp_no_window_check,
+		.maxlen         = sizeof(unsigned int),
+		.mode           = 0644,
+		.proc_handler   = proc_dointvec,
+	},
 	{ }
 };
 #endif /* CONFIG_SYSCTL */
diff --git a/net/netfilter/nf_conntrack_rtcache.c b/net/netfilter/nf_conntrack_rtcache.c
new file mode 100644
index 000000000..1c6cb7484
--- /dev/null
+++ b/net/netfilter/nf_conntrack_rtcache.c
@@ -0,0 +1,413 @@
+/* route cache for netfilter.
+ *
+ * (C) 2014 Red Hat GmbH
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/types.h>
+#include <linux/netfilter.h>
+#include <linux/skbuff.h>
+#include <linux/stddef.h>
+#include <linux/kernel.h>
+#include <linux/netdevice.h>
+#include <linux/export.h>
+#include <linux/module.h>
+
+#include <net/dst.h>
+
+#include <net/netfilter/nf_conntrack.h>
+#include <net/netfilter/nf_conntrack_core.h>
+#include <net/netfilter/nf_conntrack_extend.h>
+#include <net/netfilter/nf_conntrack_rtcache.h>
+
+#if IS_ENABLED(CONFIG_NF_CONNTRACK_IPV6)
+#include <net/ip6_fib.h>
+#endif
+
+static void __nf_conn_rtcache_destroy(struct nf_conn_rtcache *rtc,
+				      enum ip_conntrack_dir dir)
+{
+	struct dst_entry *dst = rtc->cached_dst[dir].dst;
+
+	dst_release(dst);
+}
+
+static void nf_conn_rtcache_destroy(struct nf_conn *ct)
+{
+	struct nf_conn_rtcache *rtc = nf_ct_rtcache_find(ct);
+
+	if (!rtc)
+		return;
+
+	__nf_conn_rtcache_destroy(rtc, IP_CT_DIR_ORIGINAL);
+	__nf_conn_rtcache_destroy(rtc, IP_CT_DIR_REPLY);
+}
+
+static void nf_ct_rtcache_ext_add(struct nf_conn *ct)
+{
+	struct nf_conn_rtcache *rtc;
+
+	rtc = nf_ct_ext_add(ct, NF_CT_EXT_RTCACHE, GFP_ATOMIC);
+	if (rtc) {
+		rtc->cached_dst[IP_CT_DIR_ORIGINAL].iif = -1;
+		rtc->cached_dst[IP_CT_DIR_ORIGINAL].dst = NULL;
+		rtc->cached_dst[IP_CT_DIR_REPLY].iif = -1;
+		rtc->cached_dst[IP_CT_DIR_REPLY].dst = NULL;
+	}
+}
+
+static struct nf_conn_rtcache *nf_ct_rtcache_find_usable(struct nf_conn *ct)
+{
+	if (nf_ct_is_untracked(ct))
+		return NULL;
+	return nf_ct_rtcache_find(ct);
+}
+
+static struct dst_entry *
+nf_conn_rtcache_dst_get(const struct nf_conn_rtcache *rtc,
+			enum ip_conntrack_dir dir)
+{
+	return rtc->cached_dst[dir].dst;
+}
+
+static u32 nf_rtcache_get_cookie(int pf, const struct dst_entry *dst)
+{
+#if IS_ENABLED(CONFIG_NF_CONNTRACK_IPV6)
+	if (pf == NFPROTO_IPV6) {
+		const struct rt6_info *rt = (const struct rt6_info *)dst;
+
+		if (rt->rt6i_node)
+			return (u32)rt->rt6i_node->fn_sernum;
+	}
+#endif
+	return 0;
+}
+
+static void nf_conn_rtcache_dst_set(int pf,
+				    struct nf_conn_rtcache *rtc,
+				    struct dst_entry *dst,
+				    enum ip_conntrack_dir dir, int iif)
+{
+	if (rtc->cached_dst[dir].iif != iif)
+		rtc->cached_dst[dir].iif = iif;
+
+	if (rtc->cached_dst[dir].dst != dst) {
+		struct dst_entry *old;
+
+		dst_hold(dst);
+
+		old = xchg(&rtc->cached_dst[dir].dst, dst);
+		dst_release(old);
+
+#if IS_ENABLED(CONFIG_NF_CONNTRACK_IPV6)
+		if (pf == NFPROTO_IPV6)
+			rtc->cached_dst[dir].cookie =
+				nf_rtcache_get_cookie(pf, dst);
+#endif
+	}
+}
+
+static void nf_conn_rtcache_dst_obsolete(struct nf_conn_rtcache *rtc,
+					 enum ip_conntrack_dir dir)
+{
+	struct dst_entry *old;
+
+	pr_debug("Invalidate iif %d for dir %d on cache %p\n",
+		 rtc->cached_dst[dir].iif, dir, rtc);
+
+	old = xchg(&rtc->cached_dst[dir].dst, NULL);
+	dst_release(old);
+	rtc->cached_dst[dir].iif = -1;
+}
+
+static unsigned int nf_rtcache_in(u8 pf,
+				  struct sk_buff *skb,
+				  const struct nf_hook_state *state)
+{
+	struct nf_conn_rtcache *rtc;
+	enum ip_conntrack_info ctinfo;
+	enum ip_conntrack_dir dir;
+	struct dst_entry *dst;
+	struct nf_conn *ct;
+	int iif;
+	u32 cookie;
+
+	if (skb_dst(skb) || skb->sk)
+		return NF_ACCEPT;
+
+	ct = nf_ct_get(skb, &ctinfo);
+	if (!ct)
+		return NF_ACCEPT;
+
+	rtc = nf_ct_rtcache_find_usable(ct);
+	if (!rtc)
+		return NF_ACCEPT;
+
+	/* If the iif changed, don't use the cache; let the IP stack
+	 * do a route lookup.
+	 *
+	 * If rp_filter is enabled it might toss the skb, so we must
+	 * not bypass those checks.
+	 */
+	dir = CTINFO2DIR(ctinfo);
+	iif = nf_conn_rtcache_iif_get(rtc, dir);
+	if (state->in->ifindex != iif) {
+		pr_debug("ct %p, iif %d, cached iif %d, skip cached entry\n",
+			 ct, iif, state->in->ifindex);
+		return NF_ACCEPT;
+	}
+	dst = nf_conn_rtcache_dst_get(rtc, dir);
+	if (!dst)
+		return NF_ACCEPT;
+
+	cookie = nf_rtcache_get_cookie(pf, dst);
+
+	dst = dst_check(dst, cookie);
+	pr_debug("obtained dst %p for skb %p, cookie %d\n", dst, skb, cookie);
+	if (likely(dst))
+		skb_dst_set_noref(skb, dst);
+	else
+		nf_conn_rtcache_dst_obsolete(rtc, dir);
+
+	return NF_ACCEPT;
+}
+
+static unsigned int nf_rtcache_forward(u8 pf,
+				       struct sk_buff *skb,
+				       const struct nf_hook_state *state)
+{
+	struct nf_conn_rtcache *rtc;
+	enum ip_conntrack_info ctinfo;
+	enum ip_conntrack_dir dir;
+	struct nf_conn *ct;
+	struct dst_entry *dst = skb_dst(skb);
+	int iif;
+
+	ct = nf_ct_get(skb, &ctinfo);
+	if (!ct)
+		return NF_ACCEPT;
+
+	if (dst && dst_xfrm(dst))
+		return NF_ACCEPT;
+
+	if (!nf_ct_is_confirmed(ct)) {
+		if (WARN_ON(nf_ct_rtcache_find(ct)))
+			return NF_ACCEPT;
+		nf_ct_rtcache_ext_add(ct);
+		return NF_ACCEPT;
+	}
+
+	rtc = nf_ct_rtcache_find_usable(ct);
+	if (!rtc)
+		return NF_ACCEPT;
+
+	dir = CTINFO2DIR(ctinfo);
+	iif = nf_conn_rtcache_iif_get(rtc, dir);
+	pr_debug("ct %p, skb %p, dir %d, iif %d, cached iif %d\n",
+		 ct, skb, dir, iif, state->in->ifindex);
+	if (likely(state->in->ifindex == iif))
+		return NF_ACCEPT;
+
+	nf_conn_rtcache_dst_set(pf, rtc, skb_dst(skb), dir, state->in->ifindex);
+	return NF_ACCEPT;
+}
+
+static unsigned int nf_rtcache_in4(void *priv,
+				   struct sk_buff *skb,
+				  const struct nf_hook_state *state)
+{
+	return nf_rtcache_in(NFPROTO_IPV4, skb, state);
+}
+
+static unsigned int nf_rtcache_forward4(void *priv,
+					struct sk_buff *skb,
+				       const struct nf_hook_state *state)
+{
+	return nf_rtcache_forward(NFPROTO_IPV4, skb, state);
+}
+
+#if IS_ENABLED(CONFIG_NF_CONNTRACK_IPV6)
+static unsigned int nf_rtcache_in6(void *priv,
+				   struct sk_buff *skb,
+				  const struct nf_hook_state *state)
+{
+	return nf_rtcache_in(NFPROTO_IPV6, skb, state);
+}
+
+static unsigned int nf_rtcache_forward6(void *priv,
+					struct sk_buff *skb,
+				       const struct nf_hook_state *state)
+{
+	return nf_rtcache_forward(NFPROTO_IPV6, skb, state);
+}
+#endif
+
+static int nf_rtcache_dst_remove(struct nf_conn *ct, void *data)
+{
+	struct nf_conn_rtcache *rtc = nf_ct_rtcache_find(ct);
+	struct net_device *dev = data;
+
+	if (!rtc)
+		return 0;
+
+	if (dev->ifindex == rtc->cached_dst[IP_CT_DIR_ORIGINAL].iif ||
+	    dev->ifindex == rtc->cached_dst[IP_CT_DIR_REPLY].iif) {
+		nf_conn_rtcache_dst_obsolete(rtc, IP_CT_DIR_ORIGINAL);
+		nf_conn_rtcache_dst_obsolete(rtc, IP_CT_DIR_REPLY);
+	}
+
+	return 0;
+}
+
+static int nf_rtcache_netdev_event(struct notifier_block *this,
+				   unsigned long event, void *ptr)
+{
+	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
+	struct net *net = dev_net(dev);
+
+	if (event == NETDEV_DOWN)
+		nf_ct_iterate_cleanup(net, nf_rtcache_dst_remove, dev, 0, 0);
+
+	return NOTIFY_DONE;
+}
+
+static struct notifier_block nf_rtcache_notifier = {
+	.notifier_call = nf_rtcache_netdev_event,
+};
+
+static struct nf_hook_ops rtcache_ops[] = {
+	{
+		.hook		= nf_rtcache_in4,
+		.pf		= NFPROTO_IPV4,
+		.hooknum	= NF_INET_PRE_ROUTING,
+		.priority       = NF_IP_PRI_LAST,
+	},
+	{
+		.hook           = nf_rtcache_forward4,
+		.pf             = NFPROTO_IPV4,
+		.hooknum        = NF_INET_FORWARD,
+		.priority       = NF_IP_PRI_LAST,
+	},
+#if IS_ENABLED(CONFIG_NF_CONNTRACK_IPV6)
+	{
+		.hook		= nf_rtcache_in6,
+		.pf		= NFPROTO_IPV6,
+		.hooknum	= NF_INET_PRE_ROUTING,
+		.priority       = NF_IP_PRI_LAST,
+	},
+	{
+		.hook           = nf_rtcache_forward6,
+		.pf             = NFPROTO_IPV6,
+		.hooknum        = NF_INET_FORWARD,
+		.priority       = NF_IP_PRI_LAST,
+	},
+#endif
+};
+
+static struct nf_ct_ext_type rtcache_extend __read_mostly = {
+	.len	= sizeof(struct nf_conn_rtcache),
+	.align	= __alignof__(struct nf_conn_rtcache),
+	.id	= NF_CT_EXT_RTCACHE,
+	.destroy = nf_conn_rtcache_destroy,
+};
+
+static int __init nf_conntrack_rtcache_init(void)
+{
+	int ret = nf_ct_extend_register(&rtcache_extend);
+
+	if (ret < 0) {
+		pr_err("nf_conntrack_rtcache: Unable to register extension\n");
+		return ret;
+	}
+
+	ret = nf_register_hooks(rtcache_ops, ARRAY_SIZE(rtcache_ops));
+	if (ret < 0) {
+		nf_ct_extend_unregister(&rtcache_extend);
+		return ret;
+	}
+
+	ret = register_netdevice_notifier(&nf_rtcache_notifier);
+	if (ret) {
+		nf_unregister_hooks(rtcache_ops, ARRAY_SIZE(rtcache_ops));
+		nf_ct_extend_unregister(&rtcache_extend);
+	}
+
+	return ret;
+}
+
+static int nf_rtcache_ext_remove(struct nf_conn *ct, void *data)
+{
+	struct nf_conn_rtcache *rtc = nf_ct_rtcache_find(ct);
+
+	return rtc != NULL;
+}
+
+static bool __exit nf_conntrack_rtcache_wait_for_dying(struct net *net)
+{
+	bool wait = false;
+	int cpu;
+
+	for_each_possible_cpu(cpu) {
+		struct nf_conntrack_tuple_hash *h;
+		struct hlist_nulls_node *n;
+		struct nf_conn *ct;
+		struct ct_pcpu *pcpu = per_cpu_ptr(net->ct.pcpu_lists, cpu);
+
+		rcu_read_lock();
+		spin_lock_bh(&pcpu->lock);
+
+		hlist_nulls_for_each_entry(h, n, &pcpu->dying, hnnode) {
+			ct = nf_ct_tuplehash_to_ctrack(h);
+			if (nf_ct_rtcache_find(ct) != NULL) {
+				wait = true;
+				break;
+			}
+		}
+		spin_unlock_bh(&pcpu->lock);
+		rcu_read_unlock();
+	}
+
+	return wait;
+}
+
+static void __exit nf_conntrack_rtcache_fini(void)
+{
+	struct net *net;
+	int count = 0;
+
+	/* remove hooks so no new connections get rtcache extension */
+	nf_unregister_hooks(rtcache_ops, ARRAY_SIZE(rtcache_ops));
+
+	synchronize_net();
+
+	unregister_netdevice_notifier(&nf_rtcache_notifier);
+
+	rtnl_lock();
+
+	/* zap all conntracks with rtcache extension */
+	for_each_net(net)
+		nf_ct_iterate_cleanup(net, nf_rtcache_ext_remove, NULL, 0, 0);
+
+	for_each_net(net) {
+		/* .. and make sure they're gone from dying list, too */
+		while (nf_conntrack_rtcache_wait_for_dying(net)) {
+			msleep(200);
+			WARN_ONCE(++count > 25, "Waiting for all rtcache conntracks to go away\n");
+		}
+	}
+
+	rtnl_unlock();
+	synchronize_net();
+	nf_ct_extend_unregister(&rtcache_extend);
+}
+module_init(nf_conntrack_rtcache_init);
+module_exit(nf_conntrack_rtcache_fini);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Florian Westphal <fw@strlen.de>");
+MODULE_DESCRIPTION("Conntrack route cache extension");
diff --git a/net/netfilter/nf_conntrack_standalone.c b/net/netfilter/nf_conntrack_standalone.c
index 5f446cd9f..0e8cab44f 100644
--- a/net/netfilter/nf_conntrack_standalone.c
+++ b/net/netfilter/nf_conntrack_standalone.c
@@ -17,6 +17,7 @@
 #include <linux/percpu.h>
 #include <linux/netdevice.h>
 #include <linux/security.h>
+#include <linux/inet.h>
 #include <net/net_namespace.h>
 #ifdef CONFIG_SYSCTL
 #include <linux/sysctl.h>
@@ -298,10 +299,66 @@ static int ct_open(struct inode *inode, struct file *file)
 			sizeof(struct ct_iter_state));
 }
 
+struct kill_request {
+	u16 family;
+	union nf_inet_addr addr;
+};
+
+static int kill_matching(struct nf_conn *i, void *data)
+{
+	struct kill_request *kr = data;
+	struct nf_conntrack_tuple *t1 = &i->tuplehash[IP_CT_DIR_ORIGINAL].tuple;
+	struct nf_conntrack_tuple *t2 = &i->tuplehash[IP_CT_DIR_REPLY].tuple;
+
+	if (!kr->family)
+		return 1;
+
+	if (t1->src.l3num != kr->family)
+		return 0;
+
+	return (nf_inet_addr_cmp(&kr->addr, &t1->src.u3) ||
+		nf_inet_addr_cmp(&kr->addr, &t1->dst.u3) ||
+		nf_inet_addr_cmp(&kr->addr, &t2->src.u3) ||
+		nf_inet_addr_cmp(&kr->addr, &t2->dst.u3));
+}
+
+static ssize_t ct_file_write(struct file *file, const char __user *buf,
+			     size_t count, loff_t *ppos)
+{
+	struct seq_file *seq = file->private_data;
+	struct net *net = seq_file_net(seq);
+	struct kill_request kr = { };
+	char req[INET6_ADDRSTRLEN] = { };
+
+	if (count == 0)
+		return 0;
+
+	if (count >= INET6_ADDRSTRLEN)
+		count = INET6_ADDRSTRLEN - 1;
+
+	if (copy_from_user(req, buf, count))
+		return -EFAULT;
+
+	if (strnchr(req, count, ':')) {
+		kr.family = AF_INET6;
+		if (!in6_pton(req, count, (void *)&kr.addr, '\n', NULL))
+			return -EINVAL;
+	} else if (strnchr(req, count, '.')) {
+		kr.family = AF_INET;
+		if (!in4_pton(req, count, (void *)&kr.addr, '\n', NULL))
+			return -EINVAL;
+	}
+
+	nf_ct_iterate_cleanup(net, kill_matching, &kr, 0, 0);
+
+	return count;
+}
+
 static const struct file_operations ct_file_ops = {
 	.owner   = THIS_MODULE,
 	.open    = ct_open,
 	.read    = seq_read,
+	.write   = ct_file_write,
 	.llseek  = seq_lseek,
 	.release = seq_release_net,
 };
@@ -405,7 +462,7 @@ static int nf_conntrack_standalone_init_proc(struct net *net)
 	kuid_t root_uid;
 	kgid_t root_gid;
 
-	pde = proc_create("nf_conntrack", 0440, net->proc_net, &ct_file_ops);
+	pde = proc_create("nf_conntrack", 0660, net->proc_net, &ct_file_ops);
 	if (!pde)
 		goto out_nf_conntrack;
 
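The ct_file_write() handler added above is what makes the nf_conntrack proc entry writable (hence the 0440 -> 0660 mode change): an IPv4 or IPv6 address terminated by a newline flushes every conntrack entry whose original or reply tuple references that address, while a token containing neither '.' nor ':' leaves kr.family at 0, so kill_matching() matches every entry and the whole table is flushed. A minimal userspace sketch of that interface follows; it assumes a kernel with this patch applied, and the address is a documentation example, not something taken from the patch.

/* Sketch only: ask the patched kernel to drop all conntrack entries
 * that reference 192.0.2.1 by writing the address to the proc file.
 * Writing a token without '.' or ':' (e.g. "flush\n") clears the table.
 */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	const char *req = "192.0.2.1\n";
	int fd = open("/proc/net/nf_conntrack", O_WRONLY);

	if (fd < 0) {
		perror("open /proc/net/nf_conntrack");
		return 1;
	}
	if (write(fd, req, strlen(req)) < 0)
		perror("write");
	close(fd);
	return 0;
}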
diff --git a/net/netfilter/nf_nat_core.c b/net/netfilter/nf_nat_core.c
index 2916f4815..c8df7278e 100644
--- a/net/netfilter/nf_nat_core.c
+++ b/net/netfilter/nf_nat_core.c
@@ -95,6 +95,9 @@ int nf_xfrm_me_harder(struct net *net, struct sk_buff *skb, unsigned int family)
 	struct dst_entry *dst;
 	int err;
 
+	if (skb->dev && !dev_net(skb->dev)->xfrm.policy_count[XFRM_POLICY_OUT])
+		return 0;
+
 	err = xfrm_decode_session(skb, &fl, family);
 	if (err < 0)
 		return err;
diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
index b17f9097c..2e4b6ca92 100644
--- a/net/packet/af_packet.c
+++ b/net/packet/af_packet.c
@@ -1778,6 +1778,7 @@ static int packet_rcv_spkt(struct sk_buff *skb, struct net_device *dev,
 {
 	struct sock *sk;
 	struct sockaddr_pkt *spkt;
+	struct packet_sock *po;
 
 	/*
 	 *	When we registered the protocol we saved the socket in the data
@@ -1785,6 +1786,7 @@ static int packet_rcv_spkt(struct sk_buff *skb, struct net_device *dev,
 	 */
 
 	sk = pt->af_packet_priv;
+	po = pkt_sk(sk);
 
 	/*
 	 *	Yank back the headers [hope the device set this
@@ -1797,7 +1799,7 @@ static int packet_rcv_spkt(struct sk_buff *skb, struct net_device *dev,
 	 *	so that this procedure is noop.
 	 */
 
-	if (skb->pkt_type == PACKET_LOOPBACK)
+	if (!(po->pkt_type & (1 << skb->pkt_type)))
 		goto out;
 
 	if (!net_eq(dev_net(dev), sock_net(sk)))
@@ -2035,12 +2037,12 @@ static int packet_rcv(struct sk_buff *skb, struct net_device *dev,
 	unsigned int snaplen, res;
 	bool is_drop_n_account = false;
 
-	if (skb->pkt_type == PACKET_LOOPBACK)
-		goto drop;
-
 	sk = pt->af_packet_priv;
 	po = pkt_sk(sk);
 
+	if (!(po->pkt_type & (1 << skb->pkt_type)))
+		goto drop;
+
 	if (!net_eq(dev_net(dev), sock_net(sk)))
 		goto drop;
 
@@ -2166,12 +2168,12 @@ static int tpacket_rcv(struct sk_buff *skb, struct net_device *dev,
 	BUILD_BUG_ON(TPACKET_ALIGN(sizeof(*h.h2)) != 32);
 	BUILD_BUG_ON(TPACKET_ALIGN(sizeof(*h.h3)) != 48);
 
-	if (skb->pkt_type == PACKET_LOOPBACK)
-		goto drop;
-
 	sk = pt->af_packet_priv;
 	po = pkt_sk(sk);
 
+	if (!(po->pkt_type & (1 << skb->pkt_type)))
+		goto drop;
+
 	if (!net_eq(dev_net(dev), sock_net(sk)))
 		goto drop;
 
@@ -3250,6 +3252,7 @@ static int packet_create(struct net *net, struct socket *sock, int protocol,
 	mutex_init(&po->pg_vec_lock);
 	po->rollover = NULL;
 	po->prot_hook.func = packet_rcv;
+	po->pkt_type = PACKET_MASK_ANY & ~(1 << PACKET_LOOPBACK);
 
 	if (sock->type == SOCK_PACKET)
 		po->prot_hook.func = packet_rcv_spkt;
@@ -3836,6 +3839,17 @@ packet_setsockopt(struct socket *sock, int level, int optname, char __user *optv
 		po->xmit = val ? packet_direct_xmit : dev_queue_xmit;
 		return 0;
 	}
+	case PACKET_RECV_TYPE:
+	{
+		unsigned int val;
+
+		if (optlen != sizeof(val))
+			return -EINVAL;
+		if (copy_from_user(&val, optval, sizeof(val)))
+			return -EFAULT;
+		po->pkt_type = val & ~BIT(PACKET_LOOPBACK);
+		return 0;
+	}
 	default:
 		return -ENOPROTOOPT;
 	}
@@ -3888,6 +3902,13 @@ static int packet_getsockopt(struct socket *sock, int level, int optname,
 	case PACKET_VNET_HDR:
 		val = po->has_vnet_hdr;
 		break;
+	case PACKET_RECV_TYPE:
+		if (len > sizeof(unsigned int))
+			len = sizeof(unsigned int);
+		val = po->pkt_type;
+
+		data = &val;
+		break;
 	case PACKET_VERSION:
 		val = po->tp_version;
 		break;
diff --git a/net/packet/internal.h b/net/packet/internal.h
index 9ee46314b..76c895fcf 100644
--- a/net/packet/internal.h
+++ b/net/packet/internal.h
@@ -129,6 +129,7 @@ struct packet_sock {
 	struct net_device __rcu	*cached_dev;
 	int			(*xmit)(struct sk_buff *skb);
 	struct packet_type	prot_hook ____cacheline_aligned_in_smp;
+	unsigned int		pkt_type;
 };
 
 static struct packet_sock *pkt_sk(struct sock *sk)
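Together, the af_packet.c and internal.h changes above replace the hard-coded PACKET_LOOPBACK check with a per-socket pkt_type bit mask, initialised to accept everything except looped-back frames and adjustable through the new PACKET_RECV_TYPE socket option. A hedged userspace sketch is shown below; PACKET_RECV_TYPE (and PACKET_MASK_ANY) are assumed to be exported by the patched <linux/if_packet.h>, which is not part of the hunks shown here, and the program needs CAP_NET_RAW to open the socket.

/* Sketch only: restrict a packet socket to frames addressed to this
 * host plus broadcasts. PACKET_RECV_TYPE is assumed to come from the
 * patched <linux/if_packet.h>; the kernel masks out PACKET_LOOPBACK
 * regardless of what userspace requests.
 */
#include <stdio.h>
#include <arpa/inet.h>
#include <sys/socket.h>
#include <linux/if_ether.h>
#include <linux/if_packet.h>

int main(void)
{
	unsigned int mask = (1u << PACKET_HOST) | (1u << PACKET_BROADCAST);
	int fd = socket(AF_PACKET, SOCK_RAW, htons(ETH_P_ALL));

	if (fd < 0) {
		perror("socket");
		return 1;
	}
	if (setsockopt(fd, SOL_PACKET, PACKET_RECV_TYPE, &mask, sizeof(mask)) < 0)
		perror("setsockopt(PACKET_RECV_TYPE)");
	return 0;
}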
diff --git a/net/rfkill/Kconfig b/net/rfkill/Kconfig
index 868f1ad04..159b6ebea 100644
--- a/net/rfkill/Kconfig
+++ b/net/rfkill/Kconfig
@@ -1,7 +1,11 @@
 #
 # RF switch subsystem configuration
 #
-menuconfig RFKILL
+config RFKILL
+	bool
+	default y
+
+menuconfig RFKILL_FULL
 	tristate "RF switch subsystem support"
 	help
 	  Say Y here if you want to have control over RF switches
@@ -13,19 +17,19 @@ menuconfig RFKILL
 # LED trigger support
 config RFKILL_LEDS
 	bool
-	depends on RFKILL
+	depends on RFKILL_FULL
 	depends on LEDS_TRIGGERS = y || RFKILL = LEDS_TRIGGERS
 	default y
 
 config RFKILL_INPUT
 	bool "RF switch input support" if EXPERT
-	depends on RFKILL
+	depends on RFKILL_FULL
 	depends on INPUT = y || RFKILL = INPUT
 	default y if !EXPERT
 
 config RFKILL_REGULATOR
 	tristate "Generic rfkill regulator driver"
-	depends on RFKILL || !RFKILL
+	depends on RFKILL_FULL || !RFKILL_FULL
 	depends on REGULATOR
 	help
           This options enable controlling radio transmitters connected to
@@ -36,7 +40,7 @@ config RFKILL_REGULATOR
 
 config RFKILL_GPIO
 	tristate "GPIO RFKILL driver"
-	depends on RFKILL
+	depends on RFKILL_FULL
 	depends on GPIOLIB || COMPILE_TEST
 	default n
 	help
diff --git a/net/rfkill/Makefile b/net/rfkill/Makefile
index 311768783..dbb01a864 100644
--- a/net/rfkill/Makefile
+++ b/net/rfkill/Makefile
@@ -4,6 +4,6 @@
 
 rfkill-y			+= core.o
 rfkill-$(CONFIG_RFKILL_INPUT)	+= input.o
-obj-$(CONFIG_RFKILL)		+= rfkill.o
+obj-$(CONFIG_RFKILL_FULL)	+= rfkill.o
 obj-$(CONFIG_RFKILL_REGULATOR)	+= rfkill-regulator.o
 obj-$(CONFIG_RFKILL_GPIO)	+= rfkill-gpio.o
diff --git a/net/wireless/Kconfig b/net/wireless/Kconfig
index 6c606120a..4ea786eae 100644
--- a/net/wireless/Kconfig
+++ b/net/wireless/Kconfig
@@ -1,5 +1,5 @@
 config WIRELESS_EXT
-	bool
+	bool "Wireless extensions"
 
 config WEXT_CORE
 	def_bool y
@@ -11,10 +11,10 @@ config WEXT_PROC
 	depends on WEXT_CORE
 
 config WEXT_SPY
-	bool
+	bool "WEXT_SPY"
 
 config WEXT_PRIV
-	bool
+	bool "WEXT_PRIV"
 
 config CFG80211
 	tristate "cfg80211 - wireless configuration API"
@@ -188,7 +188,7 @@ config CFG80211_WEXT_EXPORT
 	  wext compatibility symbols to be exported.
 
 config LIB80211
-	tristate
+	tristate "LIB80211"
 	default n
 	help
 	  This options enables a library of common routines used
@@ -197,13 +197,18 @@ config LIB80211
 	  Drivers should select this themselves if needed.
 
 config LIB80211_CRYPT_WEP
-	tristate
+	tristate "LIB80211_CRYPT_WEP"
+	select LIB80211
+
 
 config LIB80211_CRYPT_CCMP
-	tristate
+	tristate "LIB80211_CRYPT_CCMP"
+	select LIB80211
+
 
 config LIB80211_CRYPT_TKIP
-	tristate
+	tristate "LIB80211_CRYPT_TKIP"
+	select LIB80211
 
 config LIB80211_DEBUG
 	bool "lib80211 debugging messages"
diff --git a/scripts/Makefile.build b/scripts/Makefile.build
index 7675d11ee..108d73273 100644
--- a/scripts/Makefile.build
+++ b/scripts/Makefile.build
@@ -93,7 +93,7 @@ modorder-target := $(obj)/modules.order
 # We keep a list of all modules in $(MODVERDIR)
 
 __build: $(if $(KBUILD_BUILTIN),$(builtin-target) $(lib-target) $(extra-y)) \
-	 $(if $(KBUILD_MODULES),$(obj-m) $(modorder-target)) \
+	 $(if $(KBUILD_MODULES),$(obj-m)) \
 	 $(subdir-ym) $(always)
 	@:
 
diff --git a/scripts/Makefile.lib b/scripts/Makefile.lib
index 0a07f9014..5324c072c 100644
--- a/scripts/Makefile.lib
+++ b/scripts/Makefile.lib
@@ -344,7 +344,7 @@ cmd_bzip2 = (cat $(filter-out FORCE,$^) | \
 
 quiet_cmd_lzma = LZMA    $@
 cmd_lzma = (cat $(filter-out FORCE,$^) | \
-	lzma -9 && $(call size_append, $(filter-out FORCE,$^))) > $@ || \
+	lzma e -d20 -lc1 -lp2 -pb2 -eos -si -so && $(call size_append, $(filter-out FORCE,$^))) > $@ || \
 	(rm -f $@ ; false)
 
 quiet_cmd_lzo = LZO     $@
diff --git a/scripts/gen_initramfs_list.sh b/scripts/gen_initramfs_list.sh
index 0055b07b0..7710113e6 100755
--- a/scripts/gen_initramfs_list.sh
+++ b/scripts/gen_initramfs_list.sh
@@ -229,7 +229,7 @@ cpio_list=
 output="/dev/stdout"
 output_file=""
 is_cpio_compressed=
-compr="gzip -n -9 -f"
+compr="gzip -n -9 -f -"
 
 arg="$1"
 case "$arg" in
@@ -245,13 +245,13 @@ case "$arg" in
 		output=${cpio_list}
 		echo "$output_file" | grep -q "\.gz$" \
                 && [ -x "`which gzip 2> /dev/null`" ] \
-                && compr="gzip -n -9 -f"
+                && compr="gzip -n -9 -f -"
 		echo "$output_file" | grep -q "\.bz2$" \
                 && [ -x "`which bzip2 2> /dev/null`" ] \
-                && compr="bzip2 -9 -f"
+                && compr="bzip2 -9 -f -"
 		echo "$output_file" | grep -q "\.lzma$" \
                 && [ -x "`which lzma 2> /dev/null`" ] \
-                && compr="lzma -9 -f"
+                && compr="lzma e -d20 -lc1 -lp2 -pb2 -eos -si -so"
 		echo "$output_file" | grep -q "\.xz$" \
                 && [ -x "`which xz 2> /dev/null`" ] \
                 && compr="xz --check=crc32 --lzma2=dict=1MiB"
@@ -318,7 +318,7 @@ if [ ! -z ${output_file} ]; then
 	if [ "${is_cpio_compressed}" = "compressed" ]; then
 		cat ${cpio_tfile} > ${output_file}
 	else
-		(cat ${cpio_tfile} | ${compr}  - > ${output_file}) \
+		(cat ${cpio_tfile} | ${compr} > ${output_file}) \
 		|| (rm -f ${output_file} ; false)
 	fi
 	[ -z ${cpio_file} ] && rm ${cpio_tfile}
diff --git a/scripts/kallsyms.c b/scripts/kallsyms.c
index 1f22a186c..8ee7b5539 100644
--- a/scripts/kallsyms.c
+++ b/scripts/kallsyms.c
@@ -61,6 +61,7 @@ static struct addr_range percpu_range = {
 static struct sym_entry *table;
 static unsigned int table_size, table_cnt;
 static int all_symbols = 0;
+static int uncompressed;
 static int absolute_percpu = 0;
 static char symbol_prefix_char = '\0';
 static int base_relative = 0;
@@ -446,6 +447,9 @@ static void write_src(void)
 
 	free(markers);
 
+	if (uncompressed)
+		return;
+
 	output_label("kallsyms_token_table");
 	off = 0;
 	for (i = 0; i < 256; i++) {
@@ -504,6 +508,9 @@ static void *find_token(unsigned char *str, int len, unsigned char *token)
 {
 	int i;
 
+	if (uncompressed)
+		return NULL;
+
 	for (i = 0; i < len - 1; i++) {
 		if (str[i] == token[0] && str[i+1] == token[1])
 			return &str[i];
@@ -576,6 +583,9 @@ static void optimize_result(void)
 {
 	int i, best;
 
+	if (uncompressed)
+		return;
+
 	/* using the '\0' symbol last allows compress_symbols to use standard
 	 * fast string functions */
 	for (i = 255; i >= 0; i--) {
@@ -764,6 +774,8 @@ int main(int argc, char **argv)
 				symbol_prefix_char = *p;
 			} else if (strcmp(argv[i], "--base-relative") == 0)
 				base_relative = 1;
+			else if (strcmp(argv[i], "--uncompressed") == 0)
+				uncompressed = 1;
 			else
 				usage();
 		}
diff --git a/scripts/ld-version.sh b/scripts/ld-version.sh
index d135882e2..66e6e5ed1 100755
--- a/scripts/ld-version.sh
+++ b/scripts/ld-version.sh
@@ -1,5 +1,6 @@
-#!/usr/bin/awk -f
+#!/bin/sh
 # extract linker version number from stdin and turn into single number
+exec awk '
 	{
 	gsub(".*\\)", "");
 	gsub(".*version ", "");
@@ -8,3 +9,4 @@
 	print a[1]*100000000 + a[2]*1000000 + a[3]*10000;
 	exit
 	}
+'
diff --git a/scripts/link-vmlinux.sh b/scripts/link-vmlinux.sh
index f742c6510..6aabf1d71 100755
--- a/scripts/link-vmlinux.sh
+++ b/scripts/link-vmlinux.sh
@@ -136,6 +136,10 @@ kallsyms()
 		kallsymopt="${kallsymopt} --base-relative"
 	fi
 
+	if [ -n "${CONFIG_KALLSYMS_UNCOMPRESSED}" ]; then
+		kallsymopt="${kallsymopt} --uncompressed"
+	fi
+
 	local aflags="${KBUILD_AFLAGS} ${KBUILD_AFLAGS_KERNEL}               \
 		      ${NOSTDINC_FLAGS} ${LINUXINCLUDE} ${KBUILD_CPPFLAGS}"
 
diff --git a/scripts/mod/modpost.c b/scripts/mod/modpost.c
index bd8349759..80f214adb 100644
--- a/scripts/mod/modpost.c
+++ b/scripts/mod/modpost.c
@@ -1964,7 +1964,9 @@ static void read_symbols(char *modname)
 		symname = remove_dot(info.strtab + sym->st_name);
 
 		handle_modversions(mod, &info, sym, symname);
+#ifndef CONFIG_MODULE_STRIPPED
 		handle_moddevtable(mod, &info, sym, symname);
+#endif
 	}
 	if (!is_vmlinux(modname) ||
 	     (is_vmlinux(modname) && vmlinux_section_warnings))
@@ -2108,7 +2110,9 @@ static void add_header(struct buffer *b, struct module *mod)
 	buf_printf(b, "#include <linux/vermagic.h>\n");
 	buf_printf(b, "#include <linux/compiler.h>\n");
 	buf_printf(b, "\n");
+#ifndef CONFIG_MODULE_STRIPPED
 	buf_printf(b, "MODULE_INFO(vermagic, VERMAGIC_STRING);\n");
+#endif
 	buf_printf(b, "\n");
 	buf_printf(b, "__visible struct module __this_module\n");
 	buf_printf(b, "__attribute__((section(\".gnu.linkonce.this_module\"))) = {\n");
@@ -2125,16 +2129,20 @@ static void add_header(struct buffer *b, struct module *mod)
 
 static void add_intree_flag(struct buffer *b, int is_intree)
 {
+#ifndef CONFIG_MODULE_STRIPPED
 	if (is_intree)
 		buf_printf(b, "\nMODULE_INFO(intree, \"Y\");\n");
+#endif
 }
 
 static void add_staging_flag(struct buffer *b, const char *name)
 {
+#ifndef CONFIG_MODULE_STRIPPED
 	static const char *staging_dir = "drivers/staging";
 
 	if (strncmp(staging_dir, name, strlen(staging_dir)) == 0)
 		buf_printf(b, "\nMODULE_INFO(staging, \"Y\");\n");
+#endif
 }
 
 /* In kernel, this size is defined in linux/module.h;
@@ -2238,11 +2246,13 @@ static void add_depends(struct buffer *b, struct module *mod,
 
 static void add_srcversion(struct buffer *b, struct module *mod)
 {
+#ifndef CONFIG_MODULE_STRIPPED
 	if (mod->srcversion[0]) {
 		buf_printf(b, "\n");
 		buf_printf(b, "MODULE_INFO(srcversion, \"%s\");\n",
 			   mod->srcversion);
 	}
+#endif
 }
 
 static void write_if_changed(struct buffer *b, const char *fname)
@@ -2476,7 +2486,9 @@ int main(int argc, char **argv)
 		add_staging_flag(&buf, mod->name);
 		err |= add_versions(&buf, mod);
 		add_depends(&buf, mod, modules);
+#ifndef CONFIG_MODULE_STRIPPED
 		add_moddevtable(&buf, mod);
+#endif
 		add_srcversion(&buf, mod);
 
 		sprintf(fname, "%s.mod.c", mod->name);
diff --git a/sound/core/Kconfig b/sound/core/Kconfig
index 9749f9e8b..9639a67e7 100644
--- a/sound/core/Kconfig
+++ b/sound/core/Kconfig
@@ -16,13 +16,13 @@ config SND_DMAENGINE_PCM
 	tristate
 
 config SND_HWDEP
-	tristate
+	tristate "Sound hardware support"
 
 config SND_RAWMIDI
 	tristate
 
 config SND_COMPRESS_OFFLOAD
-	tristate
+	tristate "Compression offloading support"
 
 config SND_JACK
 	bool
diff --git a/tools/build/Build.include b/tools/build/Build.include
index 1dcb95e76..9d6f97a7c 100644
--- a/tools/build/Build.include
+++ b/tools/build/Build.include
@@ -95,4 +95,4 @@ cxx_flags = -Wp,-MD,$(depfile),-MT,$@ $(CXXFLAGS) -D"BUILD_STR(s)=\#s" $(CXXFLAG
 ###
 ## HOSTCC C flags
 
-host_c_flags = -Wp,-MD,$(depfile),-MT,$@ $(CHOSTFLAGS) -D"BUILD_STR(s)=\#s" $(CHOSTFLAGS_$(basetarget).o) $(CHOSTFLAGS_$(obj))
+host_c_flags = -MD -MF $(depfile) -MT $@ $(CHOSTFLAGS) -D"BUILD_STR(s)=\#s" $(CHOSTFLAGS_$(basetarget).o) $(CHOSTFLAGS_$(obj))
diff --git a/tools/include/tools/be_byteshift.h b/tools/include/tools/be_byteshift.h
index 84c17d836..ed9932e97 100644
--- a/tools/include/tools/be_byteshift.h
+++ b/tools/include/tools/be_byteshift.h
@@ -1,6 +1,10 @@
 #ifndef _TOOLS_BE_BYTESHIFT_H
 #define _TOOLS_BE_BYTESHIFT_H
 
+#ifndef __linux__
+#include "linux_types.h"
+#endif
+
 #include <stdint.h>
 
 static inline uint16_t __get_unaligned_be16(const uint8_t *p)
diff --git a/tools/include/tools/le_byteshift.h b/tools/include/tools/le_byteshift.h
index 8fe9f2488..2e387b522 100644
--- a/tools/include/tools/le_byteshift.h
+++ b/tools/include/tools/le_byteshift.h
@@ -1,6 +1,10 @@
 #ifndef _TOOLS_LE_BYTESHIFT_H
 #define _TOOLS_LE_BYTESHIFT_H
 
+#ifndef __linux__
+#include "linux_types.h"
+#endif
+
 #include <stdint.h>
 
 static inline uint16_t __get_unaligned_le16(const uint8_t *p)
diff --git a/tools/include/tools/linux_types.h b/tools/include/tools/linux_types.h
new file mode 100644
index 000000000..f07ef8293
--- /dev/null
+++ b/tools/include/tools/linux_types.h
@@ -0,0 +1,22 @@
+#ifndef __LINUX_TYPES_H
+#define __LINUX_TYPES_H
+
+#include <stdint.h>
+
+typedef uint8_t __u8;
+typedef uint8_t __be8;
+typedef uint8_t __le8;
+
+typedef uint16_t __u16;
+typedef uint16_t __be16;
+typedef uint16_t __le16;
+
+typedef uint32_t __u32;
+typedef uint32_t __be32;
+typedef uint32_t __le32;
+
+typedef uint64_t __u64;
+typedef uint64_t __be64;
+typedef uint64_t __le64;
+
+#endif
diff --git a/tools/perf/pmu-events/jevents.c b/tools/perf/pmu-events/jevents.c
index 41611d7f9..51333bee1 100644
--- a/tools/perf/pmu-events/jevents.c
+++ b/tools/perf/pmu-events/jevents.c
@@ -35,6 +35,7 @@
 #include <stdlib.h>
 #include <errno.h>
 #include <string.h>
+#include <strings.h>
 #include <ctype.h>
 #include <unistd.h>
 #include <stdarg.h>
diff --git a/tools/perf/pmu-events/json.c b/tools/perf/pmu-events/json.c
index f67bbb0aa..a72c70a97 100644
--- a/tools/perf/pmu-events/json.c
+++ b/tools/perf/pmu-events/json.c
@@ -38,7 +38,6 @@
 #include <unistd.h>
 #include "jsmn.h"
 #include "json.h"
-#include <linux/kernel.h>
 
 
 static char *mapfile(const char *fn, size_t *size)
diff --git a/usr/Makefile b/usr/Makefile
index e767f019a..17328d3c1 100644
--- a/usr/Makefile
+++ b/usr/Makefile
@@ -53,6 +53,8 @@ ifneq ($(wildcard $(obj)/.initramfs_data.cpio.d),)
 	include $(obj)/.initramfs_data.cpio.d
 endif
 
+deps_initramfs_sane := $(foreach v,$(deps_initramfs),$(if $(findstring :,$(v)),,$(v)))
+
 quiet_cmd_initfs = GEN     $@
       cmd_initfs = $(initramfs) -o $@ $(ramfs-args) $(ramfs-input)
 
@@ -61,14 +63,14 @@ targets := initramfs_data.cpio.gz initramfs_data.cpio.bz2 \
 	initramfs_data.cpio.lzo initramfs_data.cpio.lz4 \
 	initramfs_data.cpio
 # do not try to update files included in initramfs
-$(deps_initramfs): ;
+$(deps_initramfs_sane): ;
 
-$(deps_initramfs): klibcdirs
+$(deps_initramfs_sane): klibcdirs
 # We rebuild initramfs_data.cpio if:
 # 1) Any included file is newer then initramfs_data.cpio
 # 2) There are changes in which files are included (added or deleted)
 # 3) If gen_init_cpio are newer than initramfs_data.cpio
 # 4) arguments to gen_initramfs.sh changes
-$(obj)/initramfs_data.cpio$(suffix_y): $(obj)/gen_init_cpio $(deps_initramfs) klibcdirs
+$(obj)/initramfs_data.cpio$(suffix_y): $(obj)/gen_init_cpio $(deps_initramfs_sane) klibcdirs
 	$(Q)$(initramfs) -l $(ramfs-input) > $(obj)/.initramfs_data.cpio.d
 	$(call if_changed,initfs)
-- 
GitLab